Skip to content
Merged
Show file tree
Hide file tree
Changes from 25 commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
83d31dc
refactor: rename parameter pretrained_model_name_or_path
Tohrusky Sep 25, 2025
c67e3a8
refactor: rename parameter pretrained_model_name_or_path
Tohrusky Sep 25, 2025
5ef996f
refactor: rename parameter pretrained_model_name_or_path
Tohrusky Sep 25, 2025
c34d5e7
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 25, 2025
0315580
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 25, 2025
6292e2f
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 25, 2025
c448ec1
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 25, 2025
65bafa7
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
bd472bb
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
eb0cad6
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
13e0910
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
4ad047f
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
459488b
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
93d36e4
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
7216b9a
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
c894c2b
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
9fa55b8
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
4bfb4a6
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
771c2b3
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
1d92040
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
9433b88
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
1ba22b7
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
23d4c4b
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
4bd845c
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
045ce40
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
42eed70
feat: enhance AutoConfig to support path-based model configuration
Tohrusky Sep 26, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -170,3 +170,5 @@ cython_debug/

*.mp4
*.mkv

/cccv/cache_models/
18 changes: 17 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ pip install cccv

### Start

#### cv2
#### Load a registered model in cccv

a simple example to use the SISR (Single Image Super-Resolution) model to process an image

Expand All @@ -37,6 +37,22 @@ img = model.inference_image(img)
cv2.imwrite("test_out.jpg", img)
```

#### Load a custom model from remote repository or local path

a simple example using a [remote repository](https://github.com/EutropicAI/cccv_demo_remote_model), which is auto-registered and then loaded

```python
import cv2
import numpy as np

from cccv import AutoModel, SRBaseModel

# remote repo
model: SRBaseModel = AutoModel.from_pretrained("https://github.com/EutropicAI/cccv_demo_remote_model")
# local path
model: SRBaseModel = AutoModel.from_pretrained("/path/to/cccv_demo_model")
```

#### VapourSynth

a simple example to use the VapourSynth to process a video
Expand Down
2 changes: 1 addition & 1 deletion cccv/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,6 @@

from cccv.arch import ARCH_REGISTRY
from cccv.auto import AutoConfig, AutoModel
from cccv.config import CONFIG_REGISTRY, BaseConfig, SRBaseConfig, VFIBaseConfig, VSRBaseConfig
from cccv.config import CONFIG_REGISTRY, AutoBaseConfig, BaseConfig, SRBaseConfig, VFIBaseConfig, VSRBaseConfig
from cccv.model import MODEL_REGISTRY, AuxiliaryBaseModel, CCBaseModel, SRBaseModel, VFIBaseModel, VSRBaseModel
from cccv.type import ArchType, BaseModelInterface, ConfigType, ModelType
3 changes: 1 addition & 2 deletions cccv/arch/sr/dat_arch.py
Original file line number Diff line number Diff line change
Expand Up @@ -365,8 +365,7 @@ def __init__(
elif idx == 1:
W_sp, H_sp = self.split_size[0], self.split_size[1]
else:
print("ERROR MODE", idx)
exit(0)
raise ValueError(f"[CCCV] ERROR MODE: invalid idx {idx}, expected 0 or 1")
self.H_sp = H_sp
self.W_sp = W_sp

Expand Down
18 changes: 6 additions & 12 deletions cccv/arch/sr/upcunet_arch.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,8 +376,7 @@ def forward(self, x, tile_mode, cache_mode, alpha, pro):
t2 = tile_mode * 2
crop_size = (((h0 - 1) // t2 * t2 + t2) // tile_mode, ((w0 - 1) // t2 * t2 + t2) // tile_mode)
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")

ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
Expand Down Expand Up @@ -526,8 +525,7 @@ def forward_gap_sync(self, x, tile_mode, alpha, pro):
t2 = tile_mode * 2
crop_size = (((h0 - 1) // t2 * t2 + t2) // tile_mode, ((w0 - 1) // t2 * t2 + t2) // tile_mode)
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")
ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), "reflect")
Expand Down Expand Up @@ -767,8 +765,7 @@ def forward(self, x, tile_mode, cache_mode, alpha, pro):
t4 = tile_mode * 4
crop_size = (((h0 - 1) // t4 * t4 + t4) // tile_mode, ((w0 - 1) // t4 * t4 + t4) // tile_mode)
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")
ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), "reflect")
Expand Down Expand Up @@ -916,8 +913,7 @@ def forward_gap_sync(self, x, tile_mode, alpha, pro):
t4 = tile_mode * 4
crop_size = (((h0 - 1) // t4 * t4 + t4) // tile_mode, ((w0 - 1) // t4 * t4 + t4) // tile_mode)
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")
ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), "reflect")
Expand Down Expand Up @@ -1162,8 +1158,7 @@ def forward(self, x, tile_mode, cache_mode, alpha, pro):
t2 = tile_mode * 2
crop_size = (((h0 - 1) // t2 * t2 + t2) // tile_mode, ((w0 - 1) // t2 * t2 + t2) // tile_mode)
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")
ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), "reflect")
Expand Down Expand Up @@ -1323,8 +1318,7 @@ def forward_gap_sync(self, x, tile_mode, alpha, pro):
t2 = tile_mode * 2
crop_size = (((h0 - 1) // t2 * t2 + t2) // tile_mode, ((w0 - 1) // t2 * t2 + t2) // tile_mode) # 5.6G
else:
print("tile_mode config error")
os._exit(233)
raise ValueError("[CCCV] tile_mode config error: invalid tile_mode value")
ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), "reflect")
Expand Down
8 changes: 5 additions & 3 deletions cccv/arch/vfi/drba_arch.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
# type: ignore
import warnings

import numpy as np
import torch
import torch.nn as nn
Expand Down Expand Up @@ -61,7 +63,7 @@ def inference(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fa
torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1), None, scale=scale_list[i]
)
if ensemble:
print("warning: ensemble is not supported since RIFEv4.21")
warnings.warn("[CCCV] ensemble is not supported since RIFEv4.21", stacklevel=2)
else:
wf0 = warp(f0, flow[:, :2])
wf1 = warp(f1, flow[:, 2:4])
Expand All @@ -71,7 +73,7 @@ def inference(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fa
scale=scale_list[i],
)
if ensemble:
print("warning: ensemble is not supported since RIFEv4.21")
warnings.warn("[CCCV] ensemble is not supported since RIFEv4.21", stacklevel=2)
else:
mask = m0
flow = flow + fd
Expand All @@ -83,7 +85,7 @@ def inference(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fa
mask = torch.sigmoid(mask)
merged[4] = warped_img0 * mask + warped_img1 * (1 - mask)
if not fastmode:
print("contextnet is removed")
warnings.warn("[CCCV] contextnet is removed", stacklevel=2)
"""
c0 = self.contextnet(img0, flow[:, :2])
c1 = self.contextnet(img1, flow[:, 2:4])
Expand Down
8 changes: 5 additions & 3 deletions cccv/arch/vfi/ifnet_arch.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
# type: ignore
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
Expand Down Expand Up @@ -43,7 +45,7 @@ def forward(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fals
torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1), None, scale=scale_list[i]
)
if ensemble:
print("warning: ensemble is not supported since RIFEv4.21")
warnings.warn("[CCCV] ensemble is not supported since RIFEv4.21", stacklevel=2)
else:
wf0 = warp(f0, flow[:, :2])
wf1 = warp(f1, flow[:, 2:4])
Expand All @@ -53,7 +55,7 @@ def forward(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fals
scale=scale_list[i],
)
if ensemble:
print("warning: ensemble is not supported since RIFEv4.21")
warnings.warn("[CCCV] ensemble is not supported since RIFEv4.21", stacklevel=2)
else:
mask = m0
flow = flow + fd
Expand All @@ -65,7 +67,7 @@ def forward(self, x, timestep=0.5, scale_list=None, fastmode=True, ensemble=Fals
mask = torch.sigmoid(mask)
merged[4] = warped_img0 * mask + warped_img1 * (1 - mask)
if not fastmode:
print("contextnet is removed")
warnings.warn("[CCCV] contextnet is removed", stacklevel=2)
"""
c0 = self.contextnet(img0, flow[:, :2])
c1 = self.contextnet(img1, flow[:, 2:4])
Expand Down
6 changes: 3 additions & 3 deletions cccv/arch/vfi/vfi_utils/softsplat.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def cuda_kernel(strFunction: str, strKernel: str, objVariables: typing.Dict):
strKey += str(objValue.stride())

elif True:
print(strVariable, type(objValue))
print(f"[CCCV] {strVariable}, {type(objValue)}")
Comment on lines 63 to +64

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

This elif True: acts as a catch-all that simply prints information about unhandled types. This can hide bugs or unsupported cases. It would be more robust to raise an exception for unexpected types to ensure the program fails fast and provides a clear error.

Suggested change
elif True:
print(strVariable, type(objValue))
print(f"[CCCV] {strVariable}, {type(objValue)}")
else:
raise TypeError(f"Unsupported type for cuda_kernel variable '{strVariable}': {type(objValue)}")


# end
# end
Expand Down Expand Up @@ -106,10 +106,10 @@ def cuda_kernel(strFunction: str, strKernel: str, objVariables: typing.Dict):
strKernel = strKernel.replace("{{type}}", "long")

elif isinstance(objValue, torch.Tensor):
print(strVariable, objValue.dtype)
print(f"[CCCV] {strVariable}, {objValue.dtype}")

elif True:
print(strVariable, type(objValue))
print(f"[CCCV] {strVariable}, {type(objValue)}")
Comment on lines 108 to +112

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

Similar to the previous comment, these elif blocks handle unexpected tensor dtypes and other types by printing to the console. This can mask errors. It's better to raise a TypeError to explicitly signal that an unsupported type was passed to the function.

Suggested change
elif isinstance(objValue, torch.Tensor):
print(strVariable, objValue.dtype)
print(f"[CCCV] {strVariable}, {objValue.dtype}")
elif True:
print(strVariable, type(objValue))
print(f"[CCCV] {strVariable}, {type(objValue)}")
elif isinstance(objValue, torch.Tensor):
raise TypeError(f"Unsupported tensor dtype for cuda_kernel variable '{strVariable}': {objValue.dtype}")
else:
raise TypeError(f"Unsupported type for cuda_kernel variable '{strVariable}': {type(objValue)}")


# end
# end
Expand Down
87 changes: 71 additions & 16 deletions cccv/auto/config.py
Original file line number Diff line number Diff line change
@@ -1,32 +1,87 @@
import importlib.util
import json
import warnings
from pathlib import Path
from typing import Any, Optional, Union

from cccv.config import CONFIG_REGISTRY, BaseConfig
from cccv.config import CONFIG_REGISTRY, AutoBaseConfig
from cccv.type import ConfigType
from cccv.util.remote import git_clone


class AutoConfig:
@staticmethod
def from_pretrained(
pretrained_model_name: Union[ConfigType, str],
pretrained_model_name_or_path: Union[ConfigType, str, Path],
*,
model_dir: Optional[Union[Path, str]] = None,
**kwargs: Any,
) -> Any:
"""
Get a config instance of a pretrained model configuration.
Get a config instance of a pretrained model configuration, can be a registered config name or a local path or a git url.

:param pretrained_model_name: The name of the pretrained model configuration
:param pretrained_model_name_or_path: A registered config name, a local directory path, or a git URL of a model repository.
:param model_dir: The path to cache the downloaded model configuration. Should be a full path. If None, use default cache path.
:return:
"""
Comment on lines 20 to 26

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The new functionality to load from a path or URL is great! To help users adopt it, it would be very beneficial to expand this docstring to describe the expected directory structure. For example:

  • The path should point to a directory.
  • The directory must contain a config.json file.
  • config.json must have name, arch, and model keys.
  • Any custom Python code (e.g., for architectures or models) should be in .py files in that directory to be auto-registered.
  • The model weights file path is determined by the path key in config.json. If path is omitted, it defaults to a file in the same directory with a name matching the name key.

return CONFIG_REGISTRY.get(pretrained_model_name)
if "pretrained_model_name" in kwargs:
warnings.warn(
"[CCCV] 'pretrained_model_name' is deprecated, please use 'pretrained_model_name_or_path' instead.",
DeprecationWarning,
stacklevel=2,
)
pretrained_model_name_or_path = kwargs.pop("pretrained_model_name")

@staticmethod
def register(config: Union[BaseConfig, Any], name: Optional[str] = None) -> None:
"""
Register the given config class instance under the name BaseConfig.name or the given name.
Can be used as a function call. See docstring of this class for usage.
# 1. check if it's a registered config name, early return if found
if isinstance(pretrained_model_name_or_path, ConfigType):
pretrained_model_name_or_path = pretrained_model_name_or_path.value
if str(pretrained_model_name_or_path) in CONFIG_REGISTRY:
return CONFIG_REGISTRY.get(str(pretrained_model_name_or_path))

:param config: The config class instance to register
:param name: The name to register the config class instance under. If None, use BaseConfig.name
:return:
"""
# used as a function call
CONFIG_REGISTRY.register(obj=config, name=name)
# 2. check is a url or not, if it's a url, git clone it to model_dir then replace pretrained_model_name_or_path with the local path (Path)
if str(pretrained_model_name_or_path).startswith("http"):
pretrained_model_name_or_path = git_clone(
git_url=str(pretrained_model_name_or_path),
model_dir=model_dir,
**kwargs,
)

# 3. check if it's a real path
dir_path = Path(str(pretrained_model_name_or_path))

if not dir_path.exists() or not dir_path.is_dir():
raise ValueError(f"[CCCV] model configuration '{dir_path}' is not a valid config name or path")

# load config.json from the directory
config_path = dir_path / "config.json"
# check if config.json exists
if not config_path.exists():
raise FileNotFoundError(f"[CCCV] no valid config.json found in {dir_path}")

with open(config_path, "r", encoding="utf-8") as f:
config_dict = json.load(f)

for k in ["arch", "model", "name"]:
if k not in config_dict:
raise KeyError(
f"[CCCV] no key '{k}' in config.json in {dir_path}, you should provide a valid config.json containing a key '{k}'"
)

# auto import all .py files in the directory to register the arch, model and config
try:
for py_file in dir_path.glob("*.py"):
spec = importlib.util.spec_from_file_location(py_file.stem, py_file)
if spec is None or spec.loader is None:
continue
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
except Exception as e:
raise ImportError(f"[CCCV] failed to register model from {dir_path}, error: {e}, please check your .py files")

if "path" not in config_dict or config_dict["path"] is None or config_dict["path"] == "":
# add the path to the config_dict
config_dict["path"] = str(dir_path / config_dict["name"])

# convert config_dict to pydantic model
cfg = AutoBaseConfig.model_validate(config_dict)
return cfg
Loading
Loading