From d9786f368eb59d5a55996cfb7767451752aafa72 Mon Sep 17 00:00:00 2001
From: syvalamarek <72654834+syvalamarek@users.noreply.github.com>
Date: Sun, 1 Mar 2026 13:52:44 +0100
Subject: [PATCH 1/5] feat: Introduce custom catalog proxy endpoints and ID
resolution, and add new documentation files.
---
comet/api/app.py | 12 +-
comet/api/endpoints/custom_catalog.py | 227 ++++++++++++++++++++++++++
comet/api/endpoints/manifest.py | 73 +++++++++
comet/api/endpoints/stream.py | 77 +++++++--
comet/core/models.py | 50 +++++-
comet/templates/index.html | 127 ++++++++++++++
6 files changed, 542 insertions(+), 24 deletions(-)
create mode 100644 comet/api/endpoints/custom_catalog.py
diff --git a/comet/api/app.py b/comet/api/app.py
index a7dbf9f0..85a1dca8 100644
--- a/comet/api/app.py
+++ b/comet/api/app.py
@@ -10,7 +10,7 @@
from starlette.requests import Request
from comet.api.endpoints import (admin, base, chilllink, cometnet, cometnet_ui,
- config, debrid_sync, kodi, manifest, playback)
+ config, custom_catalog, debrid_sync, kodi, manifest, playback)
from comet.api.endpoints import stream as streams_router
from comet.background_scraper.worker import background_scraper
from comet.cometnet.manager import init_cometnet_service
@@ -80,7 +80,8 @@ async def lifespan(app: FastAPI):
if settings.BACKGROUND_SCRAPER_ENABLED:
background_scraper.clear_finished_task()
if not background_scraper.task:
- background_scraper.task = asyncio.create_task(background_scraper.start())
+ background_scraper.task = asyncio.create_task(
+ background_scraper.start())
# Start DMM Ingester if enabled
dmm_ingester_task = None
@@ -108,8 +109,10 @@ async def lifespan(app: FastAPI):
# Set callback to save torrents received from the network
cometnet_service.set_save_torrent_callback(save_torrent_from_network)
- cometnet_service.set_check_torrent_exists_callback(check_torrent_exists)
- cometnet_service.set_check_torrents_exist_callback(check_torrents_exist)
+ cometnet_service.set_check_torrent_exists_callback(
+ check_torrent_exists)
+ cometnet_service.set_check_torrents_exist_callback(
+ check_torrents_exist)
await cometnet_service.start()
# Start indexer manager
@@ -231,6 +234,7 @@ async def lifespan(app: FastAPI):
debrid_sync.router,
streams_router.streams,
chilllink.router,
+ custom_catalog.router,
)
for stremio_router in stremio_routers:
diff --git a/comet/api/endpoints/custom_catalog.py b/comet/api/endpoints/custom_catalog.py
new file mode 100644
index 00000000..91a4ace2
--- /dev/null
+++ b/comet/api/endpoints/custom_catalog.py
@@ -0,0 +1,227 @@
+"""
+Custom catalog proxy endpoints.
+
+Handles:
+- GET /{b64config}/catalog/{type}/{id}.json
+- GET /{b64config}/catalog/{type}/{id}/{extra:path}.json
+
+Catalog IDs with the pattern `cstm{idx}_{prefix}_{type}` are proxied
+to the user-configured custom catalog addon URL.
+
+Also exposes a helper `resolve_custom_prefix_to_imdb` used by stream.py
+to convert custom-prefix IDs (e.g. csfd12345) into IMDB IDs.
+"""
+
+import asyncio
+from typing import Optional
+
+import aiohttp
+from fastapi import APIRouter
+from fastapi.responses import JSONResponse
+from loguru import logger
+
+from comet.core.config_validation import config_check
+from comet.utils.http_client import http_client_manager
+
+router = APIRouter()
+
+
+# ---------------------------------------------------------------------------
+# Internal HTTP helper
+# ---------------------------------------------------------------------------
+
+async def _fetch_json(url: str, timeout: int = 15) -> Optional[dict]:
+ try:
+ session = await http_client_manager.get_session()
+ async with session.get(
+ url,
+            timeout=aiohttp.ClientTimeout(total=timeout),
+ headers={"Accept": "application/json"},
+ ) as resp:
+ if resp.status == 200:
+ data = await resp.json(content_type=None)
+ logger.info(f"Custom catalog: fetch success for {url}")
+ return data
+ logger.warning(f"Custom catalog: HTTP {resp.status} for {url}")
+ try:
+ text = await resp.text()
+ logger.warning(f"Custom catalog err body: {text}")
+ except Exception:
+ pass
+ except asyncio.TimeoutError:
+ logger.warning(f"Custom catalog: timeout fetching {url}")
+ except Exception as e:
+ logger.warning(f"Custom catalog: error fetching {url}: {e}")
+ return None
+
+
+# ---------------------------------------------------------------------------
+# Helper: resolve custom-prefix ID → IMDB ID
+# ---------------------------------------------------------------------------
+
+async def resolve_custom_prefix_to_imdb(
+ media_type: str,
+ media_id: str,
+ custom_catalogs: list,
+ timeout: int = 15,
+) -> tuple[Optional[str], Optional[dict]]:
+ """
+ For a media_id whose prefix matches one of the user's customCatalogs,
+ call /meta/{type}/{media_id}.json on the corresponding addon URL and
+ attempt to extract an IMDB ID from the response.
+
+ Returns ``(imdb_id, meta_dict)``. If IMDB ID is not found, imdb_id is None,
+ but meta_dict might still contain title/year for fallback scraping.
+ """
+ matched_url: Optional[str] = None
+ for entry in custom_catalogs or []:
+ prefix = (entry.get("prefix") or "").strip()
+ url = (entry.get("url") or "").strip().rstrip("/")
+ if prefix and url and media_id.startswith(prefix):
+ matched_url = url
+ break
+
+ if not matched_url:
+ logger.warning(
+ f"Custom catalog plugin logic: NO MATCHED URL for {media_id}")
+ return None, None
+
+ # Stremio uses IDs like prefix123:1:2 for series streams, but custom catalogs
+ # usually only respond to the base ID (e.g. prefix123) for meta endpoints.
+ base_id = media_id.split(":")[0]
+
+ meta_url = f"{matched_url}/meta/{media_type}/{base_id}.json"
+ logger.info(f"Custom catalog: requesting IMDB resolution from {meta_url}")
+ data = await _fetch_json(meta_url, timeout)
+ if not data:
+ logger.warning(
+ f"Custom catalog: fetch returned empty/none for {meta_url}")
+ return None, None
+
+ meta = data.get("meta") or {}
+ logger.info(f"Custom catalog: received meta keys = {list(meta.keys())}")
+
+ # Try common locations for IMDB ID – adjust to actual API response structure
+ for candidate in [
+ meta.get("imdbId"),
+ meta.get("imdb"),
+ meta.get("tt"),
+ (meta.get("externalIds") or {}).get("imdb"),
+ (meta.get("externalIds") or {}).get("imdbId"),
+ (meta.get("filmOverviewOut") or {}).get("imdbId"),
+ ((meta.get("filmOverviewOut") or {}).get("externalIds") or {}).get("imdb"),
+ ]:
+ if candidate and str(candidate).startswith("tt"):
+ return str(candidate), meta
+
+ return None, meta
+
+
+# ---------------------------------------------------------------------------
+# Catalog proxy endpoints
+# ---------------------------------------------------------------------------
+
+def _parse_catalog_id(catalog_id: str) -> Optional[tuple]:
+ """
+ Parse a catalog ID of the form ``cstm{idx}_{prefix}_{type}``.
+ Returns ``(idx, prefix, catalog_type)`` or ``None``.
+ """
+ if not catalog_id.startswith("cstm"):
+ return None
+ rest = catalog_id[4:] # strip "cstm"
+ try:
+ underscore_pos = rest.index("_")
+ idx = int(rest[:underscore_pos])
+ remainder = rest[underscore_pos + 1:]
+    # remainder is "{prefix}_{type}" – split at the *last* underscore
+    # because the prefix may itself contain underscores, while the
+    # type is always the rightmost segment.
+ last_underscore = remainder.rfind("_")
+ if last_underscore < 0:
+ return None
+ prefix = remainder[:last_underscore]
+ cat_type = remainder[last_underscore + 1:]
+ if not prefix or not cat_type:
+ return None
+ return idx, prefix, cat_type
+ except (ValueError, IndexError):
+ return None
+
+
+async def _handle_catalog(
+ b64config: str,
+ catalog_type: str,
+ catalog_id: str,
+ extra: str,
+) -> JSONResponse:
+ parsed = _parse_catalog_id(catalog_id)
+ if not parsed:
+ return JSONResponse({"metas": []})
+
+ idx, prefix, _declared_type = parsed
+
+ config = config_check(b64config, strict_b64config=False)
+ if not config:
+ return JSONResponse({"metas": []})
+
+ custom_catalogs = config.get("customCatalogs") or []
+ if idx >= len(custom_catalogs):
+ logger.warning(
+ f"Custom catalog: index {idx} out of range for user config")
+ return JSONResponse({"metas": []})
+
+ entry = custom_catalogs[idx]
+ base_url = (entry.get("url") or "").strip().rstrip("/")
+ entry_prefix = (entry.get("prefix") or "").strip()
+
+ if not base_url or not entry_prefix:
+ return JSONResponse({"metas": []})
+
+ # Safety: verify prefix still matches what is stored in user config
+ if entry_prefix != prefix:
+ logger.warning(
+ f"Custom catalog: prefix mismatch: config has {entry_prefix!r}, "
+ f"catalog_id implies {prefix!r}"
+ )
+ return JSONResponse({"metas": []})
+
+ # The original catalog ID on the remote addon is constructed from the prefix
+ # and the requested catalog type. The manifest endpoint registers catalogs
+ # using the pattern ``cstm{idx}_{prefix}_{type}`` which maps to
+ # ``{prefix}_{catalog_type}`` on the upstream addon.
+ original_catalog_id = f"{prefix}_{catalog_type}"
+ if extra:
+ proxy_url = f"{base_url}/catalog/{catalog_type}/{original_catalog_id}/{extra}.json"
+ else:
+ proxy_url = f"{base_url}/catalog/{catalog_type}/{original_catalog_id}.json"
+
+ data = await _fetch_json(proxy_url)
+ if data and isinstance(data.get("metas"), list):
+ return JSONResponse(
+ content=data,
+ headers={"Access-Control-Allow-Origin": "*"},
+ )
+ return JSONResponse(
+ content={"metas": []},
+ headers={"Access-Control-Allow-Origin": "*"},
+ )
+
+
+@router.get(
+ "/{b64config}/catalog/{catalog_type}/{catalog_id}.json",
+ tags=["Stremio"],
+ summary="Custom Catalog Proxy",
+ description="Proxies catalog requests to user-configured external Stremio catalog addons.",
+)
+async def catalog(b64config: str, catalog_type: str, catalog_id: str):
+ return await _handle_catalog(b64config, catalog_type, catalog_id, extra="")
+
+
+@router.get(
+ "/{b64config}/catalog/{catalog_type}/{catalog_id}/{extra:path}.json",
+ tags=["Stremio"],
+ summary="Custom Catalog Proxy (with extra)",
+ description="Proxies catalog requests with extra params (search, skip, genre…) to external catalog addons.",
+)
+async def catalog_with_extra(b64config: str, catalog_type: str, catalog_id: str, extra: str):
+ return await _handle_catalog(b64config, catalog_type, catalog_id, extra=extra)
diff --git a/comet/api/endpoints/manifest.py b/comet/api/endpoints/manifest.py
index 294c6d59..3d74defe 100644
--- a/comet/api/endpoints/manifest.py
+++ b/comet/api/endpoints/manifest.py
@@ -9,6 +9,51 @@
router = APIRouter()
+# ID prefixes that are always handled natively (not via custom catalogs)
+_BUILTIN_PREFIXES = {"tt", "kitsu"}
+
+
+def _build_custom_catalog_manifest(custom_catalogs: list) -> tuple[list, list]:
+ """
+ Given a user's customCatalogs config (list of {url, prefix} dicts),
+ return (stremio_catalogs, extra_id_prefixes).
+
+ stremio_catalogs – catalog entries to include in the manifest
+ extra_id_prefixes – additional idPrefixes to advertise so Stremio sends
+ stream requests for IDs with those prefixes to Comet
+ """
+ stremio_catalogs = []
+ extra_prefixes = []
+
+ seen_prefixes = set()
+ for idx, entry in enumerate(custom_catalogs or []):
+ url = (entry.get("url") or "").strip().rstrip("/")
+ prefix = (entry.get("prefix") or "").strip()
+ if not url or not prefix:
+ continue
+ if prefix in _BUILTIN_PREFIXES:
+ continue # never override built-ins
+
+ # One search-style catalog per custom addon (minimal; Stremio will
+ # discover via the addon's own manifest, but we still expose it so
+ # the user can search from the Comet manifest).
+ stremio_catalogs.append({
+ "type": "movie",
+ "id": f"cstm{idx}_{prefix}_movie",
+ "name": f"Custom ({prefix})",
+ })
+ stremio_catalogs.append({
+ "type": "series",
+ "id": f"cstm{idx}_{prefix}_series",
+ "name": f"Custom ({prefix})",
+ })
+
+ if prefix not in seen_prefixes:
+ extra_prefixes.append(prefix)
+ seen_prefixes.add(prefix)
+
+ return stremio_catalogs, extra_prefixes
+
@router.get(
"/manifest.json",
@@ -51,6 +96,34 @@ async def manifest(request: Request, b64config: str = None):
base_manifest["name"] = build_addon_name(settings.ADDON_NAME, config)
+ # Inject custom catalog entries and extra idPrefixes from user config
+ custom_catalogs_cfg = config.get("customCatalogs") or []
+ if custom_catalogs_cfg:
+ stremio_catalogs, extra_prefixes = _build_custom_catalog_manifest(
+ custom_catalogs_cfg)
+ if stremio_catalogs:
+ base_manifest["catalogs"] = stremio_catalogs
+ # Add "catalog" to resources if not already present
+ resource_names = [
+ r["name"] if isinstance(r, dict) else r
+ for r in base_manifest["resources"]
+ ]
+ if "catalog" not in resource_names:
+ base_manifest["resources"].append("catalog")
+
+ if extra_prefixes:
+ # Extend the stream resource's idPrefixes
+ stream_resource = next(
+ (r for r in base_manifest["resources"] if isinstance(
+ r, dict) and r.get("name") == "stream"),
+ None,
+ )
+ if stream_resource:
+ existing = stream_resource.get("idPrefixes", [])
+ stream_resource["idPrefixes"] = existing + [
+ p for p in extra_prefixes if p not in existing
+ ]
+
if settings.HTTP_CACHE_ENABLED:
etag = generate_etag(base_manifest)
if check_etag_match(request, etag):
diff --git a/comet/api/endpoints/stream.py b/comet/api/endpoints/stream.py
index d2cd0ec3..c5b8edf3 100644
--- a/comet/api/endpoints/stream.py
+++ b/comet/api/endpoints/stream.py
@@ -3,6 +3,7 @@
from urllib.parse import quote
from fastapi import APIRouter, BackgroundTasks, Request
+from typing import Optional
from comet.core.config_validation import config_check
from comet.core.logger import logger
@@ -23,6 +24,7 @@
from comet.utils.cache import (CachedJSONResponse, CachePolicies,
check_etag_match, generate_etag,
not_modified_response)
+from comet.api.endpoints.custom_catalog import resolve_custom_prefix_to_imdb
from comet.utils.formatting import (format_chilllink, format_title,
get_formatted_components,
get_formatted_components_plain)
@@ -254,7 +256,8 @@ async def background_scrape(
f"📥 Background scrape complete for {media_id}!",
)
except Exception as e:
- logger.log("SCRAPER", f"❌ Background scrape failed for {media_id}: {e}")
+ logger.log(
+ "SCRAPER", f"❌ Background scrape failed for {media_id}: {e}")
finally:
await scrape_lock.release()
@@ -291,7 +294,8 @@ async def check_service(entry):
for result in results:
if isinstance(result, Exception):
- logger.log("DEBRID", f"❌ Error checking availability: {result}")
+ logger.log(
+ "DEBRID", f"❌ Error checking availability: {result}")
continue
service, cached_hashes = result
for info_hash in cached_hashes:
@@ -421,7 +425,8 @@ async def stream(
enable_torrent = config["_enableTorrent"]
deduplicate_streams = config["deduplicateStreams"]
scrape_debrid_account_torrents = config["scrapeDebridAccountTorrents"]
- use_account_scrape = bool(debrid_entries and scrape_debrid_account_torrents)
+ use_account_scrape = bool(
+ debrid_entries and scrape_debrid_account_torrents)
response_cache_policy = CachePolicies.no_cache() if use_account_scrape else None
def _stream_response(content: dict, is_empty: bool = False):
@@ -447,6 +452,43 @@ def _stream_response(content: dict, is_empty: bool = False):
session = await http_client_manager.get_session()
metadata_scraper = MetadataScraper(session)
+ # Resolve custom-prefix IDs (e.g. csfd12345 → tt0111161)
+ custom_catalogs = config.get("customCatalogs") or []
+ custom_meta_title: Optional[str] = None
+ custom_meta_year: Optional[int] = None
+
+ if custom_catalogs:
+ # Check if media_id starts with any user-configured custom prefix
+ for _entry in custom_catalogs:
+ _prefix = (_entry.get("prefix") or "").strip()
+ if _prefix and media_id.startswith(_prefix):
+ resolved_id, resolved_meta = await resolve_custom_prefix_to_imdb(
+ media_type, media_id, custom_catalogs
+ )
+ if resolved_id:
+ logger.log(
+ "SCRAPER", f"Custom prefix: resolved {media_id} → {resolved_id}")
+ media_id = resolved_id
+ elif resolved_meta and resolved_meta.get("name"):
+ logger.log(
+ "SCRAPER", f"Custom prefix: no IMDB ID for {media_id}, but found metadata.")
+ custom_meta_title = resolved_meta.get("name")
+
+ # Parse year from "2026-2026" or "2026" or "2026-"
+ year_str = str(resolved_meta.get("year")
+ or resolved_meta.get("releaseInfo") or "")
+ if year_str:
+ # try to get the first 4 digits
+ import re
+ match = re.search(r'\d{4}', year_str)
+ if match:
+ custom_meta_year = int(match.group(0))
+ else:
+ logger.warning(
+ f"Custom prefix: could not resolve {media_id} to anything, returning empty streams")
+ return _stream_response({"streams": []}, is_empty=True)
+ break
+
id, season, episode = parse_media_id(media_type, media_id)
if settings.DIGITAL_RELEASE_FILTER:
@@ -455,7 +497,8 @@ def _stream_response(content: dict, is_empty: bool = False):
)
if not is_released:
- logger.log("FILTER", f"🚫 {media_id} is not released yet. Skipping.")
+ logger.log(
+ "FILTER", f"🚫 {media_id} is not released yet. Skipping.")
return _stream_response(
{
"streams": [
@@ -471,9 +514,17 @@ def _stream_response(content: dict, is_empty: bool = False):
is_empty=True,
)
- metadata, aliases = await metadata_scraper.fetch_metadata_and_aliases(
- media_type, media_id, id, season, episode
- )
+ if custom_meta_title and custom_meta_year:
+ metadata, aliases = await metadata_scraper.fetch_aliases_with_metadata(
+ media_type, media_id, custom_meta_title, custom_meta_year, None, id
+ )
+ if metadata is not None:
+ metadata["season"] = season
+ metadata["episode"] = episode
+ else:
+ metadata, aliases = await metadata_scraper.fetch_metadata_and_aliases(
+ media_type, media_id, id, season, episode
+ )
if metadata is None:
logger.log("SCRAPER", f"❌ Failed to fetch metadata for {media_id}")
@@ -728,7 +779,8 @@ def _wait_response():
existing_service_cache_status = await check_multi_service_availability(
debrid_entries, torrent_manager.torrents, search_season, search_episode
)
- _merge_service_cache_status(service_cache_status, existing_service_cache_status)
+ _merge_service_cache_status(
+ service_cache_status, existing_service_cache_status)
_merge_service_cache_status(
verified_service_cache_status, existing_service_cache_status
)
@@ -779,7 +831,8 @@ def _wait_response():
search_episode,
ip,
)
- _merge_service_cache_status(service_cache_status, fresh_service_cache_status)
+ _merge_service_cache_status(
+ service_cache_status, fresh_service_cache_status)
for service, error in debrid_errors.items():
cached_results.append(
@@ -906,7 +959,8 @@ def _wait_response():
config["resultFormat"],
)
formatted_title = format_title_fn(formatted_components)
- kodi_meta = _build_kodi_meta(rtn_data, formatted_components) if kodi else None
+ kodi_meta = _build_kodi_meta(
+ rtn_data, formatted_components) if kodi else None
info_hash_cache_status = service_cache_status.get(info_hash)
quoted_torrent_title = quote(torrent_title)
@@ -1005,7 +1059,8 @@ def _wait_response():
}
if chilllink:
- the_stream["_chilllink"] = format_chilllink(formatted_components, False)
+ the_stream["_chilllink"] = format_chilllink(
+ formatted_components, False)
if torrent.get("fileIndex") is not None:
the_stream["fileIdx"] = torrent["fileIndex"]
diff --git a/comet/core/models.py b/comet/core/models.py
index 028201a0..0f1be6c1 100644
--- a/comet/core/models.py
+++ b/comet/core/models.py
@@ -92,9 +92,11 @@ class AppSettings(BaseSettings):
SCRAPE_NEKOBT: Union[bool, str] = False
NEKOBT_ANIME_ONLY: Optional[bool] = True
SCRAPE_ZILEAN: Union[bool, str] = False
- ZILEAN_URL: Union[str, List[str]] = "https://zileanfortheweebs.midnightignite.me"
+ ZILEAN_URL: Union[str, List[str]
+ ] = "https://zileanfortheweebs.midnightignite.me"
SCRAPE_STREMTHRU: Union[bool, str] = False
- STREMTHRU_SCRAPE_URL: Union[str, List[str]] = "https://stremthru.13377001.xyz"
+ STREMTHRU_SCRAPE_URL: Union[str, List[str]
+ ] = "https://stremthru.13377001.xyz"
SCRAPE_DMM: Union[bool, str] = False
DMM_INGEST_ENABLED: Optional[bool] = False
DMM_INGEST_INTERVAL: Optional[int] = 86400
@@ -109,7 +111,8 @@ class AppSettings(BaseSettings):
SCRAPE_TORRENTIO: Union[bool, str] = False
TORRENTIO_URL: Union[str, List[str]] = "https://torrentio.strem.fun"
SCRAPE_MEDIAFUSION: Union[bool, str] = False
- MEDIAFUSION_URL: Union[str, List[str]] = "https://mediafusion.elfhosted.com"
+ MEDIAFUSION_URL: Union[str, List[str]
+ ] = "https://mediafusion.elfhosted.com"
MEDIAFUSION_API_PASSWORD: Union[str, List[str], None] = None
MEDIAFUSION_LIVE_SEARCH: Optional[bool] = True
SCRAPE_AIOSTREAMS: Union[bool, str] = False
@@ -251,14 +254,16 @@ class AppSettings(BaseSettings):
10000.0 # Max acceptable latency before disconnection
)
COMETNET_TRANSPORT_RATE_LIMIT_ENABLED: Optional[bool] = True
- COMETNET_TRANSPORT_RATE_LIMIT_COUNT: Optional[int] = 20 # Messages per window
+ # Messages per window
+ COMETNET_TRANSPORT_RATE_LIMIT_COUNT: Optional[int] = 20
COMETNET_TRANSPORT_RATE_LIMIT_WINDOW: Optional[float] = 1.0 # Seconds
# CometNet Reputation Tuning
COMETNET_REPUTATION_INITIAL: Optional[float] = 100.0
COMETNET_REPUTATION_MIN: Optional[float] = 0.0
COMETNET_REPUTATION_MAX: Optional[float] = 10000.0
- COMETNET_REPUTATION_THRESHOLD_UNTRUSTED: Optional[float] = 50.0 # Ban threshold
+ # Ban threshold
+ COMETNET_REPUTATION_THRESHOLD_UNTRUSTED: Optional[float] = 50.0
COMETNET_REPUTATION_THRESHOLD_TRUSTED: Optional[float] = (
1000.0 # Trust threshold (approx 1 day of heavy scraping)
)
@@ -450,7 +455,8 @@ def _resolve_persisted_token(
except FileNotFoundError:
pass
except Exception as error:
- logger.warning(f"Failed to read {token_name}_FILE ({token_file}): {error}")
+ logger.warning(
+ f"Failed to read {token_name}_FILE ({token_file}): {error}")
generated_token = secrets.token_urlsafe(32)
if not token_file:
@@ -483,7 +489,8 @@ def _resolve_persisted_token(
) from last_read_error
raise RuntimeError(error_context) from None
except Exception as error:
- logger.error(f"Failed to persist {token_name}_FILE ({token_file}): {error}")
+ logger.error(
+ f"Failed to persist {token_name}_FILE ({token_file}): {error}")
raise
@@ -1002,6 +1009,30 @@ class ConfigModel(BaseModel):
rtnSettings: Optional[CometSettingsModel] = rtn_settings_default
rtnRanking: Optional[DefaultRanking] = rtn_ranking_default
+ # Custom catalog addons configured per-user via the configure page
+ # Each entry: {"url": "https://...", "prefix": "csfd"}
+ # The prefix is used to route stream requests to the correct catalog addon.
+ # The "tt" prefix is always handled by Cinemeta (built-in).
+ customCatalogs: Optional[List[dict]] = []
+
+ @field_validator("customCatalogs", mode="before")
+ @classmethod
+ def validate_custom_catalogs(cls, v):
+ if not v:
+ return []
+ if not isinstance(v, list):
+ return []
+ sanitized = []
+ for entry in v:
+ if not isinstance(entry, dict):
+ continue
+ url = str(entry.get("url") or "").strip().rstrip("/")
+ prefix = str(entry.get("prefix") or "").strip()
+ # Skip entries with missing data or that would override built-in prefixes
+ if url and prefix and prefix not in ("tt", "kitsu"):
+ sanitized.append({"url": url, "prefix": prefix})
+ return sanitized
+
@field_validator("maxResultsPerResolution")
def check_max_results_per_resolution(cls, v):
if not isinstance(v, int):
@@ -1032,7 +1063,8 @@ def validate_debrid_services(cls, v):
return []
if isinstance(v, list):
return [
- DebridServiceEntry(**entry) if isinstance(entry, dict) else entry
+ DebridServiceEntry(
+ **entry) if isinstance(entry, dict) else entry
for entry in v
]
return v
@@ -1065,7 +1097,7 @@ def _build_database_instance(raw_url: str):
for scheme in ["postgresql://", "postgres://"]:
if raw_url.startswith(scheme):
- raw_url = raw_url[len(scheme) :]
+ raw_url = raw_url[len(scheme):]
break
return Database(f"postgresql+asyncpg://{raw_url}")
diff --git a/comet/templates/index.html b/comet/templates/index.html
index 5739f054..70527de0 100644
--- a/comet/templates/index.html
+++ b/comet/templates/index.html
@@ -517,6 +517,35 @@
>
+
+