From 27da4cd8f73126239c81df3178141f2c19245fab Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Thu, 20 Nov 2025 13:48:36 -0500
Subject: [PATCH] fix: remove parts of trace_protocol and telemetry that were
 overlooked

---
 .../k8s-benchmark/stack-configmap.yaml        |  7 -----
 .../distributions/k8s/stack-configmap.yaml    |  7 -----
 .../self_hosted_distro/starter.md             |  4 ---
 .../references/python_sdk_reference/index.md  | 26 -------------------
 pyproject.toml                                |  1 -
 .../openapi_generator/schema_collection.py    | 21 ---------------
 .../ci-tests/run-with-postgres-store.yaml     |  2 --
 .../distributions/ci-tests/run.yaml           |  2 --
 .../distributions/dell/run-with-safety.yaml   |  2 --
 src/llama_stack/distributions/dell/run.yaml   |  2 --
 .../meta-reference-gpu/run-with-safety.yaml   |  2 --
 .../distributions/meta-reference-gpu/run.yaml |  2 --
 .../distributions/nvidia/run-with-safety.yaml |  2 --
 src/llama_stack/distributions/nvidia/run.yaml |  2 --
 src/llama_stack/distributions/oci/run.yaml    |  2 --
 .../distributions/open-benchmark/run.yaml     |  2 --
 .../distributions/postgres-demo/run.yaml      |  2 --
 .../starter-gpu/run-with-postgres-store.yaml  |  2 --
 .../distributions/starter-gpu/run.yaml        |  2 --
 .../starter/run-with-postgres-store.yaml      |  2 --
 .../distributions/starter/run.yaml            |  2 --
 src/llama_stack/distributions/template.py     |  3 ---
 .../distributions/watsonx/run.yaml            |  2 --
 src/llama_stack_api/common/tracing.py         | 22 ----------------
 src/llama_stack_api/conversations.py          |  2 --
 src/llama_stack_api/datatypes.py              |  1 -
 src/llama_stack_api/files.py                  |  2 --
 src/llama_stack_api/inference.py              |  2 --
 src/llama_stack_api/models.py                 |  2 --
 src/llama_stack_api/prompts.py                |  2 --
 src/llama_stack_api/safety.py                 |  2 --
 src/llama_stack_api/shields.py                |  2 --
 src/llama_stack_api/tools.py                  |  3 ---
 src/llama_stack_api/vector_io.py              |  2 --
 34 files changed, 143 deletions(-)
 delete mode 100644 src/llama_stack_api/common/tracing.py

diff --git a/benchmarking/k8s-benchmark/stack-configmap.yaml b/benchmarking/k8s-benchmark/stack-configmap.yaml
index 58518ec18d..aed3b97c29 100644
--- a/benchmarking/k8s-benchmark/stack-configmap.yaml
+++ b/benchmarking/k8s-benchmark/stack-configmap.yaml
@@ -9,7 +9,6 @@ data:
     - inference
     - files
     - safety
-    - telemetry
     - tool_runtime
     - vector_io
     providers:
@@ -67,12 +66,6 @@ data:
         db: ${env.POSTGRES_DB:=llamastack}
         user: ${env.POSTGRES_USER:=llamastack}
         password: ${env.POSTGRES_PASSWORD:=llamastack}
-    telemetry:
-    - provider_id: meta-reference
-      provider_type: inline::meta-reference
-      config:
-        service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-        sinks: ${env.TELEMETRY_SINKS:=console}
     tool_runtime:
     - provider_id: brave-search
       provider_type: remote::brave-search
diff --git a/docs/docs/distributions/k8s/stack-configmap.yaml b/docs/docs/distributions/k8s/stack-configmap.yaml
index 255e39ac20..d0e083d297 100644
--- a/docs/docs/distributions/k8s/stack-configmap.yaml
+++ b/docs/docs/distributions/k8s/stack-configmap.yaml
@@ -8,7 +8,6 @@ data:
     - inference
     - files
     - safety
-    - telemetry
     - tool_runtime
     - vector_io
     providers:
@@ -73,12 +72,6 @@ data:
         db: ${env.POSTGRES_DB:=llamastack}
        user: ${env.POSTGRES_USER:=llamastack}
         password: ${env.POSTGRES_PASSWORD:=llamastack}
-    telemetry:
-    - provider_id: meta-reference
-      provider_type: inline::meta-reference
-      config:
-        service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-        sinks: ${env.TELEMETRY_SINKS:=console}
     tool_runtime:
     - provider_id: brave-search
       provider_type: remote::brave-search
diff --git a/docs/docs/distributions/self_hosted_distro/starter.md b/docs/docs/distributions/self_hosted_distro/starter.md
index 84c35f3d32..3d4c1fc1ac 100644
--- a/docs/docs/distributions/self_hosted_distro/starter.md
+++ b/docs/docs/distributions/self_hosted_distro/starter.md
@@ -116,10 +116,6 @@ The following environment variables can be configured:
 - `BRAVE_SEARCH_API_KEY`: Brave Search API key
 - `TAVILY_SEARCH_API_KEY`: Tavily Search API key
 
-### Telemetry Configuration
-- `OTEL_SERVICE_NAME`: OpenTelemetry service name
-- `OTEL_EXPORTER_OTLP_ENDPOINT`: OpenTelemetry collector endpoint URL
-
 ## Enabling Providers
 
 You can enable specific providers by setting appropriate environment variables. For example,
diff --git a/docs/docs/references/python_sdk_reference/index.md b/docs/docs/references/python_sdk_reference/index.md
index 6865674580..532341a4d9 100644
--- a/docs/docs/references/python_sdk_reference/index.md
+++ b/docs/docs/references/python_sdk_reference/index.md
@@ -360,32 +360,6 @@ Methods:
 
 - client.synthetic_data_generation.generate(\*\*params) -> SyntheticDataGenerationResponse
 
-## Telemetry
-
-Types:
-
-```python
-from llama_stack_client.types import (
-    QuerySpansResponse,
-    SpanWithStatus,
-    Trace,
-    TelemetryGetSpanResponse,
-    TelemetryGetSpanTreeResponse,
-    TelemetryQuerySpansResponse,
-    TelemetryQueryTracesResponse,
-)
-```
-
-Methods:
-
-- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse
-- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse
-- client.telemetry.get_trace(trace_id) -> Trace
-- client.telemetry.log_event(\*\*params) -> None
-- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse
-- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse
-- client.telemetry.save_spans_to_dataset(\*\*params) -> None
-
 ## Datasetio
 
 Types:
diff --git a/pyproject.toml b/pyproject.toml
index 3e16dc08fa..654819e1c5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -346,7 +346,6 @@ exclude = [
     "^src/llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
     "^src/llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
     "^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
-    "^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
     "^src/llama_stack/providers/utils/telemetry/tracing\\.py$",
     "^src/llama_stack/distributions/template\\.py$",
 ]
diff --git a/scripts/openapi_generator/schema_collection.py b/scripts/openapi_generator/schema_collection.py
index 51a70c62ad..127f6da9cc 100644
--- a/scripts/openapi_generator/schema_collection.py
+++ b/scripts/openapi_generator/schema_collection.py
@@ -8,7 +8,6 @@
 Schema discovery and collection for OpenAPI generation.
 """
 
-import importlib
 from typing import Any
 
 
@@ -20,23 +19,6 @@ def _ensure_components_schemas(openapi_schema: dict[str, Any]) -> None:
         openapi_schema["components"]["schemas"] = {}
 
 
-def _load_extra_schema_modules() -> None:
-    """
-    Import modules outside llama_stack_api that use schema_utils to register schemas.
-
-    The API package already imports its submodules via __init__, but server-side modules
-    like telemetry need to be imported explicitly so their decorator side effects run.
-    """
-    extra_modules = [
-        "llama_stack.core.telemetry.telemetry",
-    ]
-    for module_name in extra_modules:
-        try:
-            importlib.import_module(module_name)
-        except ImportError:
-            continue
-
-
 def _extract_and_fix_defs(schema: dict[str, Any], openapi_schema: dict[str, Any]) -> None:
     """
     Extract $defs from a schema, move them to components/schemas, and fix references.
@@ -79,9 +61,6 @@ def _ensure_json_schema_types_included(openapi_schema: dict[str, Any]) -> dict[s
         iter_registered_schema_types,
     )
 
-    # Import extra modules (e.g., telemetry) whose schema registrations live outside llama_stack_api
-    _load_extra_schema_modules()
-
     # Handle explicitly registered schemas first (union types, Annotated structs, etc.)
     for registration_info in iter_registered_schema_types():
         schema_type = registration_info.type
diff --git a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
index d942c23a4e..8d270aea66 100644
--- a/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/ci-tests/run-with-postgres-store.yaml
@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/ci-tests/run.yaml b/src/llama_stack/distributions/ci-tests/run.yaml
index 8b1cd2bb25..2c628fbf9b 100644
--- a/src/llama_stack/distributions/ci-tests/run.yaml
+++ b/src/llama_stack/distributions/ci-tests/run.yaml
@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/dell/run-with-safety.yaml b/src/llama_stack/distributions/dell/run-with-safety.yaml
index e0da8060d8..63bd951680 100644
--- a/src/llama_stack/distributions/dell/run-with-safety.yaml
+++ b/src/llama_stack/distributions/dell/run-with-safety.yaml
@@ -140,5 +140,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/dell/run.yaml b/src/llama_stack/distributions/dell/run.yaml
index bc3117d88f..93f0c35bcd 100644
--- a/src/llama_stack/distributions/dell/run.yaml
+++ b/src/llama_stack/distributions/dell/run.yaml
@@ -131,5 +131,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml b/src/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
index 2fa9d198b2..63fc3b1d2d 100644
--- a/src/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
+++ b/src/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
@@ -153,5 +153,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/meta-reference-gpu/run.yaml b/src/llama_stack/distributions/meta-reference-gpu/run.yaml
index 5c7f75ca8e..ba8235398d 100644
--- a/src/llama_stack/distributions/meta-reference-gpu/run.yaml
+++ b/src/llama_stack/distributions/meta-reference-gpu/run.yaml
@@ -138,5 +138,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/nvidia/run-with-safety.yaml b/src/llama_stack/distributions/nvidia/run-with-safety.yaml
index d2c7dd0906..7d95565e55 100644
--- a/src/llama_stack/distributions/nvidia/run-with-safety.yaml
+++ b/src/llama_stack/distributions/nvidia/run-with-safety.yaml
@@ -135,5 +135,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/nvidia/run.yaml b/src/llama_stack/distributions/nvidia/run.yaml
index c267587c7c..8c80b83032 100644
--- a/src/llama_stack/distributions/nvidia/run.yaml
+++ b/src/llama_stack/distributions/nvidia/run.yaml
@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/oci/run.yaml b/src/llama_stack/distributions/oci/run.yaml
index e385ec6067..ff0c818be2 100644
--- a/src/llama_stack/distributions/oci/run.yaml
+++ b/src/llama_stack/distributions/oci/run.yaml
@@ -132,5 +132,3 @@ registered_resources:
     provider_id: tavily-search
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/open-benchmark/run.yaml b/src/llama_stack/distributions/open-benchmark/run.yaml
index 7ebc58841b..43aa45b514 100644
--- a/src/llama_stack/distributions/open-benchmark/run.yaml
+++ b/src/llama_stack/distributions/open-benchmark/run.yaml
@@ -251,5 +251,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/postgres-demo/run.yaml b/src/llama_stack/distributions/postgres-demo/run.yaml
index 049f519cd5..c9316f9238 100644
--- a/src/llama_stack/distributions/postgres-demo/run.yaml
+++ b/src/llama_stack/distributions/postgres-demo/run.yaml
@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
index 75cc9d1887..17ef2ad22c 100644
--- a/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml
@@ -284,8 +284,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/starter-gpu/run.yaml b/src/llama_stack/distributions/starter-gpu/run.yaml
index 09c7be5a1e..58b6199727 100644
--- a/src/llama_stack/distributions/starter-gpu/run.yaml
+++ b/src/llama_stack/distributions/starter-gpu/run.yaml
@@ -275,8 +275,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
index f59c809d2f..8c0362864c 100644
--- a/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
+++ b/src/llama_stack/distributions/starter/run-with-postgres-store.yaml
@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/starter/run.yaml b/src/llama_stack/distributions/starter/run.yaml
index 435bb22a72..b5bd5f18ca 100644
--- a/src/llama_stack/distributions/starter/run.yaml
+++ b/src/llama_stack/distributions/starter/run.yaml
@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:
diff --git a/src/llama_stack/distributions/template.py b/src/llama_stack/distributions/template.py
index 90b458805e..bab3211e97 100644
--- a/src/llama_stack/distributions/template.py
+++ b/src/llama_stack/distributions/template.py
@@ -24,7 +24,6 @@
     Provider,
     SafetyConfig,
     ShieldInput,
-    TelemetryConfig,
     ToolGroupInput,
     VectorStoresConfig,
 )
@@ -189,7 +188,6 @@ class RunConfigSettings(BaseModel):
     default_benchmarks: list[BenchmarkInput] | None = None
     vector_stores_config: VectorStoresConfig | None = None
     safety_config: SafetyConfig | None = None
-    telemetry: TelemetryConfig = Field(default_factory=lambda: TelemetryConfig(enabled=True))
     storage_backends: dict[str, Any] | None = None
     storage_stores: dict[str, Any] | None = None
 
@@ -289,7 +287,6 @@ def run_config(
             "server": {
                 "port": 8321,
             },
-            "telemetry": self.telemetry.model_dump(exclude_none=True) if self.telemetry else None,
         }
 
         if self.vector_stores_config:
diff --git a/src/llama_stack/distributions/watsonx/run.yaml b/src/llama_stack/distributions/watsonx/run.yaml
index f8c489fe3f..55ea34cb69 100644
--- a/src/llama_stack/distributions/watsonx/run.yaml
+++ b/src/llama_stack/distributions/watsonx/run.yaml
@@ -132,5 +132,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
diff --git a/src/llama_stack_api/common/tracing.py b/src/llama_stack_api/common/tracing.py
deleted file mode 100644
index 830c2945ac..0000000000
--- a/src/llama_stack_api/common/tracing.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-def telemetry_traceable(cls):
-    """
-    Mark a protocol for automatic tracing when telemetry is enabled.
-
-    This is a metadata-only decorator with no dependencies on core.
-    Actual tracing is applied by core routers at runtime if telemetry is enabled.
-
-    Usage:
-        @runtime_checkable
-        @telemetry_traceable
-        class MyProtocol(Protocol):
-            ...
-    """
-    cls.__marked_for_tracing__ = True
-    return cls
diff --git a/src/llama_stack_api/conversations.py b/src/llama_stack_api/conversations.py
index 4854181d18..81b5ab2c48 100644
--- a/src/llama_stack_api/conversations.py
+++ b/src/llama_stack_api/conversations.py
@@ -9,7 +9,6 @@
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.openai_responses import (
     OpenAIResponseInputFunctionToolCallOutput,
     OpenAIResponseMCPApprovalRequest,
@@ -157,7 +156,6 @@ class ConversationItemDeletedResource(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Conversations(Protocol):
     """Conversations
 
diff --git a/src/llama_stack_api/datatypes.py b/src/llama_stack_api/datatypes.py
index f024068f36..c12fbc7780 100644
--- a/src/llama_stack_api/datatypes.py
+++ b/src/llama_stack_api/datatypes.py
@@ -102,7 +102,6 @@ class Api(Enum, metaclass=DynamicApiMeta):
     :cvar eval: Model evaluation and benchmarking framework
     :cvar post_training: Fine-tuning and model training
     :cvar tool_runtime: Tool execution and management
-    :cvar telemetry: Observability and system monitoring
     :cvar models: Model metadata and management
     :cvar shields: Safety shield implementations
     :cvar datasets: Dataset creation and management
diff --git a/src/llama_stack_api/files.py b/src/llama_stack_api/files.py
index 8a75a1c392..e515fe0aef 100644
--- a/src/llama_stack_api/files.py
+++ b/src/llama_stack_api/files.py
@@ -11,7 +11,6 @@
 from pydantic import BaseModel, Field
 
 from llama_stack_api.common.responses import Order
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
 
@@ -102,7 +101,6 @@ class OpenAIFileDeleteResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Files(Protocol):
     """Files
 
diff --git a/src/llama_stack_api/inference.py b/src/llama_stack_api/inference.py
index b42de95be5..4a169486a5 100644
--- a/src/llama_stack_api/inference.py
+++ b/src/llama_stack_api/inference.py
@@ -22,7 +22,6 @@
 from llama_stack_api.common.responses import (
     Order,
 )
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.models import Model
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
@@ -989,7 +988,6 @@ class OpenAIEmbeddingsRequestWithExtraBody(BaseModel, extra="allow"):
 
 
 @runtime_checkable
-@telemetry_traceable
 class InferenceProvider(Protocol):
     """
     This protocol defines the interface that should be implemented by all inference providers.
diff --git a/src/llama_stack_api/models.py b/src/llama_stack_api/models.py
index 98c16b6c25..3efdfe66bd 100644
--- a/src/llama_stack_api/models.py
+++ b/src/llama_stack_api/models.py
@@ -9,7 +9,6 @@
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -106,7 +105,6 @@ class OpenAIListModelsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Models(Protocol):
     async def list_models(self) -> ListModelsResponse:
         """List all models.
diff --git a/src/llama_stack_api/prompts.py b/src/llama_stack_api/prompts.py
index 8562e47042..2054ccd307 100644
--- a/src/llama_stack_api/prompts.py
+++ b/src/llama_stack_api/prompts.py
@@ -10,7 +10,6 @@
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
 
@@ -93,7 +92,6 @@ class ListPromptsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Prompts(Protocol):
     """Prompts
 
diff --git a/src/llama_stack_api/safety.py b/src/llama_stack_api/safety.py
index ef84be2ea6..7b4f2af5cb 100644
--- a/src/llama_stack_api/safety.py
+++ b/src/llama_stack_api/safety.py
@@ -9,7 +9,6 @@
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import OpenAIMessageParam
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.shields import Shield
@@ -94,7 +93,6 @@ async def get_shield(self, identifier: str) -> Shield: ...
 
 
 @runtime_checkable
-@telemetry_traceable
 class Safety(Protocol):
     """Safety
 
diff --git a/src/llama_stack_api/shields.py b/src/llama_stack_api/shields.py
index 19e412a5ae..36ad2351b5 100644
--- a/src/llama_stack_api/shields.py
+++ b/src/llama_stack_api/shields.py
@@ -8,7 +8,6 @@
 
 from pydantic import BaseModel
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -49,7 +48,6 @@ class ListShieldsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Shields(Protocol):
     @webmethod(route="/shields", method="GET", level=LLAMA_STACK_API_V1)
     async def list_shields(self) -> ListShieldsResponse:
diff --git a/src/llama_stack_api/tools.py b/src/llama_stack_api/tools.py
index 4dd5d55d23..94f2251b0b 100644
--- a/src/llama_stack_api/tools.py
+++ b/src/llama_stack_api/tools.py
@@ -11,7 +11,6 @@
 from typing_extensions import runtime_checkable
 
 from llama_stack_api.common.content_types import URL, InterleavedContent
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -109,7 +108,6 @@ class ListToolDefsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolGroups(Protocol):
     @webmethod(route="/toolgroups", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
     async def register_tool_group(
@@ -191,7 +189,6 @@ class SpecialToolGroup(Enum):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolRuntime(Protocol):
     tool_store: ToolStore | None = None
 
diff --git a/src/llama_stack_api/vector_io.py b/src/llama_stack_api/vector_io.py
index 135468d193..188ea3307f 100644
--- a/src/llama_stack_api/vector_io.py
+++ b/src/llama_stack_api/vector_io.py
@@ -13,7 +13,6 @@
 from fastapi import Body, Query
 from pydantic import BaseModel, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import InterleavedContent
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.vector_stores import VectorStore
@@ -572,7 +571,6 @@ def get_vector_store(self, vector_store_id: str) -> VectorStore | None: ...
 
 
 @runtime_checkable
-@telemetry_traceable
 class VectorIO(Protocol):
     vector_store_table: VectorStoreTable | None = None
 