4 changes: 3 additions & 1 deletion llama_stack/core/server/server.py
@@ -174,7 +174,9 @@ def __init__(self, config: StackRunConfig, *args, **kwargs):

 @asynccontextmanager
 async def lifespan(app: StackApp):
-    logger.info("Starting up")
+    server_version = parse_version("llama-stack")
+
+    logger.info(f"Starting up Llama Stack server (version: {server_version})")
     assert app.stack is not None
     app.stack.create_registry_refresh_task()
     yield
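
A note on the startup change: the new code logs the running version via parse_version("llama-stack"), whose definition is outside this diff. The sketch below is only an assumption about what such a helper might do — resolving the installed distribution's version through importlib.metadata — and is not the implementation used in this PR.

# Hypothetical sketch only; the real parse_version helper in llama_stack may differ.
from importlib.metadata import PackageNotFoundError, version


def parse_version(distribution_name: str) -> str:
    """Return the installed version of the given distribution, or 'unknown' if absent."""
    try:
        return version(distribution_name)
    except PackageNotFoundError:
        return "unknown"

Under that assumption, the startup log reports whichever llama-stack version is installed in the serving environment.
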
10 changes: 5 additions & 5 deletions tests/integration/datasets/test_datasets.py
@@ -78,18 +78,18 @@ def data_url_from_file(file_path: str) -> str:
     ],
 )
 def test_register_and_iterrows(llama_stack_client, purpose, source, provider_id, limit):
-    dataset = llama_stack_client.datasets.register(
+    dataset = llama_stack_client.beta.datasets.register(
         purpose=purpose,
         source=source,
     )
     assert dataset.identifier is not None
     assert dataset.provider_id == provider_id
-    iterrow_response = llama_stack_client.datasets.iterrows(dataset.identifier, limit=limit)
+    iterrow_response = llama_stack_client.beta.datasets.iterrows(dataset.identifier, limit=limit)
     assert len(iterrow_response.data) == limit

-    dataset_list = llama_stack_client.datasets.list()
+    dataset_list = llama_stack_client.beta.datasets.list()
     assert dataset.identifier in [d.identifier for d in dataset_list]

-    llama_stack_client.datasets.unregister(dataset.identifier)
-    dataset_list = llama_stack_client.datasets.list()
+    llama_stack_client.beta.datasets.unregister(dataset.identifier)
+    dataset_list = llama_stack_client.beta.datasets.list()
     assert dataset.identifier not in [d.identifier for d in dataset_list]
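
These test changes track the dataset operations moving under the client's beta namespace (datasets.register/iterrows/list/unregister become beta.datasets.*). A standalone sketch of the updated call pattern follows; the base URL and the inline CSV are illustrative assumptions, whereas the integration tests build their source from tests/integration/datasets/test_dataset.csv via data_url_from_file.

# Sketch of the beta-namespaced dataset calls exercised by the updated tests.
# The base_url and CSV contents are assumptions for illustration, not values from the PR.
import base64

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server URL

csv_bytes = b"question,answer\nWhat is 2+2?,4\n"  # illustrative columns and rows
data_url = "data:text/csv;base64," + base64.b64encode(csv_bytes).decode()

dataset = client.beta.datasets.register(
    purpose="eval/messages-answer",
    source={"type": "uri", "uri": data_url},
)
rows = client.beta.datasets.iterrows(dataset.identifier, limit=1)
assert dataset.identifier in [d.identifier for d in client.beta.datasets.list()]

Unregistering follows the same pattern under the new namespace: client.beta.datasets.unregister(dataset.identifier).
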
14 changes: 7 additions & 7 deletions tests/integration/eval/test_eval.py
@@ -17,17 +17,17 @@

 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    dataset = llama_stack_client.datasets.register(
+    dataset = llama_stack_client.beta.datasets.register(
         purpose="eval/messages-answer",
         source={
             "type": "uri",
             "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
-    response = llama_stack_client.datasets.list()
+    response = llama_stack_client.beta.datasets.list()
     assert any(x.identifier == dataset.identifier for x in response)

-    rows = llama_stack_client.datasets.iterrows(
+    rows = llama_stack_client.beta.datasets.iterrows(
         dataset_id=dataset.identifier,
         limit=3,
     )
@@ -37,12 +37,12 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
         scoring_fn_id,
     ]
     benchmark_id = str(uuid.uuid4())
-    llama_stack_client.benchmarks.register(
+    llama_stack_client.alpha.benchmarks.register(
         benchmark_id=benchmark_id,
         dataset_id=dataset.identifier,
         scoring_functions=scoring_functions,
     )
-    list_benchmarks = llama_stack_client.benchmarks.list()
+    list_benchmarks = llama_stack_client.alpha.benchmarks.list()
     assert any(x.identifier == benchmark_id for x in list_benchmarks)

     response = llama_stack_client.alpha.eval.evaluate_rows(
@@ -66,15 +66,15 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):

 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    dataset = llama_stack_client.datasets.register(
+    dataset = llama_stack_client.beta.datasets.register(
         purpose="eval/messages-answer",
         source={
             "type": "uri",
             "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
     benchmark_id = str(uuid.uuid4())
-    llama_stack_client.benchmarks.register(
+    llama_stack_client.alpha.benchmarks.register(
         benchmark_id=benchmark_id,
         dataset_id=dataset.identifier,
         scoring_functions=[scoring_fn_id],
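
Benchmark registration and listing likewise move under the alpha namespace, alongside the eval client (alpha.eval.evaluate_rows) that was already namespaced before this change. A short sketch, reusing the client and a dataset registered as in the dataset sketch above:

# Sketch of the alpha-namespaced benchmark calls, mirroring the updated eval tests.
# `client` and `dataset` are assumed to come from the dataset sketch above.
import uuid

benchmark_id = str(uuid.uuid4())
client.alpha.benchmarks.register(
    benchmark_id=benchmark_id,
    dataset_id=dataset.identifier,
    scoring_functions=["basic::equality"],  # scoring function id from the test parametrization
)
assert any(b.identifier == benchmark_id for b in client.alpha.benchmarks.list())
# client.alpha.eval.evaluate_rows(...) is unchanged by this PR and stays under alpha.
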