Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 25 additions & 1 deletion .github/workflows/deploy-test-studio-kind.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ jobs:
# populate-studio, or this workflow file.
# Explicitly skip docs-only changes.
if echo "$CHANGED_FILES" | grep -qE \
"^(operators/|deployment-scripts/|populate-studio/|geospatial-studio/|deploy_studio_k8s\.sh|requirements\.txt|\.github/workflows/deploy-test-studio\.yml)"; then
"^(operators/|deployment-scripts/|populate-studio/|geospatial-studio/|deploy_studio_k8s\.sh|requirements\.txt|\.github/workflows/deploy-test-studio(-kind)?\.yml)"; then
echo "deploy=true" >> $GITHUB_OUTPUT
echo "Deployment-relevant files changed – deployment will proceed"
else
Expand Down Expand Up @@ -370,6 +370,30 @@ jobs:
exit 1
fi

- name: Run Integration Tests
env:
GATEWAY_TLS_VERIFY: "0"
BASE_GATEWAY_URL: "https://localhost:4181"
run: |
echo "=== Running Integration Tests ==="

# Extract API key
if [[ -f ".studio-api-key" ]]; then
source .studio-api-key
export API_KEY=$STUDIO_API_KEY
echo "✅ Loaded API key"
else
echo "❌ Error: .studio-api-key file not found"
exit 1
fi

# Install test dependencies if not already installed
pip install -r requirements-dev.txt

# Run integration tests
python -m pytest -q -m integration --no-cov --log-file=run.log --log-file-level=INFO tests/integration/test_inference_models.py


- name: Run Workshop Labs
if: success()
env:
Expand Down
23 changes: 23 additions & 0 deletions requirements-dev.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# © Copyright IBM Corporation 2025
# SPDX-License-Identifier: Apache-2.0


# For Python 3.11
-r requirements.txt

black
flake8
hunter==3.7.0
IPython>=8.18,<9.0.0
rich>=14.0.0

# Needed by conftest
fastapi
httpx
python-dotenv  # conftest imports `dotenv` (load_dotenv/find_dotenv)
sqlalchemy
sqlalchemy-utils

# Test
pytest
pytest-cov
pre-commit
Empty file added tests/__init__.py
Empty file.
163 changes: 163 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
import os
import shlex
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

import pytest
from dotenv import find_dotenv, load_dotenv
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import create_database, database_exists, drop_database

from tests.integration.gateway import GatewayApiClient

from .integration.utils import make_timestamped_name


# -------------------------------
# Common configurations for Integration and Unit tests
# -------------------------------
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Register the project's custom pytest markers before collection.

    NOTE(review): the unit-test bootstrap (loading .env, pointing
    ``settings.DATABASE_URI`` at a ``_test`` database, disabling auth) is
    currently commented out upstream; the DB fixtures below depend on it
    and will not work until it is restored.
    """
    # Marker used by the CI invocation `pytest -m integration` to select
    # tests that hit live external services.
    config.addinivalue_line(
        "markers", "integration: marks tests that hit live external services"
    )


# -------------------------------
# Unit Tests Support
# -------------------------------
def _db_session():
    """Create the test database on first use and return its connection pieces.

    Returns:
        tuple: ``(db_url, session_factory, engine)`` — the database URL string,
        a ``sessionmaker`` bound to the engine, and the engine itself.

    NOTE(review): ``settings`` is not imported anywhere in this file — the
    setup that would provide it is commented out in ``pytest_configure`` —
    so calling this currently raises ``NameError``. Restore that import
    before enabling the unit-test fixtures.
    """
    db_url = str(settings.DATABASE_URI)
    # Create the database if this is the first run against a fresh server.
    if not database_exists(db_url):
        create_database(db_url)

    engine = create_engine(db_url)
    TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    return db_url, TestingSessionLocal, engine


@pytest.fixture(scope="session")
def db():
    """Session-scoped fixture: set up and tear down the PostgreSQL test database.

    Yields a SQLAlchemy session bound to a freshly created schema. On
    teardown the session is closed, the engine's connection pool is disposed,
    and the whole test database is dropped.

    NOTE(review): ``Base`` (the declarative base) is not imported in this
    file; restore its import together with ``settings`` before enabling
    the unit-test fixtures.
    """
    db_url, TestingSessionLocal, engine = _db_session()
    Base.metadata.create_all(bind=engine)

    session = TestingSessionLocal()
    yield session

    # Session.close_all() was deprecated in SQLAlchemy 1.3 and removed in 2.0.
    # Close this session and dispose the engine's pool so drop_database()
    # does not block on connections still checked out against the test DB.
    session.close()
    engine.dispose()
    drop_database(db_url)


def override_get_db():
    """FastAPI dependency override: yield a session against the test database.

    Mirrors the app's real ``get_db`` dependency but points at the test DB
    created by :func:`_db_session`; the session is always closed afterwards.
    """
    try:
        _, session_factory, _ = _db_session()
        session = session_factory()
        yield session
    finally:
        session.close()


@pytest.fixture(scope="module")
def client(db):
    """FastAPI ``TestClient`` wired to the test database.

    Depends on the ``db`` fixture so the schema exists first, and swaps the
    app's ``get_db`` dependency for :func:`override_get_db`.

    NOTE(review): ``app`` and ``get_db`` are not imported in this file —
    presumably removed along with the commented-out setup in
    ``pytest_configure``; restore those imports before enabling unit tests.
    """
    app.dependency_overrides[get_db] = override_get_db
    client = TestClient(app)
    return client


@pytest.fixture(scope="session")
def repo_root() -> Path:
    """Absolute path of the git repository's top-level directory."""
    # Ask git directly so the fixture works from any working directory
    # inside the checkout.
    output = subprocess.check_output(shlex.split("git rev-parse --show-toplevel"))
    return Path(output.decode().strip())


def pytest_addoption(parser):
    """Register custom command-line options for the test suite.

    See https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument
    for the accepted keyword arguments.
    """
    parser.addoption(
        "--tune-id",
        action="store",
        default=None,
        # Fixed: the original implicit string concatenation rendered as
        # "A ID of a geotunefrom the Fine Tune Service" (missing space,
        # "A" before a vowel sound).
        help="An ID of a geotune from the Fine Tune Service",
    )


@pytest.fixture(scope="session")
def tune_id(pytestconfig):
    """Value of the ``--tune-id`` CLI option; skips the test when absent."""
    value = pytestconfig.getoption("tune_id")
    if value:
        return value
    pytest.skip("[unit] skipped due to missing tune-id")


@pytest.fixture(scope="session")
def token() -> Optional[str]:
    """IBM Verify authentication token, read from the ``TOKEN`` env var.

    Only used for interactive integration tests; the value can be provided
    by .envrc or .env. Skips the requesting test when ``TOKEN`` is unset.
    """
    value = os.environ.get("TOKEN")
    if not value:
        pytest.skip("[unit] skipped due to missing authentication TOKEN in environment")
    return value


# -------------------------------
# Integration Tests Support (APIs)
# -------------------------------
def _int_env(name: str, default: str | None = None, required: bool = False) -> str:
v = os.getenv(name, default)
if required and not v:
pytest.skip(f"[integration] Missing env var {name}; skipping")
return v or ""


@pytest.fixture(scope="session")
def gateway() -> GatewayApiClient:
    """Session-scoped client for the external Gateway API.

    Requires ``BASE_GATEWAY_URL`` and ``API_KEY`` (typically from a .env
    file); dependent tests are skipped when either is missing.
    """
    # Locate .env whether pytest runs from the repo root or a subfolder.
    load_dotenv(find_dotenv(usecwd=True), override=True)

    base_url = os.getenv("BASE_GATEWAY_URL")
    api_key = os.getenv("API_KEY")
    if not (base_url and api_key):
        pytest.skip("[integration] Missing BASE_GATEWAY_URL or API_KEY; skipping")

    # Normalize stray whitespace and surrounding quotes from copy/paste,
    # plus a trailing CR that a CRLF-encoded .env can leave inside a
    # quoted value (outer strip() only catches it outside the quotes).
    api_key = api_key.strip().strip('"').strip("'")
    if api_key.endswith("\r"):
        api_key = api_key[:-1]

    return GatewayApiClient(base_url=base_url, api_key=api_key)


@pytest.fixture(scope="function")
def name_factory():
    """Return a name builder with one fixed timestamp per test.

    Every name generated inside a single test shares the same stamp, so
    related resource names match each other.
    """
    stamp = datetime.now(timezone.utc)

    def build(base: str, *, prefix: str | None = None, ext: str = "") -> str:
        return make_timestamped_name(base, prefix=prefix, ext=ext, now=stamp)

    return build
Empty file added tests/integration/__init__.py
Empty file.
Empty file.
50 changes: 50 additions & 0 deletions tests/integration/data/api_inference_models_and_inference.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
"""
Example payloads for integration tests.
Each payload is a Python dict, instead of JSON files.
"""

# Base valid model payload for creating a model via the API.
SANDBOX_MODEL = {
    "display_name": "integration-test-sandbox-model",
    # The bracketed tag presumably lets a cleanup job identify artifacts
    # created by the integration suite — TODO confirm.
    "description": "[Integration Test 175933_01we_10oct_25] Early-access test model made available for demonstration or limited user evaluation. These models may include incomplete features or evolving performance characteristics and are intended for feedback and experimentation before full deployment.",
    # Two-step pipeline: fetch inputs via the url-connector, then publish
    # results to GeoServer.
    "pipeline_steps": [
        {"status": "READY", "process_id": "url-connector", "step_number": 0},
        {"status": "WAITING", "process_id": "push-to-geoserver", "step_number": 1},
    ],
    "geoserver_push": [],
    # Input spec: Sentinel Hub connector over the HLS S30 collection.
    # "bands" left empty — presumably means "use defaults"; verify.
    "model_input_data_spec": [
        {
            "bands": [],
            "connector": "sentinelhub",
            "collection": "hls_s30",
            "file_suffix": "S2Hand",
        }
    ],
    "postprocessing_options": {},
    "sharable": False,
    # Onboarding references intentionally empty — presumably no model
    # artifacts are attached by this payload; confirm against the API.
    "model_onboarding_config": {
        "fine_tuned_model_id": "",
        "model_configs_url": "",
        "model_checkpoint_url": "",
    },
    "latest": True,
    "version": 1.0,
}

# Default request body for onboarding an inference model.
DEFAULT_ONBOARD_INFERENCE_MODEL = {
    "model_framework": "terratorch",
    # "string" / example.com values are placeholders — presumably tests
    # override these per scenario; confirm in the test modules.
    "model_id": "string",
    "model_name": "string",
    "model_configs_url": "https://example.com/",
    "model_checkpoint_url": "https://example.com/",
    "deployment_type": "gpu",
    # Kubernetes-style CPU/memory requests and limits.
    "resources": {
        "requests": {"cpu": "6", "memory": "16G"},
        "limits": {"cpu": "12", "memory": "32G"},
    },
    # Request (and cap at) exactly one NVIDIA GPU.
    "gpu_resources": {
        "requests": {"nvidia.com/gpu": "1"},
        "limits": {"nvidia.com/gpu": "1"},
    },
    # Empty — presumably the service falls back to its default image; confirm.
    "inference_container_image": "",
}
Loading
Loading