diff --git a/brevia/async_jobs.py b/brevia/async_jobs.py index 606f8ad..7a9d1c4 100644 --- a/brevia/async_jobs.py +++ b/brevia/async_jobs.py @@ -2,12 +2,15 @@ import logging import time from datetime import datetime -import sqlalchemy +from sqlalchemy import BinaryExpression, Column, desc, func, String, text +from pydantic import BaseModel as PydanticModel from sqlalchemy.dialects.postgresql import JSON, TIMESTAMP, SMALLINT -from sqlalchemy.orm import Session +from sqlalchemy.orm import Session, Query from langchain_community.vectorstores.pgembedding import BaseModel from brevia.connection import db_connection from brevia.services import BaseService +from brevia.utilities.dates import date_filter +from brevia.utilities.json_api import query_data_pagination from brevia.utilities.types import load_type MAX_DURATION = 120 # default max duration is 120 min / 2hr @@ -19,22 +22,22 @@ class AsyncJobsStore(BaseModel): """ Async Jobs table """ __tablename__ = "async_jobs" - service = sqlalchemy.Column(sqlalchemy.String(), nullable=False) - payload = sqlalchemy.Column(JSON()) - expires = sqlalchemy.Column(TIMESTAMP(timezone=False)) - created = sqlalchemy.Column( + service = Column(String(), nullable=False) + payload = Column(JSON()) + expires = Column(TIMESTAMP(timezone=False)) + created = Column( TIMESTAMP(timezone=False), nullable=False, - server_default=sqlalchemy.func.current_timestamp(), + server_default=func.current_timestamp(), ) - completed = sqlalchemy.Column(TIMESTAMP(timezone=False)) - locked_until = sqlalchemy.Column(TIMESTAMP(timezone=False)) - max_attempts = sqlalchemy.Column( + completed = Column(TIMESTAMP(timezone=False)) + locked_until = Column(TIMESTAMP(timezone=False)) + max_attempts = Column( SMALLINT(), nullable=False, server_default='1', ) - result = sqlalchemy.Column(JSON(), nullable=True) + result = Column(JSON(), nullable=True) def single_job(uuid: str) -> (AsyncJobsStore | None): @@ -43,6 +46,89 @@ def single_job(uuid: str) -> (AsyncJobsStore | None): return session.get(AsyncJobsStore, uuid) +class JobsFilter(PydanticModel): + """ Jobs filter """ + min_date: str | None = None + max_date: str | None = None + service: str | None = None + completed: bool | None = None + page: int = 1 + page_size: int = 50 + + +def get_jobs(filter: JobsFilter) -> dict: # pylint: disable=redefined-builtin + """ + Read async jobs with optional filters using pagination data in response. 
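+
+    Example (illustrative):
+
+        filters = JobsFilter(service='brevia.services.SummarizeFileService',
+                             completed=True, page=1, page_size=20)
+        result = get_jobs(filters)  # dict with 'data' and 'meta' keys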
+ """ + + # Handle date filters - only apply if explicitly provided + filter_min_date = text('1 = 1') # always true by default + filter_max_date = text('1 = 1') # always true by default + + if filter.min_date: + min_date = date_filter(filter.min_date, 'min') + filter_min_date = AsyncJobsStore.created >= min_date + + if filter.max_date: + max_date = date_filter(filter.max_date, 'max') + filter_max_date = AsyncJobsStore.created <= max_date + + filter_service = text('1 = 1') # (default) always true expression + if filter.service: + filter_service = AsyncJobsStore.service == filter.service + filter_completed = text('1 = 1') # (default) always true expression + if filter.completed is not None: + filter_completed = ( + AsyncJobsStore.completed.is_not(None) + if filter.completed + else AsyncJobsStore.completed.is_(None) + ) + + with Session(db_connection()) as session: + query = get_jobs_query( + session=session, + filter_min_date=filter_min_date, + filter_max_date=filter_max_date, + filter_service=filter_service, + filter_completed=filter_completed, + ) + result = query_data_pagination( + query=query, + page=filter.page, + page_size=filter.page_size + ) + return result + + +def get_jobs_query( + session: Session, + filter_min_date: BinaryExpression, + filter_max_date: BinaryExpression, + filter_service: BinaryExpression, + filter_completed: BinaryExpression, +) -> Query: + """ + Constructs a SQLAlchemy query to retrieve async jobs based on specified filters. + """ + + query = ( + session.query( + AsyncJobsStore.uuid, + AsyncJobsStore.service, + AsyncJobsStore.payload, + AsyncJobsStore.expires, + AsyncJobsStore.created, + AsyncJobsStore.completed, + AsyncJobsStore.locked_until, + AsyncJobsStore.max_attempts, + AsyncJobsStore.result, + ) + .filter(filter_min_date, filter_max_date, filter_service, filter_completed) + .order_by(desc(AsyncJobsStore.created)) + ) + return query + + def create_job( service: str, payload: dict, diff --git a/brevia/chat_history.py b/brevia/chat_history.py index c7def6d..efca025 100644 --- a/brevia/chat_history.py +++ b/brevia/chat_history.py @@ -1,7 +1,6 @@ """Chat history table & utilities""" from typing import List import logging -from datetime import datetime, time from langchain_community.vectorstores.pgembedding import BaseModel, CollectionStore from pydantic import BaseModel as PydanticModel import sqlalchemy @@ -12,6 +11,7 @@ from brevia.connection import db_connection from brevia.models import load_embeddings from brevia.settings import get_settings +from brevia.utilities.dates import date_filter from brevia.utilities.json_api import query_data_pagination from brevia.utilities.uuid import is_valid_uuid @@ -145,31 +145,6 @@ class ChatHistoryFilter(PydanticModel): page_size: int = 50 -def get_date_filter(date_str, type_str): - """ - Parses a date string into a datetime object with combined time information. - - Args: - date_str (str): A string representing a date in the format 'YYYY-MM-DD'. - None if no specific date is provided. - - type_str (str): Indicates whether to create a maximum or minimum date filter. - Valid values are 'max' or 'min'. 
- """ - max_date = datetime.now() - min_date = datetime.fromtimestamp(0) - - if date_str is not None: - parsed_date = datetime.strptime(date_str, '%Y-%m-%d') - if type_str == 'max': - max_date = parsed_date - return datetime.combine(max_date, time.max) - min_date = parsed_date - return datetime.combine(min_date, time.min) - - return max_date if type_str == 'max' else min_date - - def get_collection_filter(collection_name): """ Constructs a filter expression based on the collection name. @@ -196,8 +171,8 @@ def get_history(filter: ChatHistoryFilter) -> dict: # pylint: disable=redefined Read chat history with optional filters using pagination data in response. """ - min_date = get_date_filter(filter.min_date, 'min') - max_date = get_date_filter(filter.max_date, 'max') + min_date = date_filter(filter.min_date, 'min') + max_date = date_filter(filter.max_date, 'max') filter_collection = get_collection_filter(filter.collection) filter_session_id = get_session_filter(filter.session_id) @@ -223,8 +198,8 @@ def get_history_sessions(filter: ChatHistoryFilter) -> dict: Read chat history with optional filters using pagination data in response. """ - min_date = get_date_filter(filter.min_date, 'min') - max_date = get_date_filter(filter.max_date, 'max') + min_date = date_filter(filter.min_date, 'min') + max_date = date_filter(filter.max_date, 'max') filter_collection = get_collection_filter(filter.collection) with Session(db_connection()) as session: diff --git a/brevia/postman/Brevia API.postman_collection.json b/brevia/postman/Brevia API.postman_collection.json index 2252e86..c2928e7 100644 --- a/brevia/postman/Brevia API.postman_collection.json +++ b/brevia/postman/Brevia API.postman_collection.json @@ -2163,6 +2163,132 @@ } ] }, + { + "name": "Jobs", + "item": [ + { + "name": "jobs - Read single job", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{access_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text", + "disabled": true + } + ], + "url": { + "raw": "{{baseUrl}}/jobs/{{job_id}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "jobs", + "{{job_id}}" + ] + } + }, + "response": [] + }, + { + "name": "Jobs - Read all jobs", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{access_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text", + "disabled": true + } + ], + "url": { + "raw": "{{baseUrl}}/jobs", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "jobs" + ] + } + }, + "response": [] + }, + { + "name": "Jobs - Read jobs filters", + "request": { + "auth": { + "type": "bearer", + "bearer": [ + { + "key": "token", + "value": "{{access_token}}", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Content-Type", + "value": "application/json", + "type": "text", + "disabled": true + } + ], + "url": { + "raw": "{{baseUrl}}/jobs?min_date=2025-04-20&completed=true&page=1&page_size=10", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "jobs" + ], + "query": [ + { + "key": "min_date", + "value": "2025-04-20" + }, + { + "key": "completed", + "value": "true" + }, + { + "key": "page", + "value": "1" + }, + { + "key": "page_size", + "value": "10" + } + ] + } + }, + "response": [] + } + ] + }, { "name": "upload_analyze - Upload file and analyze using extension", "event": [ @@ -2277,41 +2403,6 @@ } }, 
"response": [] - }, - { - "name": "jobs - Read single job", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{access_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [ - { - "key": "Content-Type", - "value": "application/json", - "type": "text", - "disabled": true - } - ], - "url": { - "raw": "{{baseUrl}}/jobs/{{job_id}}", - "host": [ - "{{baseUrl}}" - ], - "path": [ - "jobs", - "{{job_id}}" - ] - } - }, - "response": [] } ] } diff --git a/brevia/routers/jobs_router.py b/brevia/routers/jobs_router.py index 32b8282..5858639 100644 --- a/brevia/routers/jobs_router.py +++ b/brevia/routers/jobs_router.py @@ -1,7 +1,8 @@ """API endpoints definitions to handle async jobs""" -from fastapi import APIRouter, HTTPException, status +from typing_extensions import Annotated +from fastapi import APIRouter, HTTPException, status, Depends from brevia.dependencies import get_dependencies -from brevia import async_jobs +from brevia.async_jobs import get_jobs, single_job, JobsFilter router = APIRouter() @@ -9,13 +10,13 @@ @router.get( '/jobs/{uuid}', dependencies=get_dependencies(json_content_type=False), - tags=['Analysis'], + tags=['Analysis', 'Jobs'], ) async def read_analysis_job(uuid: str): """ Read details of a single analisys Job via its UUID """ - job = async_jobs.single_job(uuid) + job = single_job(uuid) if job is None: raise HTTPException( status.HTTP_404_NOT_FOUND, @@ -23,3 +24,13 @@ async def read_analysis_job(uuid: str): ) return job + + +@router.get( + '/jobs', + dependencies=get_dependencies(json_content_type=False), + tags=['Jobs'], +) +async def list_analysis_jobs(filter: Annotated[JobsFilter, Depends()]): + """ /jobs endpoint, list all analysis jobs """ + return get_jobs(filter=filter) diff --git a/brevia/utilities/dates.py b/brevia/utilities/dates.py new file mode 100644 index 0000000..18ccc46 --- /dev/null +++ b/brevia/utilities/dates.py @@ -0,0 +1,27 @@ +"""Dates utilities""" +from datetime import datetime, time + + +def date_filter(date_str: str | None, type_str: str = 'min'): + """ + Parses a date string into a datetime object with combined time information. + + Args: + date_str (str): A string representing a date in the format 'YYYY-MM-DD'. + None if no specific date is provided. + + type_str (str): Indicates whether to create a maximum or minimum date filter. + Valid values are 'max' or 'min'. If None 'min' is i + """ + max_date = datetime.now() + min_date = datetime.fromtimestamp(0) + + if date_str is not None: + parsed_date = datetime.strptime(date_str, '%Y-%m-%d') + if type_str == 'max': + max_date = parsed_date + return datetime.combine(max_date, time.max) + min_date = parsed_date + return datetime.combine(min_date, time.min) + + return max_date if type_str == 'max' else min_date diff --git a/docs/async_jobs.md b/docs/async_jobs.md index ba4c85b..33f7488 100644 --- a/docs/async_jobs.md +++ b/docs/async_jobs.md @@ -12,4 +12,9 @@ These endpoints will simply return a job id as response like this: {"job": "7ca33644-5ddb-4747-84c0-8818715a65f8"} ``` -After that you can check the async job status calling [`GET /jobs/uuid`](endpoints_overview.md#get-jobsuuid) and retrieve the job results as it ends. 
+    """
+    max_date = datetime.now()
+    min_date = datetime.fromtimestamp(0)
+
+    if date_str is not None:
+        parsed_date = datetime.strptime(date_str, '%Y-%m-%d')
+        if type_str == 'max':
+            max_date = parsed_date
+            return datetime.combine(max_date, time.max)
+        min_date = parsed_date
+        return datetime.combine(min_date, time.min)
+
+    return max_date if type_str == 'max' else min_date
diff --git a/docs/async_jobs.md b/docs/async_jobs.md
index ba4c85b..33f7488 100644
--- a/docs/async_jobs.md
+++ b/docs/async_jobs.md
@@ -12,4 +12,9 @@ These endpoints will simply return a job id as response like this:
 {"job": "7ca33644-5ddb-4747-84c0-8818715a65f8"}
 ```
 
-After that you can check the async job status calling [`GET /jobs/uuid`](endpoints_overview.md#get-jobsuuid) and retrieve the job results as it ends.
+After that you can:
+
+- Check the async job status by calling [`GET /jobs/{uuid}`](endpoints_overview.md#get-jobsuuid) and retrieve the job results once it ends
+- List all your async jobs, with pagination and optional filters, using [`GET /jobs`](endpoints_overview.md#get-jobs)
+
+The `/jobs` endpoint supports filters on completion status, service name, and creation date range, plus pagination parameters, to help you monitor and manage your asynchronous tasks, as shown below.
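+
+For example, to list the completed jobs created on or after April 1st, 2025, ten per page:
+
+```http
+GET /jobs?completed=true&min_date=2025-04-01&page=1&page_size=10
+```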
diff --git a/docs/endpoints_overview.md b/docs/endpoints_overview.md
index 80ee171..cfa4c7a 100644
--- a/docs/endpoints_overview.md
+++ b/docs/endpoints_overview.md
@@ -308,6 +308,55 @@ It works only with `form-data` input that must include a `file` parameter with t
 
 ## Async jobs endpoints
 
+### GET `/jobs`
+
+Retrieve a list of all analysis jobs with optional filtering and pagination.
+
+**Optional Parameters**:
+
+- `min_date`: Filter jobs created on or after this date (YYYY-MM-DD format, inclusive)
+- `max_date`: Filter jobs created on or before this date (YYYY-MM-DD format, inclusive)
+- `service`: Filter jobs by service name (e.g., "brevia.services.SummarizeFileService")
+- `completed`: Filter by completion status (true for completed jobs, false for pending jobs)
+- `page`: Page number for pagination (default: 1)
+- `page_size`: Number of items per page (default: 50)
+
+**Example usage**:
+
+```http
+GET /jobs?completed=true&page=1&page_size=20
+GET /jobs?service=brevia.services.SummarizeFileService&min_date=2024-01-01
+```
+
+**Example response**:
+
+```JSON
+{
+  "data": [
+    {
+      "uuid": "7ca33644-5ddb-4747-84c0-8818715a65f8",
+      "service": "brevia.services.SummarizeFileService",
+      "created": "2024-02-29T16:31:25.546740",
+      "completed": "2024-02-29T17:31:27.700342",
+      "expires": "2024-02-29T21:31:25",
+      "max_attempts": 1,
+      "payload": {
+        "file_path": "/tmp/tmpgns0ci85.pdf",
+        "chain_type": "",
+        "token_data": false
+      },
+      "result": {
+        "output": "Lorem ipsum...."
+      }
+    }
+  ],
+  "meta": {
+    "pagination": {
+      "page": 1,
+      "page_size": 20,
+      "page_count": 5,
+      "count": 100
+    }
+  }
+}
+```
+
 ### GET `/jobs/{uuid}`
 
 Retrieve async job data using its `uuid`
diff --git a/tests/routers/test_jobs_router.py b/tests/routers/test_jobs_router.py
index 8708031..47f3124 100644
--- a/tests/routers/test_jobs_router.py
+++ b/tests/routers/test_jobs_router.py
@@ -1,9 +1,11 @@
 """Jobs router tests"""
 import uuid
+import time
+from datetime import datetime, timedelta
 from fastapi.testclient import TestClient
 from fastapi import FastAPI
 from brevia.routers import jobs_router
-from brevia.async_jobs import create_job
+from brevia.async_jobs import create_job, complete_job
 
 app = FastAPI()
 app.include_router(jobs_router.router)
@@ -24,3 +26,138 @@ def test_jobs_fail():
     """Test /jobs/{uuid} failure"""
     response = client.get(f'/jobs/{uuid.uuid4()}', headers={})
     assert response.status_code == 404
+
+
+def test_jobs_list_no_filters():
+    """Test /jobs endpoint without filters"""
+    # Create some test jobs
+    create_job('TestService1', {'test': 'data1'})
+    create_job('TestService2', {'test': 'data2'})
+
+    response = client.get('/jobs', headers={})
+    assert response.status_code == 200
+    data = response.json()
+
+    assert 'data' in data
+    assert 'meta' in data
+    assert 'pagination' in data['meta']
+    assert isinstance(data['data'], list)
+    assert len(data['data']) >= 2  # At least our test jobs should be present
+
+    # Check pagination structure
+    pagination = data['meta']['pagination']
+    assert 'page' in pagination
+    assert 'page_count' in pagination
+    assert 'count' in pagination
+
+
+def test_jobs_list_with_service_filter():
+    """Test /jobs endpoint with service filter"""
+    # Create jobs with different services
+    service_name = f'FilterTestService_{int(time.time())}'
+    create_job(service_name, {'test': 'data1'})
+    create_job('DifferentService', {'test': 'data2'})
+
+    # Test filtering by service
+    response = client.get(f'/jobs?service={service_name}', headers={})
+    assert response.status_code == 200
+    data = response.json()
+
+    assert 'data' in data
+    assert len(data['data']) == 1
+
+    # All returned jobs should have the specified service
+    for job in data['data']:
+        assert job['service'] == service_name
+
+
+def test_jobs_list_with_completed_filter():
+    """Test /jobs endpoint with completed filter"""
+    # Create a job and complete it
+    job1 = create_job('CompletedTestService', {'test': 'completed'})
+    complete_job(str(job1.uuid), {'result': 'success'})
+
+    # Create an incomplete job
+    job2 = create_job('IncompleteTestService', {'test': 'incomplete'})
+
+    # Test filtering for completed jobs
+    response = client.get('/jobs?completed=true', headers={})
+    assert response.status_code == 200
+    data = response.json()
+
+    assert 'data' in data
+    # Our completed job must report a completed timestamp
+    for job in data['data']:
+        if job['uuid'] == str(job1.uuid):
+            assert job['completed'] is not None
+
+    # Test filtering for incomplete jobs
+    response = client.get('/jobs?completed=false', headers={})
+    assert response.status_code == 200
+    data = response.json()
+
+    assert 'data' in data
+    # Our incomplete job must not report a completed timestamp
+    for job in data['data']:
+        if job['uuid'] == str(job2.uuid):
+            assert job['completed'] is None
+
+
+def test_jobs_list_with_date_filters():
+    """Test /jobs endpoint with date filters"""
+    # Create a job
+    create_job('DateTestService', {'test': 'date_filter'})
+
+    # Get current date for filtering
+    yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
+    tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')
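+    # Note: date filters are inclusive; the backend date_filter() utility
+    # expands min_date to 00:00:00 and max_date to 23:59:59.999999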
+
+    # Test with min_date filter
+    response = client.get(f'/jobs?min_date={yesterday}', headers={})
+    assert response.status_code == 200
+    data = response.json()
+    assert 'data' in data
+
+    # Test with max_date filter
+    response = client.get(f'/jobs?max_date={tomorrow}', headers={})
+    assert response.status_code == 200
+    data = response.json()
+    assert 'data' in data
+
+    # Test with both min_date and max_date
+    response = client.get(f'/jobs?min_date={yesterday}&max_date={tomorrow}', headers={})
+    assert response.status_code == 200
+    data = response.json()
+    assert 'data' in data
+
+
+def test_jobs_list_with_multiple_filters():
+    """Test /jobs endpoint with multiple filters combined"""
+    # Create a specific job for this test
+    service_name = f'MultiFilterTest_{int(time.time())}'
+    job = create_job(service_name, {'test': 'multi_filter'})
+
+    today = datetime.now().strftime('%Y-%m-%d')
+    tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')
+
+    # Test combining service, completed, and date filters
+    params = (
+        f'service={service_name}&completed=false&'
+        f'min_date={today}&max_date={tomorrow}&page_size=10'
+    )
+    response = client.get(
+        f'/jobs?{params}',
+        headers={}
+    )
+    assert response.status_code == 200
+    data = response.json()
+
+    assert 'data' in data
+    assert 'meta' in data
+    assert 'pagination' in data['meta']
+
+    # Verify that the response structure is correct
+    for job_data in data['data']:
+        if job_data['uuid'] == str(job.uuid):
+            assert job_data['service'] == service_name
+            assert job_data['completed'] is None  # Should be incomplete
diff --git a/tests/test_async_jobs.py b/tests/test_async_jobs.py
index 971d429..be6d046 100644
--- a/tests/test_async_jobs.py
+++ b/tests/test_async_jobs.py
@@ -1,12 +1,13 @@
 """async_jobs module tests"""
 from datetime import datetime, timedelta
+import time
 import pytest
 from sqlalchemy.orm import Session
 from brevia.connection import db_connection
 from brevia.async_jobs import (
     single_job, create_job, complete_job,
     save_job_result, create_service, lock_job_service,
-    is_job_available, run_job_service,
+    is_job_available, run_job_service, get_jobs, JobsFilter,
 )
 from brevia.services import BaseService
@@ -129,3 +130,242 @@ def test_run_job_failure():
     exp = 'ValueError: Class "NotExistingService" not found'
     assert 'error' in retrieved_job.result
     assert retrieved_job.result['error'].startswith(exp)
+
+
+def test_get_jobs_no_filters():
+    """Test get_jobs function without filters"""
+    # Create some test jobs
+    job1 = create_job('TestService1', {'test': 'data1'})
+    job2 = create_job('TestService2', {'test': 'data2'})
+
+    # Get jobs without filters
+    filter_obj = JobsFilter()
+    result = get_jobs(filter_obj)
+
+    # Verify response structure
+    assert isinstance(result, dict)
+    assert 'data' in result
+    assert 'meta' in result
+    assert 'pagination' in result['meta']
+
+    # Verify pagination structure
+    pagination = result['meta']['pagination']
+    assert 'page' in pagination
+    assert 'count' in pagination
+    assert 'page_count' in pagination
+
+    # Verify we have at least our test jobs
+    assert len(result['data']) >= 2
+
+    # Verify job data structure
+    job_uuids = [str(job1.uuid), str(job2.uuid)]
+    found_jobs = [job for job in result['data'] if str(job['uuid']) in job_uuids]
+    assert len(found_jobs) >= 2
+
+
+def test_get_jobs_with_service_filter():
+    """Test get_jobs function with service filter"""
+    # Create jobs with different services
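+    # (a unique, timestamped service name keeps this test independent
+    # of jobs already stored in the database)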
+    service_name = f'FilterTestService_{int(time.time())}'
+    create_job(service_name, {'test': 'data1'})
+    job2 = create_job('DifferentService', {'test': 'data2'})
+
+    # Filter by specific service
+    filter_obj = JobsFilter(service=service_name)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    assert len(result['data']) >= 1
+
+    # All returned jobs must have the specified service
+    for job in result['data']:
+        assert job['service'] == service_name
+
+    # The job with a different service must not be included
+    other_job_found = any(
+        str(job['uuid']) == str(job2.uuid) for job in result['data']
+    )
+    assert not other_job_found
+
+
+def test_get_jobs_with_completed_filter():
+    """Test get_jobs function with completed filter"""
+    # Create jobs and complete one of them
+    job1 = create_job('CompletedTestService', {'test': 'completed'})
+    job2 = create_job('IncompleteTestService', {'test': 'incomplete'})
+
+    # Complete the first job
+    complete_job(str(job1.uuid), {'result': 'success'})
+
+    # Filter for completed jobs
+    filter_obj = JobsFilter(completed=True)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+
+    # Find our completed job in the results
+    completed_job_found = None
+    for job in result['data']:
+        if str(job['uuid']) == str(job1.uuid):
+            completed_job_found = job
+            break
+
+    assert completed_job_found is not None
+    assert completed_job_found['completed'] is not None
+
+    # Filter for incomplete jobs
+    filter_obj = JobsFilter(completed=False)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+
+    # Find our incomplete job in the results
+    incomplete_job_found = None
+    for job in result['data']:
+        if str(job['uuid']) == str(job2.uuid):
+            incomplete_job_found = job
+            break
+
+    assert incomplete_job_found is not None
+    assert incomplete_job_found['completed'] is None
+
+
+def test_get_jobs_with_date_filters():
+    """Test get_jobs function with date filters"""
+    # Create a job
+    job = create_job('DateTestService', {'test': 'date_filter'})
+
+    # Get current date for filtering
+    yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
+    tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')
+
+    # Test with min_date filter
+    filter_obj = JobsFilter(min_date=yesterday)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    # Our job should be included since it was created today (after yesterday)
+    job_found = any(
+        str(job_data['uuid']) == str(job.uuid) for job_data in result['data']
+    )
+    assert job_found
+
+    # Test with max_date filter
+    filter_obj = JobsFilter(max_date=tomorrow)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    # Our job should be included since it was created today (before tomorrow)
+    job_found = any(
+        str(job_data['uuid']) == str(job.uuid) for job_data in result['data']
+    )
+    assert job_found
+
+    # Test with both min_date and max_date
+    filter_obj = JobsFilter(min_date=yesterday, max_date=tomorrow)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    job_found = any(
+        str(job_data['uuid']) == str(job.uuid) for job_data in result['data']
+    )
+    assert job_found
+
+    # Test with restrictive date range (future dates)
+    future_date = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d')
+    filter_obj = JobsFilter(min_date=future_date)
+    result = get_jobs(filter_obj)
+
+    # Our job should not be included since it was created before the future date
+    job_found = any(
+        str(job_data['uuid']) == str(job.uuid) for job_data in result['data']
+    )
+    assert not job_found
+
+
+def test_get_jobs_with_pagination():
+    """Test get_jobs function with pagination"""
+    # Create multiple jobs for pagination testing
+    for i in range(5):
+        create_job(f'PaginationTestService_{i}', {'test': f'pagination_{i}'})
+
+    # Test with custom page size
+    filter_obj = JobsFilter(page=1, page_size=3)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    assert 'meta' in result
+    assert 'pagination' in result['meta']
+
+    pagination = result['meta']['pagination']
+    assert pagination['page'] == 1
+    assert len(result['data']) <= 3
+
+    # Test second page
+    filter_obj = JobsFilter(page=2, page_size=3)
+    result = get_jobs(filter_obj)
+
+    pagination = result['meta']['pagination']
+    assert pagination['page'] == 2
+
+
+def test_get_jobs_with_multiple_filters():
+    """Test get_jobs function with multiple filters combined"""
+    # Create a specific job for this test
+    service_name = f'MultiFilterTest_{int(time.time())}'
+    job = create_job(service_name, {'test': 'multi_filter'})
+
+    yesterday = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
+    tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')
+
+    # Test combining service, completed, and date filters
+    filter_obj = JobsFilter(
+        service=service_name,
+        completed=False,
+        min_date=yesterday,
+        max_date=tomorrow,
+        page_size=10
+    )
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    assert 'meta' in result
+    assert 'pagination' in result['meta']
+
+    # Find our specific job in the results
+    target_job = None
+    for job_data in result['data']:
+        if str(job_data['uuid']) == str(job.uuid):
+            target_job = job_data
+            break
+
+    assert target_job is not None
+    assert target_job['service'] == service_name
+    assert target_job['completed'] is None  # Should be incomplete
+
+
+def test_get_jobs_empty_results():
+    """Test get_jobs function with filters that return no results"""
+    # Use a service name that doesn't exist
+    non_existent_service = f'NonExistentService_{int(time.time())}'
+
+    filter_obj = JobsFilter(service=non_existent_service)
+    result = get_jobs(filter_obj)
+
+    assert 'data' in result
+    assert 'meta' in result
+    assert 'pagination' in result['meta']
+    assert len(result['data']) == 0
+
+    pagination = result['meta']['pagination']
+    assert pagination['count'] == 0