diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 835c696..25f1fe5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,7 +57,17 @@ jobs: needs: test runs-on: ubuntu-latest steps: - - name: Build & Release - uses: FullFact/ff_release@v2 + - name: Bump version and push tag + id: tag_version + uses: mathieudutour/github-tag-action@v6.2 with: - docker_build: false + github_token: ${{ secrets.GITHUB_TOKEN }} + release_branches: main + pre_release_branches: dev + + - name: Create a GitHub release + uses: ncipollo/release-action@v1 + with: + tag: ${{ steps.tag_version.outputs.new_tag }} + name: Release ${{ steps.tag_version.outputs.new_tag }} + body: ${{ steps.tag_version.outputs.changelog }} diff --git a/pyproject.toml b/pyproject.toml index 555bd3d..24c3455 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,10 +19,11 @@ dependencies = [ "scipy>=1.15.3", "scipy-stubs>=1.15.3.0", "types-requests>=2.32.4.20250809", + "tenacity>=9.1.2", ] [tool.uv.sources] -genai_utils = { git = "ssh://git@github.com/FullFact/genai-utils.git", tag = "5.0.0"} +genai_utils = { git = "https://github.com/FullFact/genai-utils.git", tag = "5.0.0"} [build-system] requires = ["setuptools >= 61.0", diff --git a/scripts/demo_beam_search.py b/scripts/demo_beam_search.py index fb1ac5a..0d69ae9 100644 --- a/scripts/demo_beam_search.py +++ b/scripts/demo_beam_search.py @@ -12,7 +12,7 @@ def get_functions() -> list[str]: def get_questions(use_all_from_db: bool = False) -> list[str]: - """Either load all available questions from the local cached-pastel databse + """Either load all available questions from the local cached-pastel database or return a sample.""" if use_all_from_db: db = DatabaseManager() diff --git a/scripts/demo_cached_pastel.py b/scripts/demo_cached_pastel.py index e40e4d7..a55fb21 100644 --- a/scripts/demo_cached_pastel.py +++ b/scripts/demo_cached_pastel.py @@ -1,6 +1,8 @@ """Simple demo of cached pastel: Using a pastel model with a local 
database for long-term caching responses""" +import asyncio + from pastel.models import Sentence from pastel.pastel import Pastel from training.cached_pastel import CachedPastel @@ -30,11 +32,11 @@ def DANGER_clear_database() -> None: cached_pastel.display_model() # Use cached_pastel exactly the same as Pastel (which it extends:) - scores = cached_pastel.make_predictions(test_sentences) - _ = [print(f"{s:4.1f} \t{e.sentence_text}") for s, e in zip(scores, test_sentences)] + scores = asyncio.run(cached_pastel.make_predictions(test_sentences)) + _ = [print(f"{scores[e].score:4.1f} \t{e.sentence_text}") for e in test_sentences] print("-" * 100) # second pass of same sentences will make zero calls to Gemini - scores = cached_pastel.make_predictions(test_sentences) - _ = [print(f"{s:4.1f} \t{e.sentence_text}") for s, e in zip(scores, test_sentences)] + scores = asyncio.run(cached_pastel.make_predictions(test_sentences)) + _ = [print(f"{scores[e].score:4.1f} \t{e.sentence_text}") for e in test_sentences] diff --git a/scripts/demo_pastel.py b/scripts/demo_pastel.py index 243a300..f550101 100644 --- a/scripts/demo_pastel.py +++ b/scripts/demo_pastel.py @@ -1,10 +1,11 @@ +import asyncio import copy import json import tempfile -from pastel.models import Sentence +from pastel.models import BiasType, Sentence from pastel.optimise_weights import learn_weights -from pastel.pastel import BiasType, Pastel +from pastel.pastel import Pastel def demo_predict(pasteliser: Pastel) -> None: @@ -22,9 +23,8 @@ def demo_predict(pasteliser: Pastel) -> None: ] examples = [Sentence(t, tuple(["quantity"])) for t in texts] - scores = pasteliser.make_predictions(examples) - # _ = [print(f"{s:4.1f} \t{e}") for s, e in sorted(zip(scores, examples))] - _ = [print(f"{s:4.1f} \t{e}") for s, e in zip(scores, examples)] + scores = asyncio.run(pasteliser.make_predictions(examples)) + _ = [print(f"{scores[e].score:4.1f} \t{e.sentence_text}") for e in examples] def demo_learn(pasteliser: Pastel) -> Pastel: 
diff --git a/src/pastel/models.py b/src/pastel/models.py index c515736..a05268f 100644 --- a/src/pastel/models.py +++ b/src/pastel/models.py @@ -1,5 +1,15 @@ +import enum +from collections.abc import Callable from dataclasses import dataclass -from typing import Tuple +from typing import Tuple, TypeAlias + +from pydantic import BaseModel + + +class BiasType(enum.Enum): + """Used as the key for the bias term in Pastel models""" + + BIAS = "BIAS" @dataclass(frozen=True) @@ -8,3 +18,15 @@ class Sentence: sentence_text: str claim_type: Tuple[str, ...] = () + + +FEATURE_TYPE: TypeAlias = Callable[[Sentence], float] | str | BiasType + + +class ScoreAndAnswers(BaseModel): + """Used to parse scores for sentences and store the answers to + PASTEL questions.""" + + sentence: Sentence + score: float + answers: dict[FEATURE_TYPE, float] diff --git a/src/pastel/pastel.py b/src/pastel/pastel.py index 65654ab..1c1d753 100644 --- a/src/pastel/pastel.py +++ b/src/pastel/pastel.py @@ -2,11 +2,10 @@ # See paper: https://arxiv.org/abs/2309.07601v3 "Weakly Supervised Veracity Classification with LLM-Predicted Credibility Signals" import asyncio -import enum import json import logging from collections.abc import Callable -from typing import Dict, Sequence, Tuple, TypeAlias +from typing import Sequence, Tuple, TypeAlias import numpy as np import numpy.typing as npt @@ -15,7 +14,7 @@ from google.api_core import exceptions as core_exceptions from pastel import pastel_functions -from pastel.models import Sentence +from pastel.models import FEATURE_TYPE, BiasType, ScoreAndAnswers, Sentence _logger = logging.getLogger(__name__) @@ -31,15 +30,6 @@ ) -class BiasType(enum.Enum): - """Used as the key for the bias term in Pastel models""" - - BIAS = "BIAS" - - -FEATURE_TYPE: TypeAlias = Callable[[Sentence], float] | str | BiasType - - def feature_as_string(feature: FEATURE_TYPE) -> str: if callable(feature): return feature.__name__ @@ -170,28 +160,28 @@ def get_functions(self) -> 
list[Callable[[Sentence], float]]: def make_prompt(self, sentence: Sentence) -> str: """Makes a prompt for a single given sentence.""" + questions = self.get_questions() + prompt = """ - Your task is to answer a series of questions about a sentence. Ensure your answers are truthful and reliable. - You are expected to answer with ‘Yes’ or ‘No’ but you are also allowed to answer with ‘Unsure’ if you do not - have enough information or context to provide a reliable answer. - Your response should be limited to the question number and yes/no/unsure. - Example output: - 0. Yes - 1. Yes - 2. No +Your task is to answer a series of questions about a sentence. Ensure your answers are truthful and reliable. +You are expected to answer with ‘Yes’ or ‘No’ but you are also allowed to answer with ‘Unsure’ if you do not +have enough information or context to provide a reliable answer. +Your response should be limited to the question number and yes/no/unsure. +Example output: +0. Yes +1. Yes +2. No - Here are the questions: - [QUESTIONS] +Here are the questions: +[QUESTIONS] - Here is the sentence: ```[SENT1]``` +Here is the sentence: ```[SENT1]``` - """ +""" # extract the PastelFeatures whose type is string prompt = prompt.replace( "[QUESTIONS]", - "\n".join( - [f"Question {idx} {q}" for idx, q in enumerate(self.get_questions())] - ), + "\n".join([f"Question {idx} {q}" for idx, q in enumerate(questions)]), ) prompt = prompt.replace("[SENT1]", sentence.sentence_text) @@ -211,11 +201,11 @@ def _label_mapping(label: str) -> float: retry=tenacity.retry_if_exception_type(RETRYABLE_EXCEPTIONS), before=log_retry_attempt, ) - async def _get_answers_for_single_sentence( + async def _get_llm_answers_for_single_sentence( self, sentence: Sentence ) -> dict[FEATURE_TYPE, float]: - sent_answers: Dict[FEATURE_TYPE, float] = {} - # First, get answers to all the questions from genAI: + """Runs all genAI questions on the given sentence.""" + sent_answers: dict[FEATURE_TYPE, float] = {} prompt = 
self.make_prompt(sentence)
         raw_output = run_prompt(prompt)
         raw_output = raw_output.strip().lower()
@@ -233,16 +223,34 @@ async def _get_answers_for_single_sentence(
                 raise ValueError(
                     f"Failed to parse output for the sentence: {sentence.sentence_text}. Output received: {output}"
                 )
-        # Second, get values from the functions
+        return sent_answers
+
+    def _get_function_answers_for_single_sentence(
+        self, sentence: Sentence
+    ) -> dict[FEATURE_TYPE, float]:
+        """Runs all the functions in the model on the given sentence."""
+        sent_answers: dict[FEATURE_TYPE, float] = {}
         for f in self.get_functions():
             sent_answers[f] = f(sentence)
         return sent_answers
 
+    async def _get_answers_for_single_sentence(
+        self, sentence: Sentence
+    ) -> dict[FEATURE_TYPE, float]:
+        # First, get answers to all the questions from genAI:
+        llm_sent_answers = await self._get_llm_answers_for_single_sentence(sentence)
+
+        # Second, get values from the functions
+        function_sent_answers = self._get_function_answers_for_single_sentence(sentence)
+
+        return llm_sent_answers | function_sent_answers
+
     async def get_answers_to_questions(
         self, sentences: list[Sentence]
     ) -> dict[Sentence, dict[FEATURE_TYPE, float]]:
-        """Embed each example into the prompt and pass to genAI.
+        """Embed each example into the prompt and pass to genAI, then
+        get answers for non-genAI functions.
 
         For each sentence, this Returns a dictionary mapping features to scores."""
         jobs = [
@@ -299,10 +306,13 @@ def get_scores_from_answers(
         scores = X.dot(weights)
         return scores
 
-    def make_predictions(self, sentences: list[Sentence]) -> ARRAY_TYPE:
+    async def make_predictions(
+        self, sentences: list[Sentence]
+    ) -> dict[Sentence, ScoreAndAnswers]:
         """Use the Pastel questions and weights model to generate
-        a score for each of a list of sentences.
Return this along with + the questions and their scores.""" + answers = await self.get_answers_to_questions(sentences) if answers: scores = self.get_scores_from_answers(list(answers.values())) else: @@ -310,10 +320,44 @@ def make_predictions(self, sentences: list[Sentence]) -> ARRAY_TYPE: scores_dict = {} for sentence, score in zip(answers.keys(), scores): - scores_dict[sentence] = float(score) + scores_dict[sentence.sentence_text] = float(score) for sentence in sentences: - if sentence not in scores_dict: - scores_dict[sentence] = 0.0 + if sentence.sentence_text not in scores_dict: + scores_dict[sentence.sentence_text] = 0.0 + if sentence not in answers.keys(): + answers[sentence] = {} - return np.array([scores_dict[sentence] for sentence in sentences]) + return { + sentence: ScoreAndAnswers( + sentence=sentence, + score=scores_dict[sentence.sentence_text], + answers=answers[sentence], + ) + for sentence in sentences + } + + def update_predictions( + self, sentences: list[Sentence], old_answers: list[dict[FEATURE_TYPE, float]] + ) -> dict[Sentence, ScoreAndAnswers]: + """Takes a list of sentences and their original LLM and function answers, + then re-runs the functions only and updates the scores with these new answers. 
+ Returns ScoreAndAnswers for each sentence as before.""" + new_answers = [ + self._get_function_answers_for_single_sentence(sentence) + for sentence in sentences + ] + updated_answers = [old | new for old, new in zip(old_answers, new_answers)] + updated_scores = self.get_scores_from_answers(updated_answers) + + updated_scores_and_answers = { + sentence: ScoreAndAnswers( + sentence=sentence, + score=score, + answers=answers, + ) + for sentence, score, answers in zip( + sentences, updated_scores, updated_answers + ) + } + return updated_scores_and_answers diff --git a/src/training/beam_search.py b/src/training/beam_search.py index d2f380e..5231a1a 100644 --- a/src/training/beam_search.py +++ b/src/training/beam_search.py @@ -10,8 +10,9 @@ import numpy as np from sklearn.model_selection import train_test_split # type: ignore +from pastel.models import FEATURE_TYPE, BiasType from pastel.optimise_weights import lin_reg -from pastel.pastel import EXAMPLES_TYPE, FEATURE_TYPE, BiasType, Pastel +from pastel.pastel import EXAMPLES_TYPE, Pastel from training.cached_pastel import CachedPastel from training.crossvalidate_pastel import ( evaluate_model, diff --git a/src/training/cached_pastel.py b/src/training/cached_pastel.py index 8bb2400..d422fce 100644 --- a/src/training/cached_pastel.py +++ b/src/training/cached_pastel.py @@ -5,8 +5,8 @@ import logging from typing import List, Optional, Set, Tuple -from pastel.models import Sentence -from pastel.pastel import ARRAY_TYPE, FEATURE_TYPE, BiasType, Pastel, feature_as_string +from pastel.models import FEATURE_TYPE, BiasType, Sentence +from pastel.pastel import ARRAY_TYPE, Pastel, feature_as_string from training.db_manager import DatabaseManager _logger = logging.getLogger(__name__) diff --git a/src/training/crossvalidate_pastel.py b/src/training/crossvalidate_pastel.py index bbfe7d6..3ca72ad 100644 --- a/src/training/crossvalidate_pastel.py +++ b/src/training/crossvalidate_pastel.py @@ -15,9 +15,9 @@ from sklearn.metrics
import f1_score, precision_score, recall_score # type: ignore from sklearn.model_selection import train_test_split # type: ignore -from pastel.models import Sentence +from pastel.models import FEATURE_TYPE, BiasType, Sentence from pastel.optimise_weights import lin_reg -from pastel.pastel import EXAMPLES_TYPE, FEATURE_TYPE, BiasType, Pastel +from pastel.pastel import EXAMPLES_TYPE, Pastel from training.cached_pastel import CachedPastel diff --git a/tests/pastel/test_beam_search.py b/tests/pastel/test_beam_search.py index e51efd7..90ff831 100644 --- a/tests/pastel/test_beam_search.py +++ b/tests/pastel/test_beam_search.py @@ -1,11 +1,12 @@ from unittest.mock import Mock, patch -from pastel.pastel import BiasType, Pastel +from pastel.models import BiasType +from pastel.pastel import Pastel from training.beam_search import add_one, run_beam_search def test_add_one(): - current_set = set(["a", "b"]) + current_set = frozenset(["a", "b"]) all_features = ["a", "b", "c", "d"] new_sets = add_one(current_set, all_features) print(new_sets) @@ -16,7 +17,7 @@ def test_add_one(): def test_add_one_empty_set(): - current_set = set() + current_set = frozenset() all_features = ["a", "b"] new_sets = add_one(current_set, all_features) assert len(new_sets) == 2 diff --git a/tests/pastel/test_cached_pastel.py b/tests/pastel/test_cached_pastel.py index 2cccd49..7621d59 100644 --- a/tests/pastel/test_cached_pastel.py +++ b/tests/pastel/test_cached_pastel.py @@ -1,7 +1,7 @@ import numpy as np -from pastel.models import Sentence -from pastel.pastel import FEATURE_TYPE, BiasType, Pastel +from pastel.models import FEATURE_TYPE, BiasType, Sentence +from pastel.pastel import Pastel from training.cached_pastel import CachedPastel Q1 = "Is the statement factual?" 
@@ -19,9 +19,9 @@ def __init__(self, questions=None): super().__init__(questions) async def get_answers_to_questions( - self, sentences: list[str] - ) -> dict[str, dict[FEATURE_TYPE, float]]: - return {s: 1.0 for s in sentences} + self, sentences: list[Sentence] + ) -> dict[Sentence, dict[FEATURE_TYPE, float]]: + return {s: {Q1: 1.0} for s in sentences} # def make_predictions(self, sentences): # answers = self.get_answers_to_questions(sentences) diff --git a/tests/pastel/test_pastel.py b/tests/pastel/test_pastel.py index 0875390..4c3f4d1 100644 --- a/tests/pastel/test_pastel.py +++ b/tests/pastel/test_pastel.py @@ -1,27 +1,24 @@ import json import tempfile -from unittest.mock import AsyncMock, patch +from unittest.mock import AsyncMock, call, patch import numpy as np import pytest from pytest import mark, param -from pastel.models import Sentence -from pastel.pastel import BiasType, Pastel +from pastel.models import FEATURE_TYPE, BiasType, ScoreAndAnswers, Sentence +from pastel.pastel import Pastel -Q1 = "Is the statement factual?" -Q2 = "Does the statement contain bias?" +# mypy: ignore-errors +# getting "Untyped decorator makes function ... untyped " so ignoring for now: + +Q1: FEATURE_TYPE = "Is the statement factual?" +Q2: FEATURE_TYPE = "Does the statement contain bias?" 
@pytest.fixture def pastel_instance() -> Pastel: - pasteliser = Pastel( - { - BiasType.BIAS: 1.0, - Q1: -3.0, - Q2: 2.0, - } - ) + pasteliser = Pastel({BiasType.BIAS: 1.0, Q1: -3.0, Q2: 2.0}) return pasteliser @@ -62,7 +59,7 @@ def test_get_scores_from_answers(pastel_instance: Pastel) -> None: def test_get_scores_from_answers_no_weights(pastel_instance: Pastel) -> None: for k in pastel_instance.model.keys(): pastel_instance.model[k] = 0.0 - answers = [{Q1: 1, Q2: 1}, {Q1: 0, Q2: 1}] + answers = [{Q1: 1.0, Q2: 1.0}, {Q1: 0.0, Q2: 1.0}] with pytest.raises(ValueError): pastel_instance.get_scores_from_answers(answers) @@ -99,22 +96,22 @@ async def test_retries(mock_run_prompt: AsyncMock, pastel_instance: Pastel) -> N "sentences,return_values,expected", [ param( - [Sentence("s1", ()), Sentence("s2", ())], + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], [{Q1: 1.0, Q2: 1.0}, {Q1: 1.0, Q2: 0.0}], { - Sentence("s1", ()): {Q1: 1.0, Q2: 1.0}, - Sentence("s2", ()): {Q1: 1.0, Q2: 0.0}, + Sentence("s1", tuple("quantity")): {Q1: 1.0, Q2: 1.0}, + Sentence("s2", tuple("quantity")): {Q1: 1.0, Q2: 0.0}, }, id="Normal case", ), param( - [Sentence("s1", ()), Sentence("s2", ())], + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], [{Q1: 1.0, Q2: 1.0}, ValueError()], - {Sentence("s1", ()): {Q1: 1.0, Q2: 1.0}}, + {Sentence("s1", tuple("quantity")): {Q1: 1.0, Q2: 1.0}}, id="One sentence fails", ), param( - [Sentence("s1", ()), Sentence("s2", ())], + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], [ValueError(), ValueError()], {}, id="All sentences fail", @@ -124,7 +121,7 @@ async def test_retries(mock_run_prompt: AsyncMock, pastel_instance: Pastel) -> N async def test_get_answers_to_questions( sentences: list[Sentence], return_values: list[dict[str, float] | BaseException], - expected: dict[str, dict[str, float]], + expected: dict[Sentence, dict[str, float]], pastel_instance: Pastel, ): with patch.object( @@ -138,33 
+135,99 @@ async def test_get_answers_to_questions( "sentences,answers,expected", [ param( - ["s1", "s2"], - {"s1": {Q1: 0.0, Q2: 1.0}, "s2": {Q1: 0.0, Q2: 0.5}}, - np.array([3.0, 2.0]), + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], + { + Sentence("s1", tuple("quantity")): {Q1: 0.0, Q2: 1.0}, + Sentence("s2", tuple("quantity")): {Q1: 0.0, Q2: 0.5}, + }, + { + Sentence("s1", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s1", tuple("quantity")), + score=3.0, + answers={Q1: 0.0, Q2: 1.0}, + ), + Sentence("s2", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s2", tuple("quantity")), + score=2.0, + answers={Q1: 0.0, Q2: 0.5}, + ), + }, id="Normal case", ), param( - ["s1", "s2"], - {"s1": {Q1: 0.0, Q2: 1.0}}, - np.array([3.0, 0.0]), + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], + {Sentence("s1", tuple("quantity")): {Q1: 0.0, Q2: 1.0}}, + { + Sentence("s1", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s1", tuple("quantity")), + score=3.0, + answers={Q1: 0.0, Q2: 1.0}, + ), + Sentence("s2", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s2", tuple("quantity")), score=0.0, answers={} + ), + }, id="One sentence fails", ), param( - ["s1", "s2"], + [Sentence("s1", tuple("quantity")), Sentence("s2", tuple("quantity"))], {}, - np.array([0.0, 0.0]), + { + Sentence("s1", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s1", tuple("quantity")), score=0.0, answers={} + ), + Sentence("s2", tuple("quantity")): ScoreAndAnswers( + sentence=Sentence("s2", tuple("quantity")), score=0.0, answers={} + ), + }, id="All sentences fail", ), ], ) -def test_make_predictions( - sentences: list[str], +async def test_make_predictions( + sentences: list[Sentence], answers: dict[str, dict[str, float]], - expected: np.ndarray, + expected: dict[Sentence, ScoreAndAnswers], pastel_instance: Pastel, ): with patch.object( pastel_instance, "get_answers_to_questions", return_value=answers ): - 
predictions = pastel_instance.make_predictions(sentences) - assert all(predictions == expected) + predictions = await pastel_instance.make_predictions(sentences) + assert predictions == expected + + +def test_update_predictions(pastel_instance): + sentences = [ + Sentence(c, tuple("quantity")) for c in ["claim 1", "claim 2", "claim 3"] + ] + old_answers = [{Q1: 1.0, Q2: 0.0}, {Q1: 0.0, Q2: 1.0}, {Q1: 1.0, Q2: 1.0}] + + with ( + patch.object( + pastel_instance, + "_get_function_answers_for_single_sentence", + return_value={"updated_feature": 1.0}, + ) as mock_get_func_answers, + patch.object( + pastel_instance, + "get_scores_from_answers", + return_value=np.array([1.0, 2.0, 3.0]), + ) as mock_get_scores, + ): + updates = pastel_instance.update_predictions(sentences, old_answers) + + mock_get_func_answers.assert_has_calls( + [call(sentence) for sentence in sentences] + ) + + mock_get_scores.assert_called_once() + + assert len(updates) == len(sentences) + for sentence, score, old_answer in zip(sentences, [1.0, 2.0, 3.0], old_answers): + assert sentence in updates + assert isinstance(updates[sentence], ScoreAndAnswers) + assert updates[sentence].sentence == sentence + assert updates[sentence].score == score + expected_answers = old_answer | {"updated_feature": 1.0} + assert updates[sentence].answers == expected_answers diff --git a/uv.lock b/uv.lock index cda881c..ff8b722 100644 --- a/uv.lock +++ b/uv.lock @@ -243,7 +243,7 @@ wheels = [ [[package]] name = "genai-utils" version = "0.0.1" -source = { git = "ssh://git@github.com/FullFact/genai-utils.git?tag=5.0.0#0a689e2a20bd32e09f1bb0d0543e2ba969392965" } +source = { git = "https://github.com/FullFact/genai-utils.git?tag=5.0.0#0a689e2a20bd32e09f1bb0d0543e2ba969392965" } dependencies = [ { name = "google-cloud-core" }, { name = "google-genai" }, @@ -683,6 +683,7 @@ dependencies = [ { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = 
"scipy-stubs", version = "1.15.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "scipy-stubs", version = "1.16.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "tenacity" }, { name = "types-requests" }, ] @@ -699,12 +700,13 @@ dev = [ [package.metadata] requires-dist = [ - { name = "genai-utils", git = "ssh://git@github.com/FullFact/genai-utils.git?tag=5.0.0" }, + { name = "genai-utils", git = "https://github.com/FullFact/genai-utils.git?tag=5.0.0" }, { name = "google-cloud-core", specifier = ">=2.4.3" }, { name = "pydantic", specifier = ">=2.11.7" }, { name = "scikit-learn", specifier = ">=1.7.1" }, { name = "scipy", specifier = ">=1.15.3" }, { name = "scipy-stubs", specifier = ">=1.15.3.0" }, + { name = "tenacity", specifier = ">=9.1.2" }, { name = "types-requests", specifier = ">=2.32.4.20250809" }, ]