
feat(langchain): add ruff rules PT #32010

Merged: 1 commit, Jul 21, 2025
1 change: 1 addition & 0 deletions libs/langchain/pyproject.toml
@@ -165,6 +165,7 @@ select = [
"PGH", # pygrep-hooks
"PIE", # flake8-pie
"PERF", # flake8-perf
"PT", # flake8-pytest-style
"PTH", # flake8-use-pathlib
"PYI", # flake8-pyi
"Q", # flake8-quotes
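Adding `"PT"` to the `select` list enables Ruff's port of the flake8-pytest-style checks, which drive all of the test-file edits below. One recurring fix is passing `@pytest.mark.parametrize` values as a list rather than a tuple. A minimal sketch, assuming Ruff's default settings for these rules (hypothetical test, not from this repo):

```python
import pytest


# With the PT rules selected and default settings, parametrize values
# are expected to be a list; a tuple literal here would be flagged.
@pytest.mark.parametrize("word", ["foo", "bar", "baz"])
def test_word_is_lowercase(word: str) -> None:
    assert word == word.lower()
```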
@@ -9,7 +9,7 @@


@pytest.mark.parametrize(
"provider, model",
("provider", "model"),
[
("openai", "text-embedding-3-large"),
("google_vertexai", "text-embedding-gecko@003"),
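The hunk above shows the companion naming rule: when several arguments are parametrized together, the PT checks (again with default settings) want the names as a tuple of strings rather than one comma-separated string, while each row of values stays a tuple. A small self-contained sketch of the same shape:

```python
import pytest


# Argument names as a tuple of strings rather than a single
# "provider, model" string; the value rows remain tuples.
@pytest.mark.parametrize(
    ("provider", "model"),
    [("openai", "text-embedding-3-large")],
)
def test_names_as_tuple(provider: str, model: str) -> None:
    assert provider == "openai"
    assert model.startswith("text-embedding")
```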
7 changes: 2 additions & 5 deletions libs/langchain/tests/unit_tests/agents/test_types.py
@@ -1,9 +1,6 @@
import unittest

from langchain.agents.agent_types import AgentType
from langchain.agents.types import AGENT_TO_CLASS


class TestTypes(unittest.TestCase):
def test_confirm_full_coverage(self) -> None:
self.assertEqual(list(AgentType), list(AGENT_TO_CLASS.keys()))
def test_confirm_full_coverage() -> None:
assert list(AgentType) == list(AGENT_TO_CLASS.keys())
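test_types.py also drops `unittest.TestCase` in favour of a plain function with a bare `assert`, which appears to be what the pytest-style checks expect in place of unittest assertion methods. The behaviour is unchanged, and pytest's assertion rewriting still reports both sides of a failing comparison. A generic before/after sketch (hypothetical tests, not from the repo):

```python
import unittest


# Before: unittest assertion methods inside a TestCase, the pattern
# flagged when the suite is linted with the pytest-style rules.
class TestUpper(unittest.TestCase):
    def test_upper(self) -> None:
        self.assertEqual("foo".upper(), "FOO")


# After: a module-level function with a plain assert.
def test_upper_plain() -> None:
    assert "foo".upper() == "FOO"
```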
@@ -16,7 +16,7 @@
DEFAULT_PARSER = get_parser()


@pytest.mark.parametrize("x", ("", "foo", 'foo("bar", "baz")'))
@pytest.mark.parametrize("x", ["", "foo", 'foo("bar", "baz")'])
def test_parse_invalid_grammar(x: str) -> None:
with pytest.raises((ValueError, lark.exceptions.UnexpectedToken)):
DEFAULT_PARSER.parse(x)
@@ -71,13 +71,13 @@ def test_parse_nested_operation() -> None:

def test_parse_disallowed_comparator() -> None:
parser = get_parser(allowed_comparators=[Comparator.EQ])
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Received disallowed comparator gt."):
parser.parse('gt("a", 2)')


def test_parse_disallowed_operator() -> None:
parser = get_parser(allowed_operators=[Operator.AND])
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Received disallowed operator not."):
parser.parse('not(gt("a", 2))')


@@ -87,53 +87,53 @@ def _test_parse_value(x: Any) -> None:
assert actual == x


@pytest.mark.parametrize("x", (-1, 0, 1_000_000))
@pytest.mark.parametrize("x", [-1, 0, 1_000_000])
def test_parse_int_value(x: int) -> None:
_test_parse_value(x)


@pytest.mark.parametrize("x", (-1.001, 0.00000002, 1_234_567.6543210))
@pytest.mark.parametrize("x", [-1.001, 0.00000002, 1_234_567.6543210])
def test_parse_float_value(x: float) -> None:
_test_parse_value(x)


@pytest.mark.parametrize("x", ([], [1, "b", "true"]))
@pytest.mark.parametrize("x", [[], [1, "b", "true"]])
def test_parse_list_value(x: list) -> None:
_test_parse_value(x)


@pytest.mark.parametrize("x", ('""', '" "', '"foo"', "'foo'"))
@pytest.mark.parametrize("x", ['""', '" "', '"foo"', "'foo'"])
def test_parse_string_value(x: str) -> None:
parsed = cast(Comparison, DEFAULT_PARSER.parse(f'eq("x", {x})'))
actual = parsed.value
assert actual == x[1:-1]


@pytest.mark.parametrize("x", ("true", "True", "TRUE", "false", "False", "FALSE"))
@pytest.mark.parametrize("x", ["true", "True", "TRUE", "false", "False", "FALSE"])
def test_parse_bool_value(x: str) -> None:
parsed = cast(Comparison, DEFAULT_PARSER.parse(f'eq("x", {x})'))
actual = parsed.value
expected = x.lower() == "true"
assert actual == expected


@pytest.mark.parametrize("op", ("and", "or"))
@pytest.mark.parametrize("arg", ('eq("foo", 2)', 'and(eq("foo", 2), lte("bar", 1.1))'))
@pytest.mark.parametrize("op", ["and", "or"])
@pytest.mark.parametrize("arg", ['eq("foo", 2)', 'and(eq("foo", 2), lte("bar", 1.1))'])
def test_parser_unpack_single_arg_operation(op: str, arg: str) -> None:
expected = DEFAULT_PARSER.parse(arg)
actual = DEFAULT_PARSER.parse(f"{op}({arg})")
assert expected == actual


@pytest.mark.parametrize("x", ('"2022-10-20"', "'2022-10-20'", "2022-10-20"))
@pytest.mark.parametrize("x", ['"2022-10-20"', "'2022-10-20'", "2022-10-20"])
def test_parse_date_value(x: str) -> None:
parsed = cast(Comparison, DEFAULT_PARSER.parse(f'eq("x", {x})'))
actual = parsed.value["date"]
assert actual == x.strip("'\"")


@pytest.mark.parametrize(
"x, expected",
("x", "expected"),
[
(
'"2021-01-01T00:00:00"',
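Most of the remaining edits add a `match=` argument to bare `pytest.raises(ValueError)` blocks, which the PT checks treat as too broad for generic exception types. `match` is interpreted as a regular expression and searched (via `re.search`) against the string form of the raised exception, so a distinctive substring of the expected message is enough. A minimal sketch with a hypothetical helper:

```python
import pytest


def parse_ratio(value: str) -> float:
    ratio = float(value)
    if not 0.0 <= ratio <= 1.0:
        raise ValueError(f"ratio must be between 0 and 1, got {ratio}")
    return ratio


def test_parse_ratio_out_of_range() -> None:
    # match= pins the assertion to this specific failure rather than
    # accepting any ValueError raised inside the block.
    with pytest.raises(ValueError, match="between 0 and 1"):
        parse_ratio("2.5")
```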
@@ -8,7 +8,7 @@
SCORE_WITH_EXPLANATION = "foo bar answer.\nScore: 80 (fully answers the question, but could provide more detail on the specific error message)" # noqa: E501


@pytest.mark.parametrize("answer", (GOOD_SCORE, SCORE_WITH_EXPLANATION))
@pytest.mark.parametrize("answer", [GOOD_SCORE, SCORE_WITH_EXPLANATION])
def test_parse_scores(answer: str) -> None:
result = output_parser.parse(answer)

27 changes: 18 additions & 9 deletions libs/langchain/tests/unit_tests/chains/test_base.py
@@ -65,14 +65,14 @@ def _call(
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some input keys: {'foo'}"):
chain({"foobar": "baz"})


def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some output keys: {'bar'}"):
chain({"foo": "baz"})


@@ -102,7 +102,7 @@ def test_single_input_correct() -> None:
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some input keys:"):
chain("bar")


@@ -116,7 +116,9 @@ def test_run_single_arg() -> None:
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="`run` supports only one positional argument."
):
chain.run("bar", "foo")


@@ -130,21 +132,28 @@ def test_run_kwargs() -> None:
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some input keys: {'bar'}"):
chain.run(foo="bar", baz="foo")


def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="`run` supported with either positional arguments "
"or keyword arguments but not both.",
):
chain.run("bar", foo="bar")


def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="`run` not supported when there is not exactly one output key.",
):
chain.run("bar")


@@ -175,7 +184,7 @@ def test_run_with_callback_and_input_error() -> None:
callbacks=[handler],
)

with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some input keys: {'foo'}"):
chain({"bar": "foo"})

assert handler.starts == 1
@@ -222,7 +231,7 @@ def test_run_with_callback_and_output_error() -> None:
callbacks=[handler],
)

with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Missing some output keys: {'foo'}"):
chain("foo")

assert handler.starts == 1
19 changes: 16 additions & 3 deletions libs/langchain/tests/unit_tests/chains/test_combine_documents.py
@@ -1,5 +1,6 @@
"""Test functionality related to combining documents."""

import re
from typing import Any

import pytest
@@ -30,7 +31,9 @@ def test_multiple_input_keys() -> None:
def test__split_list_long_single_doc() -> None:
"""Test splitting of a long single doc."""
docs = [Document(page_content="foo" * 100)]
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="A single document was longer than the context length"
):
split_list_of_docs(docs, _fake_docs_len_func, 100)


@@ -140,7 +143,17 @@ async def test_format_doc_missing_metadata() -> None:
input_variables=["page_content", "bar"],
template="{page_content}, {bar}",
)
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match=re.escape(
"Document prompt requires documents to have metadata variables: ['bar']."
),
):
format_document(doc, prompt)
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match=re.escape(
"Document prompt requires documents to have metadata variables: ['bar']."
),
):
await aformat_document(doc, prompt)
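Because `match` is a regex, the expected messages in test_combine_documents.py that contain metacharacters (the brackets and quotes around `['bar']`) are wrapped in `re.escape` so they compare literally. A minimal illustration with a hypothetical function:

```python
import re

import pytest


def require_metadata(keys: list) -> None:
    raise ValueError(f"Missing metadata variables: {keys}.")


def test_match_with_escaped_brackets() -> None:
    # Without re.escape, "['bar']" would be parsed as a regex character
    # class and the pattern would not match the literal message.
    expected = re.escape("Missing metadata variables: ['bar'].")
    with pytest.raises(ValueError, match=expected):
        require_metadata(["bar"])
```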
20 changes: 14 additions & 6 deletions libs/langchain/tests/unit_tests/chains/test_conversation.py
@@ -1,5 +1,6 @@
"""Test conversation chain and memory."""

import re
from typing import Any, Optional

import pytest
@@ -76,7 +77,9 @@ def test_conversation_chain_errors_bad_prompt() -> None:
"""Test that conversation chain raise error with bad prompt."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=[], template="nothing here")
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="Value error, Got unexpected prompt input variables."
):
ConversationChain(llm=llm, prompt=prompt)


@@ -85,7 +88,12 @@ def test_conversation_chain_errors_bad_variable() -> None:
llm = FakeLLM()
prompt = PromptTemplate(input_variables=["foo"], template="{foo}")
memory = ConversationBufferMemory(memory_key="foo")
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match=re.escape(
"Value error, The input key foo was also found in the memory keys (['foo'])"
),
):
ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="foo")


@@ -106,18 +114,18 @@ def test_conversation_memory(memory: BaseMemory) -> None:
memory.save_context(good_inputs, good_outputs)
# This is a bad input because there are two variables that aren't the same as baz.
bad_inputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="One input key expected"):
memory.save_context(bad_inputs, good_outputs)
# This is a bad input because the only variable is the same as baz.
bad_inputs = {"baz": "bar"}
with pytest.raises(ValueError):
with pytest.raises(ValueError, match=re.escape("One input key expected got []")):
memory.save_context(bad_inputs, good_outputs)
# This is a bad output because it is empty.
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Got multiple output keys"):
memory.save_context(good_inputs, {})
# This is a bad output because there are two keys.
bad_outputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="Got multiple output keys"):
memory.save_context(good_inputs, bad_outputs)


2 changes: 1 addition & 1 deletion libs/langchain/tests/unit_tests/chains/test_llm_math.py
@@ -39,5 +39,5 @@ def test_complex_question(fake_llm_math_chain: LLMMathChain) -> None:
@pytest.mark.requires("numexpr")
def test_error(fake_llm_math_chain: LLMMathChain) -> None:
"""Test question that raises error."""
with pytest.raises(ValueError):
with pytest.raises(ValueError, match="unknown format from LLM: foo"):
fake_llm_math_chain.run("foo")
@@ -5,7 +5,7 @@


@pytest.mark.parametrize(
"text,answer,sources",
("text", "answer", "sources"),
[
(
"This Agreement is governed by English law.\nSOURCES: 28-pl",
31 changes: 25 additions & 6 deletions libs/langchain/tests/unit_tests/chains/test_sequential.py
@@ -1,5 +1,6 @@
"""Test pipeline functionality."""

import re
from typing import Optional

import pytest
@@ -94,7 +95,12 @@ def test_sequential_usage_memory() -> None:
memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match=re.escape(
"Value error, The input key(s) foo are found in the Memory keys"
),
):
SequentialChain( # type: ignore[call-arg]
memory=memory,
chains=[chain_1, chain_2],
@@ -136,7 +142,9 @@ def test_sequential_missing_inputs() -> None:
"""Test error is raised when input variables are missing."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="Value error, Missing required input keys: {'test'}"
):
# Also needs "test" as an input
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) # type: ignore[call-arg]

@@ -145,7 +153,10 @@ def test_sequential_bad_outputs() -> None:
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Value error, Expected output variables that were not found: {'test'}.",
):
# "test" is not present as an output variable.
SequentialChain(
chains=[chain_1, chain_2],
@@ -172,7 +183,9 @@ def test_sequential_overlapping_inputs() -> None:
"""Test error is raised when input variables are overlapping."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError, match="Value error, Chain returned keys that already exist"
):
# "test" is specified as an input, but also is an output of one step
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"]) # type: ignore[call-arg]

@@ -226,13 +239,19 @@ def test_multi_input_errors() -> None:
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Value error, Chains used in SimplePipeline should all have one input",
):
SimpleSequentialChain(chains=[chain_1, chain_2])


def test_multi_output_errors() -> None:
"""Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
with pytest.raises(
ValueError,
match="Value error, Chains used in SimplePipeline should all have one output",
):
SimpleSequentialChain(chains=[chain_1, chain_2])