Merged
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -1,14 +1,14 @@
---
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.12.11
rev: v0.13.2
hooks:
- id: ruff
args:
- --fix
- --exit-non-zero-on-fix
- --line-length=120
- --ignore=E731,E501,W605,UP038
- --ignore=E731,E501,W605,UP038,RUF059
# See https://beta.ruff.rs/docs/rules for an overview of ruff rules
- --select=E,W,F,I,T,RUF,TID,UP
- --fixable=E,W,F,I,T,RUF,TID,UP
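Note on the RUF059 code newly added to the ignore list: RUF059 is ruff's unused-unpacked-variable rule, which fires when a name bound by tuple unpacking is never read. The `_expected_auth_cls` rename further down in this diff is the conventional way to mark such a value as intentionally discarded. A minimal sketch of what the rule flags (hypothetical values, assuming a ruff version that ships RUF059):

# RUF059 flags names bound by unpacking that are never read:
resource, expected_auth_cls, expected_auth_write_cls = ("raw", dict, dict)
print(resource, expected_auth_write_cls)  # expected_auth_cls unused -> RUF059

# A leading underscore marks the value as intentionally discarded and silences the rule:
resource, _expected_auth_cls, expected_auth_write_cls = ("raw", dict, dict)
print(resource, expected_auth_write_cls)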
1 change: 1 addition & 0 deletions scripts/generate_core_model.py
@@ -9,6 +9,7 @@
from pathlib import Path

from cognite.pygen._generator import generate_typed

from tests.tests_integration.conftest import make_cognite_client

THIS_REPO = Path(__file__).resolve().parent.parent
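The only change in these two scripts is the blank line inserted between the `cognite.pygen` import and the `tests` import. This is most likely ruff's import sorting (the `I` rules selected above) keeping third-party imports in a separate block from first-party/local ones, so the import section ends up grouped like this sketch:

from pathlib import Path  # standard library

from cognite.pygen._generator import generate_typed  # third-party

from tests.tests_integration.conftest import make_cognite_client  # local to this repository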
1 change: 1 addition & 0 deletions scripts/generate_extractor_extension_model.py
@@ -9,6 +9,7 @@
from pathlib import Path

from cognite.pygen._generator import generate_typed

from tests.tests_integration.conftest import make_cognite_client

THIS_REPO = Path(__file__).resolve().parent.parent
8 changes: 4 additions & 4 deletions tests/tests_integration/test_api/test_datapoints.py
@@ -3058,11 +3058,11 @@ def test_error_when_both_target_unit_and_system_in_latest(
self, cognite_client: CogniteClient, all_test_time_series: TimeSeriesList
) -> None:
ts = all_test_time_series[0]
with pytest.raises(ValueError, match="You must use either 'target_unit' or 'target_unit_system', not both."):
with pytest.raises(ValueError, match=r"You must use either 'target_unit' or 'target_unit_system', not both\."):
cognite_client.time_series.data.retrieve_latest(
id=ts.id, before="1h-ago", target_unit="temperature:deg_f", target_unit_system="imperial"
)
with pytest.raises(ValueError, match="You must use either 'target_unit' or 'target_unit_system', not both."):
with pytest.raises(ValueError, match=r"You must use either 'target_unit' or 'target_unit_system', not both\."):
cognite_client.time_series.data.retrieve_latest(
id=LatestDatapointQuery(
id=ts.id, before="1h-ago", target_unit="temperature:deg_f", target_unit_system="imperial"
@@ -3385,13 +3385,13 @@ def test_delete_ranges(self, cognite_client: CogniteClient, new_ts: TimeSeries)
cognite_client.time_series.data.delete_ranges([{"start": "2d-ago", "end": "now", "id": new_ts.id}])

def test_invalid_status_code(self, cognite_client: CogniteClient, new_ts: TimeSeries) -> None:
with pytest.raises(CogniteAPIError, match="^Invalid status code"):
with pytest.raises(CogniteAPIError, match=r"^Invalid status code"):
# code=1 is not allowed: When info type is 00, all info bits must be 0
cognite_client.time_series.data.insert(datapoints=[(1, 3.1, 1)], id=new_ts.id)

def test_invalid_status_symbol(self, cognite_client: CogniteClient, new_ts: TimeSeries) -> None:
symbol = random.choice(("good", "uncertain", "bad")) # should be PascalCased
with pytest.raises(CogniteAPIError, match="^Invalid status code symbol"):
with pytest.raises(CogniteAPIError, match=r"^Invalid status code symbol"):
datapoints: list[dict] = [{"timestamp": 0, "value": 2.3, "status": {"symbol": symbol}}]
cognite_client.time_series.data.insert(datapoints=datapoints, id=new_ts.id)

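A note on the pattern running through the test changes in this diff: `pytest.raises(..., match=...)` treats the string as a regular expression and applies it with `re.search`, so an unescaped `.` is a wildcard and the assertion passes on messages it should not. Escaping the dots in a raw string, or wrapping the full message in `re.escape`, makes the match literal. A minimal sketch, assuming only pytest is installed:

import re

import pytest


def test_match_is_treated_as_a_regex() -> None:
    # Escaped dots match the literal message text:
    with pytest.raises(ValueError, match=r"after 1\.1\.1900"):
        raise ValueError("timestamp must represent a time after 1.1.1900")

    # For longer messages full of punctuation, re.escape is simpler than hand-escaping:
    msg = "You must use either 'target_unit' or 'target_unit_system', not both."
    with pytest.raises(ValueError, match=re.escape(msg)):
        raise ValueError(msg)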
2 changes: 1 addition & 1 deletion tests/tests_integration/test_api/test_raw.py
@@ -150,5 +150,5 @@ def test_insert_dataframe__index_has_duplicates(self, cognite_client):

df = pd.DataFrame({"aa": range(4), "bb": "value"}, index=list("abca"))

with pytest.raises(ValueError, match="^Dataframe index is not unique"):
with pytest.raises(ValueError, match=r"^Dataframe index is not unique"):
cognite_client.raw.rows.insert_dataframe("db", "table", df)
@@ -106,7 +106,7 @@ def test_query_using_time_series_objs__missing_external_id(self, cognite_client,
# Before SDK version 7.32.8, when a passed TimeSeries missing external_id was passed, None
# was just cast to string and passed to the API, most likely leading to a "not found" error
with pytest.raises(
ValueError, match="^TimeSeries passed in 'variables' is missing required field 'external_id'"
ValueError, match=r"^TimeSeries passed in 'variables' is missing required field 'external_id'"
):
cognite_client.time_series.data.synthetic.query(
expressions="A / B",
2 changes: 1 addition & 1 deletion tests/tests_integration/test_api/test_time_series.py
@@ -325,7 +325,7 @@ def test_get_count__numeric(self, test_tss, ts_idx, exp_count):

def test_get_count__string_fails(self, test_ts_string):
assert test_ts_string.is_string is True
with pytest.raises(RuntimeError, match="String time series does not support count aggregate."):
with pytest.raises(RuntimeError, match=r"String time series does not support count aggregate\."):
test_ts_string.count()

def test_get_latest(self, test_ts_numeric, test_ts_string):
@@ -95,7 +95,7 @@ def test_create_transformation_error(self, cognite_client):
transform_without_name = Transformation(external_id=xid, destination=TransformationDestination.assets())

with pytest.raises(
ValueError, match="^External ID, name and ignore null fields are required to create a transformation."
ValueError, match=r"^External ID, name and ignore null fields are required to create a transformation\."
):
cognite_client.transformations.create(transform_without_name)

2 changes: 1 addition & 1 deletion tests/tests_integration/test_api/test_workflows.py
@@ -757,7 +757,7 @@ def test_trigger_run_history(
assert detailed.input == permanent_scheduled_trigger.input

def test_trigger_run_history_non_existing(self, cognite_client: CogniteClient) -> None:
with pytest.raises(CogniteAPIError, match="Workflow trigger not found."):
with pytest.raises(CogniteAPIError, match=r"Workflow trigger not found\."):
cognite_client.workflows.triggers.get_trigger_run_history(
external_id="integration_test-non_existing_trigger"
)
6 changes: 4 additions & 2 deletions tests/tests_unit/test_api/test_datapoints.py
@@ -680,7 +680,7 @@ def test_insert_dataframe_external_ids_and_instance_ids(self, cognite_client: Co
index = pd.to_datetime(timestamps, unit="ms")
df = pd.DataFrame({"123": [1, 2, 3, 4], "456": [5.0, 6.0, 7.0, 8.0]}, index=index)
with pytest.raises(
ValueError, match="`instance_id_headers` and `external_id_headers` cannot be used at the same time."
ValueError, match=r"`instance_id_headers` and `external_id_headers` cannot be used at the same time\."
):
cognite_client.time_series.data.insert_dataframe(df, instance_id_headers=True)

@@ -692,7 +692,9 @@ def test_insert_dataframe_malformed_instance_ids(self, cognite_client: CogniteCl
df = pd.DataFrame({"123": [1, 2, 3, 4], "456": [5.0, 6.0, 7.0, 8.0]}, index=index)
with pytest.raises(
ValueError,
match="Could not find instance IDs in the column header. InstanceId are given as NodeId or tuple. Got <class 'str'>",
match=re.escape(
"Could not find instance IDs in the column header. InstanceId are given as NodeId or tuple. Got <class 'str'>"
),
):
cognite_client.time_series.data.insert_dataframe(df, external_id_headers=False, instance_id_headers=True)

2 changes: 1 addition & 1 deletion tests/tests_unit/test_api/test_datapoints_tasks.py
@@ -192,5 +192,5 @@ def test_retrieve_aggregates__include_outside_points_raises(self, query_validato

full_query = _FullDatapointsQuery(id=id_dct_lst, include_outside_points=False)
all_queries = full_query.parse_into_queries()
with pytest.raises(ValueError, match="'Include outside points' is not supported for aggregates."):
with pytest.raises(ValueError, match=r"'Include outside points' is not supported for aggregates\."):
query_validator(all_queries)
2 changes: 1 addition & 1 deletion tests/tests_unit/test_api/test_files.py
@@ -376,7 +376,7 @@ def test_update_with_update_class_using_instance_id(self, cognite_client, mock_f
def test_update_with_update_class_using_instance_id_and_other_identifier(
self, extra_identifiers, cognite_client, mock_files_response
):
with pytest.raises(ValueError, match="Exactly one of 'id', 'external_id' or 'instance_id' must be provided."):
with pytest.raises(ValueError, match=r"Exactly one of 'id', 'external_id' or 'instance_id' must be provided\."):
FileMetadataUpdate(instance_id=NodeId("foo", "bar"), **extra_identifiers)

def test_update_labels_single(self, cognite_client, mock_files_response):
4 changes: 2 additions & 2 deletions tests/tests_unit/test_api/test_raw.py
@@ -440,11 +440,11 @@ def test_raw_row__direct_column_access(raw_cls):

del row["bar"]
assert row.columns == {}
with pytest.raises(KeyError, match="^'wrong-key'$"):
with pytest.raises(KeyError, match=r"^'wrong-key'$"):
del row["wrong-key"]

row.columns = None
with pytest.raises(RuntimeError, match="^columns not set on Row instance$"):
with pytest.raises(RuntimeError, match=r"^columns not set on Row instance$"):
del row["wrong-key"]


2 changes: 1 addition & 1 deletion tests/tests_unit/test_api/test_synthetic_time_series.py
@@ -133,7 +133,7 @@ def test_expression_builder_variables_missing(self, cognite_client):
def test_expression_builder_unsupported_missing(self, cognite_client):
from sympy import cot, symbols

with pytest.raises(TypeError, match="^Unsupported sympy class cot"):
with pytest.raises(TypeError, match=r"^Unsupported sympy class cot"):
cognite_client.time_series.data.synthetic.query(
[symbols("a") + cot(symbols("a"))], start=0, end="now", variables={"a": "a"}
)
2 changes: 1 addition & 1 deletion tests/tests_unit/test_api/test_vision_extract.py
@@ -189,7 +189,7 @@ def test_extract_unit(
# Cannot save prediction of an incomplete job
with pytest.raises(
CogniteException,
match="Extract job is not completed. If the job is queued or running, wait for completion and try again",
match=r"Extract job is not completed\. If the job is queued or running, wait for completion and try again",
):
job.save_predictions()

3 changes: 2 additions & 1 deletion tests/tests_unit/test_api_client.py
@@ -3,6 +3,7 @@
import json
import math
import random
import re
import time
import unittest
from collections import namedtuple
@@ -129,7 +130,7 @@ def test_requests_fail(self, fn, api_client_with_token):
method(**kwargs)
assert e.value.code == 500

with pytest.raises(CogniteAPIError, match="Client error | code: 400 | X-Request-ID:") as e:
with pytest.raises(CogniteAPIError, match=re.escape("Client error | code: 400 | X-Request-ID:")) as e:
method(**kwargs)
assert e.value.code == 400
assert e.value.message == "Client error"
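The `re.escape` added above does real work: interpreted as a regex, each `|` in "Client error | code: 400 | X-Request-ID:" is alternation, so the old pattern matched any message containing just "Client error ", " code: 400 ", or " X-Request-ID:". A small illustration, independent of the SDK:

import re

pattern = "Client error | code: 400 | X-Request-ID:"
# As a regex, '|' splits the pattern into three alternatives, so unrelated messages match:
assert re.search(pattern, "Server error | code: 500 | X-Request-ID: abc123")
# Escaped, only the literal text matches:
assert re.search(re.escape(pattern), "Client error | code: 400 | X-Request-ID: abc123")
assert re.search(re.escape(pattern), "Server error | code: 500 | X-Request-ID: abc123") is None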
2 changes: 1 addition & 1 deletion tests/tests_unit/test_base.py
@@ -589,7 +589,7 @@ def test_extend__with_identifiers(self):
def test_extend__fails_with_overlapping_identifiers(self):
resource_list = MyResourceList([MyResource(id=1), MyResource(id=2)])
another_resource_list = MyResourceList([MyResource(id=2), MyResource(id=6)])
with pytest.raises(ValueError, match="^Unable to extend as this would introduce duplicates$"):
with pytest.raises(ValueError, match=r"^Unable to extend as this would introduce duplicates$"):
resource_list.extend(another_resource_list)

def test_len(self):
4 changes: 2 additions & 2 deletions tests/tests_unit/test_cognite_client.py
@@ -56,9 +56,9 @@ def mock_token_inspect(rsps) -> None:

class TestCogniteClient:
def test_project_is_empty(self):
with pytest.raises(ValueError, match="Invalid value for ClientConfig.project: <>"):
with pytest.raises(ValueError, match=r"Invalid value for ClientConfig\.project: <>"):
CogniteClient(ClientConfig(client_name="", project="", credentials=Token("bla")))
with pytest.raises(ValueError, match="Invalid value for ClientConfig.project: <None>"):
with pytest.raises(ValueError, match=r"Invalid value for ClientConfig\.project: <None>"):
CogniteClient(ClientConfig(client_name="", project=None, credentials=Token("bla")))

def test_project_is_correct(self, client_config_w_token_factory):
2 changes: 1 addition & 1 deletion tests/tests_unit/test_credential_providers.py
@@ -185,7 +185,7 @@ def test_access_token_not_generated_due_to_error(self, mock_oauth_session, mock_
mock_oauth_session().fetch_token.side_effect = InvalidClientIdError()
with pytest.raises(
CogniteAuthError,
match="Error generating access token: invalid_request, 400, Invalid client_id parameter value.",
match=r"Error generating access token: invalid_request, 400, Invalid client_id parameter value\.",
):
creds = OAuthClientCredentials(**self.DEFAULT_PROVIDER_ARGS)
creds._refresh_access_token()
2 changes: 1 addition & 1 deletion tests/tests_unit/test_data_classes/test_assets.py
@@ -326,7 +326,7 @@ def test_validate_asset_hierarchy_self_dependency(self):
def test_validate_asset_hierarchy__everything_is_wrong(self):
hierarchy = AssetHierarchy(basic_issue_assets()).validate(on_error="ignore")
assert hierarchy.invalid and hierarchy.orphans and hierarchy.unsure_parents and hierarchy.duplicates
with pytest.raises(CogniteAssetHierarchyError, match="^Unable to run cycle-check before"):
with pytest.raises(CogniteAssetHierarchyError, match=r"^Unable to run cycle-check before"):
hierarchy.cycles
with pytest.raises(
CogniteAssetHierarchyError, match=r"Issue\(s\): 3 duplicates, 1 invalid, 1 unsure_parents, 2 orphans$"
2 changes: 1 addition & 1 deletion tests/tests_unit/test_data_classes/test_capabilities.py
@@ -273,7 +273,7 @@ def test_load_capability_misspelled_acl(self, unknown_acls_items):
Capability.load(unknown_acl, allow_unknown=False)

# when difflib doesnt find any matches, it should be omitted from the err. msg:
with pytest.raises(ValueError, match="force loading it as an unknown capability. List of all ACLs"):
with pytest.raises(ValueError, match=r"force loading it as an unknown capability\. List of all ACLs"):
Capability.load(
{"does not match anything really": {"actions": ["READ"], "scope": {"all": {}}}},
allow_unknown=False,
@@ -123,7 +123,7 @@ def test_is_list_property_with_max_list_size(self, lst_cls: type[ListablePropert
assert isinstance(prop, ListablePropertyType)
assert prop.max_list_size == 10

with pytest.raises(ValueError, match="^is_list must be True if max_list_size is set$"):
with pytest.raises(ValueError, match=r"^is_list must be True if max_list_size is set$"):
PropertyType.load({"type": type_name, "list": False, "maxListSize": 10})

@pytest.mark.parametrize("lst_cls_with_unit", all_concrete_subclasses(PropertyTypeWithUnit))
@@ -123,7 +123,7 @@ def test_auth_loaders_auth_cls(sample_sources):


def test_auth_loaders(sample_sources) -> None:
resource, expected_auth_cls, expected_auth_write_cls = sample_sources
resource, _expected_auth_cls, expected_auth_write_cls = sample_sources

source_write_cls = _SOURCE_WRITE_CLASS_BY_TYPE.get(resource["source"])
obj: SourceWrite = source_write_cls._load(resource=resource)
2 changes: 1 addition & 1 deletion tests/tests_unit/test_data_classes/test_workflows.py
@@ -29,7 +29,7 @@ def test_upsert_variant_doesnt_accept_hash(self):
task = WorkflowTask(external_id="foo", parameters=TransformationTaskParameters(external_id="something"))
WorkflowDefinition(tasks=[task], description="desc", hash_="very-random")

with pytest.raises(TypeError, match="unexpected keyword argument 'hash_'$"):
with pytest.raises(TypeError, match=r"unexpected keyword argument 'hash_'$"):
WorkflowDefinitionUpsert(tasks=[task], description="desc", hash_="very-random")


2 changes: 1 addition & 1 deletion tests/tests_unit/test_utils/test_text.py
@@ -35,7 +35,7 @@ def test_shorten(obj, width, placeholder, expected):


def test_shorten__fails():
with pytest.raises(ValueError, match="^Width must be larger than "):
with pytest.raises(ValueError, match=r"^Width must be larger than "):
shorten(object(), width=2, placeholder="...")


14 changes: 8 additions & 6 deletions tests/tests_unit/test_utils/test_time.py
@@ -128,9 +128,11 @@ def test_naive_datetime_to_ms_unix(self, local_tz: str, expected_ms: int) -> Non
def test_naive_datetime_to_ms_windows(self) -> None:
with pytest.raises(
ValueError,
match="Failed to convert datetime to epoch. "
"This likely because you are using a naive datetime. "
"Try using a timezone aware datetime instead.",
match=re.escape(
"Failed to convert datetime to epoch. "
"This likely because you are using a naive datetime. "
"Try using a timezone aware datetime instead."
),
):
datetime_to_ms(datetime(1925, 8, 3))

@@ -239,7 +241,7 @@ def test_time_shift_real_time(self) -> None:

@pytest.mark.parametrize("t", [MIN_TIMESTAMP_MS - 1, datetime(1899, 12, 31, tzinfo=timezone.utc), "100000000w-ago"])
def test_negative(self, t: int | float | str | datetime) -> None:
with pytest.raises(ValueError, match="must represent a time after 1.1.1900"):
with pytest.raises(ValueError, match=re.escape("must represent a time after 1.1.1900")):
timestamp_to_ms(t)


@@ -785,7 +787,7 @@ def test_month_aligner__ceil(self, dt: datetime, expected: datetime) -> None:
assert expected == MonthAligner.ceil(dt)

def test_month_aligner_ceil__invalid_date(self) -> None:
with pytest.raises(ValueError, match="^day is out of range for month$"):
with pytest.raises(ValueError, match=r"^day is out of range for month$"):
MonthAligner.add_units(datetime(2023, 7, 31), 2) # sept has 30 days

@pytest.mark.parametrize(
@@ -804,7 +806,7 @@ def test_month_aligner__add_unites(self, dt: datetime, n_units: int, expected: d
assert expected == MonthAligner.add_units(dt, n_units)

def test_month_aligner_add_unites__invalid_date(self) -> None:
with pytest.raises(ValueError, match="^day is out of range for month$"):
with pytest.raises(ValueError, match=r"^day is out of range for month$"):
MonthAligner.add_units(datetime(2023, 1, 29), 1) # 2023 = non-leap year

