diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fa413644c0a..95992e59139 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -41,7 +41,7 @@ ddtrace/internal/_file_queue.py @DataDog/python-guild ddtrace/internal/_unpatched.py @DataDog/python-guild ddtrace/internal/compat.py @DataDog/python-guild @DataDog/apm-core-python ddtrace/internal/endpoints.py @DataDog/python-guild @DataDog/asm-python -ddtrace/settings/config.py @DataDog/python-guild @DataDog/apm-sdk-api-python +ddtrace/settings/config.py @DataDog/python-guild @DataDog/apm-sdk-capabilities-python docs/ @DataDog/python-guild tests/utils.py @DataDog/python-guild tests/suitespec.yml @DataDog/python-guild @DataDog/apm-core-python @@ -144,6 +144,7 @@ ddtrace/contrib/internal/botocore/services/bedrock.py @DataDog/ml-observ ddtrace/contrib/internal/botocore/services/bedrock_agents.py @DataDog/ml-observability ddtrace/contrib/botocore/services/bedrock.py @DataDog/ml-observability ddtrace/contrib/internal/anthropic @DataDog/ml-observability +ddtrace/contrib/internal/google_adk @DataDog/ml-observability ddtrace/contrib/internal/google_generativeai @DataDog/ml-observability ddtrace/contrib/internal/google_genai @DataDog/ml-observability ddtrace/contrib/internal/vertexai @DataDog/ml-observability @@ -164,6 +165,7 @@ tests/contrib/botocore/test_bedrock_agents_llmobs.py @DataDog/ml-observ tests/contrib/botocore/bedrock_utils.py @DataDog/ml-observability tests/contrib/botocore/bedrock_cassettes @DataDog/ml-observability tests/contrib/anthropic @DataDog/ml-observability +tests/contrib/google_adk @DataDog/ml-observability tests/contrib/google_generativeai @DataDog/ml-observability tests/contrib/google_genai @DataDog/ml-observability tests/contrib/vertexai @DataDog/ml-observability @@ -182,6 +184,7 @@ tests/snapshots/tests.contrib.openai.* @DataDog/ml-observ tests/snapshots/tests.contrib.vertexai.* @DataDog/ml-observability tests/snapshots/tests.contrib.botocore.test_bedrock_agents.* @DataDog/ml-observability tests/snapshots/tests.contrib.botocore.test_bedrock.* @DataDog/ml-observability +tests/snapshots/tests.contrib.google_adk.* @DataDog/ml-observability tests/snapshots/tests.contrib.google_generativeai.* @DataDog/ml-observability tests/snapshots/tests.contrib.langgraph.* @DataDog/ml-observability tests/snapshots/tests.contrib.crewai.* @DataDog/ml-observability @@ -193,29 +196,29 @@ ddtrace/internal/remoteconfig @DataDog/remote-config @DataDog/apm-core-pyt tests/internal/remoteconfig @DataDog/remote-config @DataDog/apm-core-python # API SDK -ddtrace/trace/ @DataDog/apm-sdk-api-python -ddtrace/_trace/ @DataDog/apm-sdk-api-python +ddtrace/trace/ @DataDog/apm-sdk-capabilities-python +ddtrace/_trace/ @DataDog/apm-sdk-capabilities-python # File commonly updated for integrations, widen ownership to help with PR review -ddtrace/_trace/trace_handlers.py @DataDog/apm-sdk-api-python @DataDog/apm-core-python @DataDog/apm-idm-python -ddtrace/opentelemetry/ @DataDog/apm-sdk-api-python -ddtrace/internal/opentelemetry @DataDog/apm-sdk-api-python -ddtrace/opentracer/ @DataDog/apm-sdk-api-python -ddtrace/propagation/ @DataDog/apm-sdk-api-python +ddtrace/_trace/trace_handlers.py @DataDog/apm-sdk-capabilities-python @DataDog/apm-core-python @DataDog/apm-idm-python +ddtrace/opentelemetry/ @DataDog/apm-sdk-capabilities-python +ddtrace/internal/opentelemetry @DataDog/apm-sdk-capabilities-python +ddtrace/opentracer/ @DataDog/apm-sdk-capabilities-python +ddtrace/propagation/ @DataDog/apm-sdk-capabilities-python -ddtrace/internal/sampling.py 
@DataDog/apm-sdk-api-python -ddtrace/internal/tracemethods.py @DataDog/apm-sdk-api-python -ddtrace/internal/metrics.py @DataDog/apm-sdk-api-python -ddtrace/internal/rate_limiter.py @DataDog/apm-sdk-api-python -ddtrace/runtime/ @DataDog/apm-sdk-api-python -ddtrace/internal/runtime/ @DataDog/apm-sdk-api-python -ddtrace/settings/_otel_remapper.py @DataDog/apm-sdk-api-python -tests/integration/test_priority_sampling.py @DataDog/apm-sdk-api-python -tests/integration/test_propagation.py @DataDog/apm-sdk-api-python -tests/runtime/ @DataDog/apm-sdk-api-python -tests/test_sampling.py @DataDog/apm-sdk-api-python -tests/test_tracemethods.py @DataDog/apm-sdk-api-python -tests/opentelemetry/ @DataDog/apm-sdk-api-python -tests/tracer/ @DataDog/apm-sdk-api-python +ddtrace/internal/sampling.py @DataDog/apm-sdk-capabilities-python +ddtrace/internal/tracemethods.py @DataDog/apm-sdk-capabilities-python +ddtrace/internal/metrics.py @DataDog/apm-sdk-capabilities-python +ddtrace/internal/rate_limiter.py @DataDog/apm-sdk-capabilities-python +ddtrace/runtime/ @DataDog/apm-sdk-capabilities-python +ddtrace/internal/runtime/ @DataDog/apm-sdk-capabilities-python +ddtrace/settings/_otel_remapper.py @DataDog/apm-sdk-capabilities-python +tests/integration/test_priority_sampling.py @DataDog/apm-sdk-capabilities-python +tests/integration/test_propagation.py @DataDog/apm-sdk-capabilities-python +tests/runtime/ @DataDog/apm-sdk-capabilities-python +tests/test_sampling.py @DataDog/apm-sdk-capabilities-python +tests/test_tracemethods.py @DataDog/apm-sdk-capabilities-python +tests/opentelemetry/ @DataDog/apm-sdk-capabilities-python +tests/tracer/ @DataDog/apm-sdk-capabilities-python # Override because order matters tests/tracer/test_ci.py @DataDog/ci-app-libraries diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 10af9e1eef8..17216a9ff54 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,21 +1,15 @@ -## Checklist -- [ ] PR author has checked that all the criteria below are met -- The PR description includes an overview of the change -- The PR description articulates the motivation for the change -- The change includes tests OR the PR description describes a testing strategy -- The PR description notes risks associated with the change, if any -- Newly-added code is easy to change -- The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) -- The change includes or references documentation updates if necessary -- Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) +## Description -## Reviewer Checklist -- [ ] Reviewer has checked that all the criteria below are met -- Title is accurate -- All changes are related to the pull request's stated goal -- Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes -- Testing strategy adequately addresses listed risks -- Newly-added code is easy to change -- Release note makes sense to a user of the library -- If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment -- Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) + + +## Testing + + + +## Risks + + + +## Additional Notes + + diff --git a/.github/workflows/build_deploy.yml 
b/.github/workflows/build_deploy.yml index 0f5163c1053..ac416a58156 100644 --- a/.github/workflows/build_deploy.yml +++ b/.github/workflows/build_deploy.yml @@ -24,15 +24,57 @@ on: - cron: 0 2 * * 2-6 jobs: + compute_version: + name: Compute Library Version + runs-on: ubuntu-latest + outputs: + library_version: ${{ steps.compute-version.outputs.library_version }} + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # Include all history and tags + with: + persist-credentials: false + fetch-depth: 0 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + name: Install Python + with: + python-version: '3.12' + - name: Compute Version + id: compute-version + run: | + pip install "setuptools_scm[toml]>=4" + + # If we are on the main or release branch, strip away the dev version + if [[ "$GITHUB_REF_NAME" == "main" || \ + "$GITHUB_REF_NAME" =~ ^[0-9]+\.[0-9]+$ || \ + "$GITHUB_REF_NAME" =~ ^[0-9]+\.x$ ]]; then + LIBRARY_VERSION=$(setuptools-scm --strip-dev) + else + # All else, maintain the dev version + LIBRARY_VERSION=$(setuptools-scm) + fi + + echo "${LIBRARY_VERSION}" | tee version.txt + echo "library_version=${LIBRARY_VERSION}" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: library-version + path: version.txt + build_wheels: + needs: [ "compute_version" ] uses: ./.github/workflows/build_python_3.yml with: cibw_build: 'cp38* cp39* cp310* cp311* cp312* cp313*' cibw_skip: 'cp38-win_arm64 cp39-win_arm64 cp310-win_arm64' + library_version: ${{ needs.compute_version.outputs.library_version }} build_sdist: + needs: [ "compute_version" ] name: Build source distribution runs-on: ubuntu-latest + env: + SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE: ${{ needs.compute_version.outputs.library_version }} steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 # Include all history and tags diff --git a/.github/workflows/build_python_3.yml b/.github/workflows/build_python_3.yml index 20419387134..2200d1d365e 100644 --- a/.github/workflows/build_python_3.yml +++ b/.github/workflows/build_python_3.yml @@ -12,8 +12,39 @@ on: cibw_prerelease_pythons: required: false type: string + library_version: + required: false + type: string jobs: + compute_version: + name: Compute Library Version + runs-on: ubuntu-latest + outputs: + library_version: ${{ steps.compute-version.outputs.library_version }} + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # Include all history and tags + with: + persist-credentials: false + fetch-depth: 0 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + name: Install Python + with: + python-version: '3.12' + - name: Compute Version + id: compute-version + run: | + if [ -n "${{ inputs.library_version}}" ]; then + LIBRARY_VERSION="${{ inputs.library_version}}" + else + pip install "setuptools_scm[toml]>=4" + LIBRARY_VERSION=$(setuptools-scm) + fi + + echo "${LIBRARY_VERSION}" + echo "library_version=${LIBRARY_VERSION}" >> $GITHUB_OUTPUT + build-wheels-matrix: runs-on: ubuntu-latest outputs: @@ -32,19 +63,19 @@ jobs: run: | MATRIX_INCLUDE=$( { - cibuildwheel --print-build-identifiers --platform linux --archs x86_64,i686 | jq -cR '{only: ., os: "ubuntu-latest"}' \ - && cibuildwheel --print-build-identifiers --platform linux --archs aarch64 | jq -cR '{only: ., os: "ubuntu-24.04-arm"}' \ + cibuildwheel --print-build-identifiers --platform linux --archs x86_64,i686 | jq -cR 
'{only: ., os: "ubuntu-latest-16-cores"}' \ + && cibuildwheel --print-build-identifiers --platform linux --archs aarch64 | jq -cR '{only: ., os: "arm-8core-linux"}' \ && cibuildwheel --print-build-identifiers --platform windows --archs AMD64,x86 | jq -cR '{only: ., os: "windows-latest"}' \ && cibuildwheel --print-build-identifiers --platform windows --archs ARM64 | jq -cR '{only: ., os: "windows-11-arm"}' \ - && cibuildwheel --print-build-identifiers --platform macos --archs x86_64 | jq -cR '{only: ., os: "macos-13"}' \ - && cibuildwheel --print-build-identifiers --platform macos --archs arm64 | jq -cR '{only: ., os: "macos-latest"}' + && cibuildwheel --print-build-identifiers --platform macos --archs x86_64 | jq -cR '{only: ., os: "macos-15-large"}' \ + && cibuildwheel --print-build-identifiers --platform macos --archs arm64 | jq -cR '{only: ., os: "macos-15-xlarge"}' } | jq -sc ) echo $MATRIX_INCLUDE echo "include=${MATRIX_INCLUDE}" >> $GITHUB_OUTPUT build: - needs: build-wheels-matrix + needs: ["compute_version", "build-wheels-matrix" ] runs-on: ${{ matrix.os }} name: Build ${{ matrix.only }} strategy: @@ -52,6 +83,7 @@ jobs: matrix: include: ${{ fromJson(needs.build-wheels-matrix.outputs.include) }} env: + SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE: ${{ needs.compute_version.outputs.library_version }} CIBW_SKIP: ${{ inputs.cibw_skip }} CIBW_PRERELEASE_PYTHONS: ${{ inputs.cibw_prerelease_pythons }} CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014 @@ -66,11 +98,12 @@ jobs: fi curl -sSf https://sh.rustup.rs | sh -s -- -y; fi - CIBW_ENVIRONMENT_LINUX: PATH=$HOME/.cargo/bin:$PATH CMAKE_BUILD_PARALLEL_LEVEL=24 CMAKE_ARGS="-DNATIVE_TESTING=OFF" + CIBW_ENVIRONMENT_LINUX: PATH=$HOME/.cargo/bin:$PATH CMAKE_BUILD_PARALLEL_LEVEL=24 CMAKE_ARGS="-DNATIVE_TESTING=OFF" SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE=${{ needs.compute_version.outputs.library_version }} # SYSTEM_VERSION_COMPAT is a workaround for versioning issue, a.k.a. # `platform.mac_ver()` reports incorrect MacOS version at 11.0 # See: https://stackoverflow.com/a/65402241 - CIBW_ENVIRONMENT_MACOS: CMAKE_BUILD_PARALLEL_LEVEL=24 SYSTEM_VERSION_COMPAT=0 CMAKE_ARGS="-DNATIVE_TESTING=OFF" + CIBW_ENVIRONMENT_MACOS: CMAKE_BUILD_PARALLEL_LEVEL=24 SYSTEM_VERSION_COMPAT=0 CMAKE_ARGS="-DNATIVE_TESTING=OFF" SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE=${{ needs.compute_version.outputs.library_version }} + CIBW_ENVIRONMENT_WINDOWS: SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE=${{ needs.compute_version.outputs.library_version }} # cibuildwheel repair will copy anything's under /output directory from the # build container to the host machine. 
This is a bit hacky way, but seems # to be the only way getting debug symbols out from the container while diff --git a/.github/workflows/profiling-native.yml b/.github/workflows/profiling-native.yml index 5c0df634f63..f3e1c6fb21b 100644 --- a/.github/workflows/profiling-native.yml +++ b/.github/workflows/profiling-native.yml @@ -41,6 +41,7 @@ jobs: - name: Install Valgrind if: ${{ matrix.sanitizer == 'valgrind' }} run: | + sudo apt update sudo apt-get install -y valgrind - name: Run tests with sanitizers diff --git a/.github/workflows/require-checklist.yaml b/.github/workflows/require-checklist.yaml deleted file mode 100644 index 656ef63ffd5..00000000000 --- a/.github/workflows/require-checklist.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: Require Checklist -on: - pull_request: - types: [opened, edited, synchronize] -jobs: - require-checklist: - runs-on: ubuntu-latest - steps: - - uses: mheap/require-checklist-action@46d2ca1a0f90144bd081fd13a80b1dc581759365 # v2.5.0 - with: - # require a checklist to be present in the PR description - requireChecklist: true diff --git a/.github/workflows/requirements-locks.yml b/.github/workflows/requirements-locks.yml index 49149c8646e..8396a21d6b6 100644 --- a/.github/workflows/requirements-locks.yml +++ b/.github/workflows/requirements-locks.yml @@ -25,7 +25,7 @@ jobs: run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: Set python interpreters - run: pyenv global 3.10 3.8 3.9 3.11 3.12 3.13 + run: pyenv global 3.10 3.8 3.9 3.11 3.12 3.13 3.14.0rc1 - name: Install Dependencies run: pip install --upgrade pip && pip install riot==0.20.1 && pip install toml==0.10.2 diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml index f42a80a79f1..b5e98cfa48b 100644 --- a/.github/workflows/system-tests.yml +++ b/.github/workflows/system-tests.yml @@ -47,7 +47,7 @@ jobs: persist-credentials: false repository: 'DataDog/system-tests' # Automatically managed, use scripts/update-system-tests-version to update - ref: 'e13ccb562e9e060317b173e25b1e638d89f9df3b' + ref: '0eadeeae3a7c2fddb61ed55b692580ad6cd852f9' - name: Download wheels to binaries directory uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 @@ -94,7 +94,7 @@ jobs: persist-credentials: false repository: 'DataDog/system-tests' # Automatically managed, use scripts/update-system-tests-version to update - ref: 'e13ccb562e9e060317b173e25b1e638d89f9df3b' + ref: '0eadeeae3a7c2fddb61ed55b692580ad6cd852f9' - name: Build runner uses: ./.github/actions/install_runner @@ -217,14 +217,14 @@ jobs: if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' run: ./run.sh APPSEC_BLOCKING_FULL_DENYLIST - - name: Run APPSEC_REQUEST_BLOCKING - if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' - run: ./run.sh APPSEC_REQUEST_BLOCKING - - name: Run APPSEC_RASP if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' run: ./run.sh APPSEC_RASP + - name: Run APPSEC_STANDALONE_RASP + if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' + run: ./run.sh APPSEC_STANDALONE_RASP + - name: Run DEBUGGER_PROBES_STATUS if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'debugger-1' run: ./run.sh DEBUGGER_PROBES_STATUS @@ -279,7 +279,7 @@ jobs: persist-credentials: false repository: 'DataDog/system-tests' # Automatically managed, use scripts/update-system-tests-version to update - ref: 
'e13ccb562e9e060317b173e25b1e638d89f9df3b' + ref: '0eadeeae3a7c2fddb61ed55b692580ad6cd852f9' - name: Download wheels to binaries directory uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7546a0f4b2e..34508166f50 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,7 +14,7 @@ variables: DD_VPA_TEMPLATE: "vpa-template-cpu-p70-10percent-2x-oom-min-cap" # CI_DEBUG_SERVICES: "true" # Automatically managed, use scripts/update-system-tests-version to update - SYSTEM_TESTS_REF: "e13ccb562e9e060317b173e25b1e638d89f9df3b" + SYSTEM_TESTS_REF: "0eadeeae3a7c2fddb61ed55b692580ad6cd852f9" default: interruptible: true @@ -75,12 +75,19 @@ run-tests-trigger: strategy: depend # Validate the ast-grep rule's test suite in .sg/tests -"test ast-grep rules": +"ast-grep rules": extends: .testrunner stage: tests needs: [] script: - - hatch run lint:sg-test + - | + echo -e "\e[0Ksection_start:`date +%s`:sg_test[collapsed=true]\r\e[0KValidate ast-grep rules" + hatch run lint:sg-test + echo -e "\e[0Ksection_end:`date +%s`:sg_test\r\e[0K" + - | + echo -e "\e[0Ksection_start:`date +%s`:sg_scan[collapsed=true]\r\e[0Kast-grep scan" + hatch run lint:sg + echo -e "\e[0Ksection_end:`date +%s`:sg_scan\r\e[0K" microbenchmarks: stage: benchmarks diff --git a/.gitlab/benchmarks/bp-runner.microbenchmarks.fail-on-breach.yml b/.gitlab/benchmarks/bp-runner.microbenchmarks.fail-on-breach.yml index 8820a0d8738..7b178bb647a 100644 --- a/.gitlab/benchmarks/bp-runner.microbenchmarks.fail-on-breach.yml +++ b/.gitlab/benchmarks/bp-runner.microbenchmarks.fail-on-breach.yml @@ -15,10 +15,6 @@ experiments: thresholds: - execution_time < 0.01 ms - max_rss_usage < 33.50 MB - - name: coreapiscenario-context_with_data_only_all_listeners - thresholds: - - execution_time < 0.02 ms - - max_rss_usage < 33.50 MB - name: coreapiscenario-get_item_exists thresholds: - execution_time < 0.01 ms @@ -36,43 +32,47 @@ experiments: - name: djangosimple-appsec thresholds: - execution_time < 22.30 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-exception-replay-enabled thresholds: - execution_time < 1.45 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-iast thresholds: - execution_time < 22.25 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-profiler thresholds: - execution_time < 16.55 ms - - max_rss_usage < 53.50 MB + - max_rss_usage < 54.50 MB - name: djangosimple-span-code-origin thresholds: - execution_time < 28.20 ms - - max_rss_usage < 68.50 MB + - max_rss_usage < 69.50 MB - name: djangosimple-tracer thresholds: - execution_time < 21.75 ms + - max_rss_usage < 67.00 MB + - name: djangosimple-tracer-minimal + thresholds: + - execution_time < 17.50 ms - max_rss_usage < 66.00 MB - name: djangosimple-tracer-native thresholds: - execution_time < 21.75 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 72.50 MB - name: djangosimple-tracer-and-profiler thresholds: - execution_time < 23.50 ms - - max_rss_usage < 67.00 MB + - max_rss_usage < 67.50 MB - name: djangosimple-tracer-no-caches thresholds: - execution_time < 19.65 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-tracer-no-databases thresholds: - execution_time < 20.10 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-tracer-dont-create-db-spans thresholds: - execution_time < 21.50 ms @@ -80,25 +80,29 @@ experiments: - name: 
djangosimple-tracer-no-middleware thresholds: - execution_time < 21.50 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB - name: djangosimple-tracer-no-templates thresholds: - execution_time < 22.00 ms - - max_rss_usage < 66.00 MB + - max_rss_usage < 67.00 MB + - name: djangosimple-resource-renaming + thresholds: + - execution_time < 21.75 ms + - max_rss_usage < 67.00 MB # errortrackingdjangosimple - name: errortrackingdjangosimple-errortracking-enabled-all thresholds: - execution_time < 19.85 ms - - max_rss_usage < 65.50 MB + - max_rss_usage < 66.50 MB - name: errortrackingdjangosimple-errortracking-enabled-user thresholds: - execution_time < 19.40 ms - - max_rss_usage < 65.50 MB + - max_rss_usage < 66.50 MB - name: errortrackingdjangosimple-tracer-enabled thresholds: - execution_time < 19.45 ms - - max_rss_usage < 65.50 MB + - max_rss_usage < 66.50 MB # errortrackingflasksqli - name: errortrackingflasksqli-errortracking-enabled-all @@ -122,16 +126,16 @@ experiments: - name: flasksimple-tracer-native thresholds: - execution_time < 3.65 ms - - max_rss_usage < 53.50 MB + - max_rss_usage < 60.00 MB - name: flasksimple-profiler thresholds: - execution_time < 2.10 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB - name: flasksimple-debugger thresholds: - execution_time < 2.00 ms - - max_rss_usage < 46.50 MB - - max_rss_usage < 45.00 MB + - max_rss_usage < 47.00 MB + - max_rss_usage < 47.00 MB - name: flasksimple-iast-get thresholds: - execution_time < 2.00 ms @@ -139,15 +143,19 @@ experiments: - name: flasksimple-appsec-get thresholds: - execution_time < 4.75 ms - - max_rss_usage < 64.50 MB + - max_rss_usage < 65.00 MB - name: flasksimple-appsec-post thresholds: - execution_time < 6.75 ms - - max_rss_usage < 64.50 MB + - max_rss_usage < 65.00 MB - name: flasksimple-appsec-telemetry thresholds: - execution_time < 4.75 ms - - max_rss_usage < 64.50 MB + - max_rss_usage < 65.00 MB + - name: flasksimple-resource-renaming + thresholds: + - execution_time < 3.65 ms + - max_rss_usage < 53.50 MB # flasksqli - name: flasksqli-appsec-enabled @@ -157,11 +165,11 @@ experiments: - name: flasksqli-iast-enabled thresholds: - execution_time < 2.80 ms - - max_rss_usage < 59.00 MB + - max_rss_usage < 60.00 MB - name: flasksqli-tracer-enabled thresholds: - execution_time < 2.25 ms - - max_rss_usage < 53.50 MB + - max_rss_usage < 54.50 MB # httppropagationextract - name: httppropagationextract-all_styles_all_headers @@ -726,11 +734,11 @@ experiments: - max_rss_usage < 39.00 MB - name: iastpropagation-propagation_enabled_100 thresholds: - - execution_time < 1.90 ms + - execution_time < 2.30 ms - max_rss_usage < 39.00 MB - name: iastpropagation-propagation_enabled_1000 thresholds: - - execution_time < 35.55 ms + - execution_time < 34.55 ms - max_rss_usage < 39.00 MB # otelsdkspan @@ -787,7 +795,7 @@ experiments: - name: otelspan-add-event thresholds: - execution_time < 47.15 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB - name: otelspan-add-metrics thresholds: - execution_time < 344.80 ms @@ -803,19 +811,19 @@ experiments: - name: otelspan-is-recording thresholds: - execution_time < 44.50 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.50 MB - name: otelspan-record-exception thresholds: - execution_time < 67.65 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB - name: otelspan-set-status thresholds: - execution_time < 50.40 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB - name: otelspan-start thresholds: - execution_time < 43.45 ms - - 
max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB - name: otelspan-start-finish thresholds: - execution_time < 88.00 ms @@ -827,7 +835,7 @@ experiments: - name: otelspan-update-name thresholds: - execution_time < 45.15 ms - - max_rss_usage < 46.50 MB + - max_rss_usage < 47.00 MB # packagespackageforrootmodulemapping - name: packagespackageforrootmodulemapping-cache_off diff --git a/.gitlab/download-library-version-from-gh-actions.sh b/.gitlab/download-library-version-from-gh-actions.sh new file mode 100755 index 00000000000..2cafe20ef60 --- /dev/null +++ b/.gitlab/download-library-version-from-gh-actions.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -eo pipefail + +source .gitlab/gha-utils.sh +RUN_ID=$(wait_for_run_id) + +timeout=600 # 10 minutes +start_time=$(date +%s) +end_time=$((start_time + timeout)) +# Loop for 10 minutes waiting for run to appear in github +while [ $(date +%s) -lt $end_time ]; do + # If the artifact isn't ready yet, then the download will fail + if gh run download $RUN_ID --repo DataDog/dd-trace-py --pattern "library-version"; then + break + fi + + echo "Waiting for library version" + sleep 30 +done + +echo "Library Version: $(cat library-version/version.txt)" diff --git a/.gitlab/download-wheels-from-gh-actions.sh b/.gitlab/download-wheels-from-gh-actions.sh index 2792e8754fe..6e5fc6f35fc 100755 --- a/.gitlab/download-wheels-from-gh-actions.sh +++ b/.gitlab/download-wheels-from-gh-actions.sh @@ -1,56 +1,9 @@ #!/bin/bash set -eo pipefail -get_run_id() { - RUN_ID=$( - gh run ls \ - --repo DataDog/dd-trace-py \ - --commit="$CI_COMMIT_SHA" \ - $([ -z "$TRIGGERING_EVENT" ] && echo "" || echo "--event=$TRIGGERING_EVENT") \ - --workflow=build_deploy.yml \ - --json databaseId \ - --jq "first (.[]) | .databaseId" - ) -} +source .gitlab/gha-utils.sh -if [ -z "$CI_COMMIT_SHA" ]; then - echo "Error: CI_COMMIT_SHA was not provided" - exit 1 -fi - -if [ -v "$CI_COMMIT_TAG" ]; then - TRIGGERING_EVENT="release" -fi - -get_run_id - -if [ -z "$RUN_ID" ]; then - echo "No RUN_ID found waiting for job to start" - # The job has not started yet. Give it time to start - sleep 180 # 3 minutes - - echo "Querying for RUN_ID" - - timeout=600 # 10 minutes - start_time=$(date +%s) - end_time=$((start_time + timeout)) - # Loop for 10 minutes waiting for run to appear in github - while [ $(date +%s) -lt $end_time ]; do - get_run_id - if [ -n "$RUN_ID" ]; then - break; - fi - echo "Waiting for RUN_ID" - sleep 60 - done -fi - -if [ -z "$RUN_ID" ]; then - echo "RUN_ID not found. Check if the GitHub build jobs were successfully triggered on your PR. Usually closing and re-opening your PR will resolve this issue." - exit 1 -fi - -echo "Found RUN_ID: $RUN_ID" +RUN_ID=$(wait_for_run_id) mkdir pywheels cd pywheels diff --git a/.gitlab/gha-utils.sh b/.gitlab/gha-utils.sh new file mode 100755 index 00000000000..4ae88f4361f --- /dev/null +++ b/.gitlab/gha-utils.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -eo pipefail + +wait_for_run_id() { + get_run_id() { + RUN_ID=$( + gh run ls \ + --repo DataDog/dd-trace-py \ + --commit="$CI_COMMIT_SHA" \ + $([ -z "$TRIGGERING_EVENT" ] && echo "" || echo "--event=$TRIGGERING_EVENT") \ + --workflow=build_deploy.yml \ + --json databaseId \ + --jq "first (.[]) | .databaseId" + ) + } + + if [ -z "$CI_COMMIT_SHA" ]; then + echo "Error: CI_COMMIT_SHA was not provided" >&2 + exit 1 + fi + + if [ -v CI_COMMIT_TAG ]; then + TRIGGERING_EVENT="release" + fi + + get_run_id + + if [ -z "$RUN_ID" ]; then + echo "No RUN_ID found waiting for job to start" >&2 + # The job has not started yet. 
Give it time to start + sleep 30 + + echo "Querying for RUN_ID" >&2 + + timeout=600 # 10 minutes + start_time=$(date +%s) + end_time=$((start_time + timeout)) + # Loop for 10 minutes waiting for run to appear in github + while [ $(date +%s) -lt $end_time ]; do + get_run_id + if [ -n "$RUN_ID" ]; then + break; + fi + echo "Waiting for RUN_ID" >&2 + sleep 30 + done + fi + + if [ -z "$RUN_ID" ]; then + echo "RUN_ID not found. Check if the GitHub build jobs were successfully triggered on your PR. Usually closing and re-opening your PR will resolve this issue." >&2 + exit 1 + fi + + echo "Found RUN_ID: $RUN_ID" >&2 + + echo "${RUN_ID}" +} diff --git a/.gitlab/package.yml b/.gitlab/package.yml index 0af8a3d1916..3f6d2db6995 100644 --- a/.gitlab/package.yml +++ b/.gitlab/package.yml @@ -1,3 +1,31 @@ +compute_library_version: + image: registry.ddbuild.io/images/dd-octo-sts-ci-base:2025.06-1 + tags: [ "arch:amd64" ] + stage: package + id_tokens: + DDOCTOSTS_ID_TOKEN: + aud: dd-octo-sts + script: | + if [ -z ${GH_TOKEN} ] + then + # Use dd-octo-sts to get GitHub token + dd-octo-sts token --scope DataDog/dd-trace-py --policy gitlab.github-access.read > token + gh auth login --with-token < token + rm token + fi + # Prevent git operation errors: + # failed to determine base repo: failed to run git: fatal: detected dubious ownership in repository at ... + git config --global --add safe.directory "${CI_PROJECT_DIR}" + .gitlab/download-library-version-from-gh-actions.sh + + echo "SETUPTOOLS_SCM_PRETEND_VERSION_FOR_DDTRACE=$(cat library-version/version.txt)" | tee library_version.env + echo "DDTRACE_VERSION=$(cat library-version/version.txt)" | tee -a library_version.env + artifacts: + reports: + dotenv: library_version.env + paths: + - "library-version/version.txt" + download_ddtrace_artifacts: image: registry.ddbuild.io/images/dd-octo-sts-ci-base:2025.06-1 tags: [ "arch:amd64" ] diff --git a/.gitlab/services.yml b/.gitlab/services.yml index 2e787953045..218a64553aa 100644 --- a/.gitlab/services.yml +++ b/.gitlab/services.yml @@ -12,12 +12,14 @@ DD_REMOTE_CONFIGURATION_REFRESH_INTERVAL: 5s DD_DOGSTATSD_NON_LOCAL_TRAFFIC: true testagent: - name: registry.ddbuild.io/images/mirror/dd-apm-test-agent/ddapm-test-agent:v1.29.1 + name: registry.ddbuild.io/images/mirror/dd-apm-test-agent/ddapm-test-agent:v1.34.0 alias: testagent variables: LOG_LEVEL: ERROR SNAPSHOT_DIR: ${CI_PROJECT_DIR}/tests/snapshots SNAPSHOT_CI: 1 + VCR_CI_MODE: true + OTLP_GRPC_PORT: 4136 VCR_CASSETTES_DIRECTORY: ${CI_PROJECT_DIR}/tests/llmobs/llmobs_cassettes PORT: 9126 DD_POOL_TRACE_CHECK_FAILURES: true diff --git a/.gitlab/templates/cached-testrunner.yml b/.gitlab/templates/cached-testrunner.yml index 562249ce8ea..00cf745ba56 100644 --- a/.gitlab/templates/cached-testrunner.yml +++ b/.gitlab/templates/cached-testrunner.yml @@ -5,7 +5,7 @@ EXT_CACHE_VENV: '${{CI_PROJECT_DIR}}/.cache/ext_cache_venv${{PYTHON_VERSION}}' before_script: | ulimit -c unlimited - pyenv global 3.12 3.8 3.9 3.10 3.11 3.13 + pyenv global 3.12 3.8 3.9 3.10 3.11 3.13 3.14.0rc1 export _CI_DD_AGENT_URL=http://${{HOST_IP}}:8126/ set -e -o pipefail if [ ! 
-d $EXT_CACHE_VENV ]; then diff --git a/.gitlab/testrunner.yml b/.gitlab/testrunner.yml index 22aeb93d901..bf90fbd088c 100644 --- a/.gitlab/testrunner.yml +++ b/.gitlab/testrunner.yml @@ -12,7 +12,7 @@ variables: before_script: - ulimit -c unlimited - git config --global --add safe.directory ${CI_PROJECT_DIR} - - pyenv global 3.12 3.8 3.9 3.10 3.11 3.13 + - pyenv global 3.12 3.8 3.9 3.10 3.11 3.13 3.14.0rc1 - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ retry: 2 artifacts: diff --git a/.riot/requirements/1050efa.txt b/.riot/requirements/1050efa.txt index 98035c5522c..1da83ceca87 100644 --- a/.riot/requirements/1050efa.txt +++ b/.riot/requirements/1050efa.txt @@ -5,17 +5,17 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1050efa.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 hypothesis==6.45.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/3f34788.txt b/.riot/requirements/1053a29.txt similarity index 56% rename from .riot/requirements/3f34788.txt rename to .riot/requirements/1053a29.txt index 33529e147f8..e4da9e7fd72 100644 --- a/.riot/requirements/3f34788.txt +++ b/.riot/requirements/1053a29.txt @@ -2,14 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --no-annotate .riot/requirements/3f34788.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1053a29.in # attrs==25.3.0 azure-core==1.35.0 azure-servicebus==7.14.2 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.6 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 @@ -19,12 +19,12 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-asyncio==0.24.0 -pytest-cov==6.2.1 -pytest-mock==3.14.1 -requests==2.32.4 +pytest==8.4.2 +pytest-asyncio==0.23.7 +pytest-cov==7.0.0 +pytest-mock==3.15.0 +requests==2.32.5 six==1.17.0 sortedcontainers==2.4.0 -typing-extensions==4.14.0 +typing-extensions==4.15.0 urllib3==2.5.0 diff --git a/.riot/requirements/10d90c0.txt b/.riot/requirements/10d90c0.txt new file mode 100644 index 00000000000..b5277004d95 --- /dev/null +++ b/.riot/requirements/10d90c0.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/10d90c0.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.2.24 +django-configurations==2.5.1 +exceptiongroup==1.3.0 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +tomli==2.2.1 +typing-extensions==4.15.0 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/111cc29.txt b/.riot/requirements/111cc29.txt new file mode 100644 index 
00000000000..7cc6434a6a2 --- /dev/null +++ b/.riot/requirements/111cc29.txt @@ -0,0 +1,34 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/111cc29.in +# +attrs==25.3.0 +coverage[toml]==7.10.6 +exceptiongroup==1.3.0 +gunicorn==23.0.0 +hypothesis==6.45.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +lz4==4.4.4 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +py-cpuinfo==8.0.0 +pygments==2.19.2 +pytest==8.4.2 +pytest-asyncio==0.21.1 +pytest-benchmark==5.1.0 +pytest-cov==6.3.0 +pytest-cpp==2.6.0 +pytest-mock==3.15.0 +pytest-randomly==3.16.0 +referencing==0.36.2 +rpds-py==0.27.1 +sortedcontainers==2.4.0 +tomli==2.2.1 +typing-extensions==4.15.0 +uwsgi==2.0.29 diff --git a/.riot/requirements/118fd10.txt b/.riot/requirements/118fd10.txt new file mode 100644 index 00000000000..702baae7aab --- /dev/null +++ b/.riot/requirements/118fd10.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/118fd10.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.0.10 +django-configurations==2.5.1 +exceptiongroup==1.3.0 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +tomli==2.2.1 +typing-extensions==4.15.0 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/94282bb.txt b/.riot/requirements/119044a.txt similarity index 63% rename from .riot/requirements/94282bb.txt rename to .riot/requirements/119044a.txt index 735ed0197cf..cae7551e20a 100644 --- a/.riot/requirements/94282bb.txt +++ b/.riot/requirements/119044a.txt @@ -2,33 +2,32 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/94282bb.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/119044a.in # attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 +azure-core==1.33.0 +azure-functions==1.23.0 +azure-servicebus==7.14.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 coverage[toml]==7.6.1 -django==2.2.28 -django-configurations==2.3.2 exceptiongroup==1.3.0 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 +isodate==0.7.2 +markupsafe==2.1.5 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.5.0 -pylibmc==1.6.3 pytest==8.3.5 pytest-cov==5.0.0 -pytest-django[testing]==3.10.0 pytest-mock==3.14.1 -pytz==2025.2 requests==2.32.4 six==1.17.0 sortedcontainers==2.4.0 -sqlparse==0.5.3 tomli==2.2.1 typing-extensions==4.13.2 urllib3==2.2.3 +werkzeug==3.0.6 diff --git a/.riot/requirements/1227878.txt b/.riot/requirements/1227878.txt new file mode 100644 index 00000000000..ccbcde1ed47 --- /dev/null +++ b/.riot/requirements/1227878.txt @@ -0,0 +1,121 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1227878.in +# 
+absolufy-imports==0.3.1 +alembic==1.16.5 +annotated-types==0.7.0 +anyio==4.10.0 +attrs==25.3.0 +authlib==1.6.3 +cachetools==5.5.2 +certifi==2025.8.3 +cffi==2.0.0 +charset-normalizer==3.4.3 +click==8.2.1 +cloudpickle==3.1.1 +coverage[toml]==7.10.6 +cryptography==45.0.7 +deprecated==1.2.18 +docstring-parser==0.17.0 +fastapi==0.116.1 +google-adk==1.14.1 +google-api-core[grpc]==2.25.1 +google-api-python-client==2.181.0 +google-auth==2.40.3 +google-auth-httplib2==0.2.0 +google-cloud-aiplatform[agent-engines]==1.113.0 +google-cloud-appengine-logging==1.6.2 +google-cloud-audit-log==0.3.2 +google-cloud-bigquery==3.37.0 +google-cloud-bigtable==2.32.0 +google-cloud-core==2.4.3 +google-cloud-logging==3.12.1 +google-cloud-resource-manager==1.14.2 +google-cloud-secret-manager==2.24.0 +google-cloud-spanner==3.57.0 +google-cloud-speech==2.33.0 +google-cloud-storage==2.19.0 +google-cloud-trace==1.16.2 +google-crc32c==1.7.1 +google-genai==1.36.0 +google-resumable-media==2.7.2 +googleapis-common-protos[grpc]==1.70.0 +graphviz==0.21 +greenlet==3.2.4 +grpc-google-iam-v1==0.14.2 +grpc-interceptor==0.15.4 +grpcio==1.74.0 +grpcio-status==1.74.0 +h11==0.16.0 +httpcore==1.0.9 +httplib2==0.31.0 +httpx==0.28.1 +httpx-sse==0.4.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.7.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +mako==1.3.10 +markupsafe==3.0.2 +mcp==1.14.0 +mock==5.2.0 +multidict==6.6.4 +numpy==2.3.3 +opentelemetry-api==1.37.0 +opentelemetry-exporter-gcp-trace==1.9.0 +opentelemetry-resourcedetector-gcp==1.9.0a0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +proto-plus==1.26.1 +protobuf==6.32.1 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pycparser==2.23 +pydantic==2.11.9 +pydantic-core==2.33.2 +pydantic-settings==2.10.1 +pygments==2.19.2 +pyparsing==3.2.4 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.0 +python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 +python-multipart==0.0.20 +pyyaml==6.0.2 +referencing==0.36.2 +requests==2.32.5 +rpds-py==0.27.1 +rsa==4.9.1 +shapely==2.1.1 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +sqlalchemy==2.0.43 +sqlalchemy-spanner==1.16.0 +sqlparse==0.5.3 +sse-starlette==3.0.2 +starlette==0.47.3 +tenacity==8.5.0 +typing-extensions==4.15.0 +typing-inspection==0.4.1 +tzlocal==5.3.1 +uritemplate==4.2.0 +urllib3==2.5.0 +uvicorn==0.35.0 +vcrpy==7.0.0 +watchdog==6.0.0 +websockets==15.0.1 +wrapt==1.17.3 +yarl==1.20.1 +zipp==3.23.0 diff --git a/.riot/requirements/1261ed3.txt b/.riot/requirements/1261ed3.txt deleted file mode 100644 index cf97c1bc502..00000000000 --- a/.riot/requirements/1261ed3.txt +++ /dev/null @@ -1,31 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# by the following command: -# -# pip-compile --allow-unsafe --no-annotate .riot/requirements/1261ed3.in -# -aiohappyeyeballs==2.4.3 -aiohttp==3.10.9 -aiohttp-jinja2==1.5.1 -aiosignal==1.3.1 -attrs==24.2.0 -coverage[toml]==7.6.1 -frozenlist==1.4.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.0.0 -jinja2==3.1.4 -markupsafe==2.1.5 -mock==5.1.0 -multidict==6.1.0 -opentracing==2.4.0 -packaging==24.1 -pluggy==1.5.0 -pytest==8.3.3 -pytest-aiohttp==1.0.5 -pytest-asyncio==0.23.7 -pytest-cov==5.0.0 -pytest-mock==3.14.0 -pytest-randomly==3.15.0 -sortedcontainers==2.4.0 -yarl==1.13.1 diff --git a/.riot/requirements/12c10e8.txt b/.riot/requirements/12c10e8.txt index eb2fabb5fec..2593379b730 100644 --- a/.riot/requirements/12c10e8.txt +++ 
b/.riot/requirements/12c10e8.txt @@ -5,22 +5,22 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/12c10e8.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 importlib-metadata==8.7.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 zipp==3.23.0 diff --git a/.riot/requirements/12cb0e7.txt b/.riot/requirements/12cb0e7.txt index 4484bf49f4a..d11aab47a98 100644 --- a/.riot/requirements/12cb0e7.txt +++ b/.riot/requirements/12cb0e7.txt @@ -5,17 +5,17 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/12cb0e7.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 hypothesis==6.45.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/1310347.txt b/.riot/requirements/1310347.txt deleted file mode 100644 index 39a09d5651b..00000000000 --- a/.riot/requirements/1310347.txt +++ /dev/null @@ -1,35 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/1310347.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==4.0.10 -django-configurations==2.5.1 -exceptiongroup==1.3.0 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -tomli==2.2.1 -typing-extensions==4.14.0 -urllib3==2.5.0 diff --git a/.riot/requirements/1334ad5.txt b/.riot/requirements/1334ad5.txt new file mode 100644 index 00000000000..89f8a984ce0 --- /dev/null +++ b/.riot/requirements/1334ad5.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1334ad5.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.2.24 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/133fa5b.txt b/.riot/requirements/133fa5b.txt new file mode 100644 index 00000000000..087147556c9 --- /dev/null +++ b/.riot/requirements/133fa5b.txt @@ -0,0 +1,32 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# 
pip-compile --allow-unsafe --no-annotate .riot/requirements/133fa5b.in +# +attrs==25.3.0 +coverage[toml]==7.10.6 +gunicorn==23.0.0 +hypothesis==6.45.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +lz4==4.4.4 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +py-cpuinfo==8.0.0 +pygments==2.19.2 +pytest==8.4.2 +pytest-asyncio==0.21.1 +pytest-benchmark==5.1.0 +pytest-cov==6.3.0 +pytest-cpp==2.6.0 +pytest-mock==3.15.0 +pytest-randomly==3.16.0 +referencing==0.36.2 +rpds-py==0.27.1 +sortedcontainers==2.4.0 +typing-extensions==4.15.0 +uwsgi==2.0.29 diff --git a/.riot/requirements/13632f0.txt b/.riot/requirements/13632f0.txt new file mode 100644 index 00000000000..03a2eced3a1 --- /dev/null +++ b/.riot/requirements/13632f0.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/13632f0.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.0.10 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/17bea9e.txt b/.riot/requirements/14395e9.txt similarity index 62% rename from .riot/requirements/17bea9e.txt rename to .riot/requirements/14395e9.txt index b08c2facd3c..55ad6e69192 100644 --- a/.riot/requirements/17bea9e.txt +++ b/.riot/requirements/14395e9.txt @@ -2,18 +2,22 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/17bea9e.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/14395e9.in # asgiref==3.8.1 attrs==25.3.0 backports-zoneinfo==0.2.1 bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 coverage[toml]==7.6.1 -django==4.2.23 +dill==0.4.0 +django==4.2.24 django-configurations==2.5.1 exceptiongroup==1.3.0 +gevent==24.2.1 +greenlet==3.1.1 +gunicorn==23.0.0 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 @@ -26,6 +30,7 @@ pytest==8.3.5 pytest-cov==5.0.0 pytest-django[testing]==3.10.0 pytest-mock==3.14.1 +pyyaml==6.0.2 requests==2.32.4 six==1.17.0 sortedcontainers==2.4.0 @@ -33,3 +38,8 @@ sqlparse==0.5.3 tomli==2.2.1 typing-extensions==4.13.2 urllib3==2.2.3 +zope-event==5.0 +zope-interface==7.2 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.3.2 diff --git a/.riot/requirements/145beea.txt b/.riot/requirements/145beea.txt new file mode 100644 index 00000000000..0065b3bd9dd --- /dev/null +++ b/.riot/requirements/145beea.txt @@ -0,0 +1,37 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/145beea.in +# +attrs==25.3.0 +coverage[toml]==7.6.1 +exceptiongroup==1.3.0 +gunicorn==23.0.0 +hypothesis==6.45.0 +importlib-metadata==8.5.0 +importlib-resources==6.4.5 +iniconfig==2.1.0 +jsonschema==4.23.0 
+jsonschema-specifications==2023.12.1 +lz4==4.3.3 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pkgutil-resolve-name==1.3.10 +pluggy==1.5.0 +py-cpuinfo==8.0.0 +pytest==8.3.5 +pytest-asyncio==0.21.1 +pytest-benchmark==4.0.0 +pytest-cov==5.0.0 +pytest-cpp==2.6.0 +pytest-mock==3.14.1 +pytest-randomly==3.15.0 +referencing==0.35.1 +rpds-py==0.20.1 +sortedcontainers==2.4.0 +tomli==2.2.1 +typing-extensions==4.13.2 +uwsgi==2.0.29 +zipp==3.20.2 diff --git a/.riot/requirements/8f3133c.txt b/.riot/requirements/147aa57.txt similarity index 52% rename from .riot/requirements/8f3133c.txt rename to .riot/requirements/147aa57.txt index d625e77cf0f..aec3a5960d1 100644 --- a/.riot/requirements/8f3133c.txt +++ b/.riot/requirements/147aa57.txt @@ -2,35 +2,42 @@ # This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/8f3133c.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/147aa57.in # attrs==25.3.0 babel==2.17.0 -certifi==2025.7.14 -charset-normalizer==3.4.2 -coverage[toml]==7.9.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 execnet==2.1.1 +gevent==25.9.1 +greenlet==3.2.4 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 markupsafe==2.1.5 mock==5.2.0 -mysql-connector-python==9.3.0 +mysql-connector-python==9.4.0 mysqlclient==2.1.1 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 psycopg2-binary==2.9.10 pygments==2.19.2 -pymysql==1.1.1 -pytest==8.4.1 -pytest-asyncio==1.0.0 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pymysql==1.1.2 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.1 pytest-xdist==3.8.0 -requests==2.32.4 +requests==2.32.5 sortedcontainers==2.4.0 -sqlalchemy==2.0.41 -typing-extensions==4.14.1 +sqlalchemy==2.0.43 +typing-extensions==4.15.0 urllib3==2.5.0 werkzeug==3.0.6 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/147bedb.txt b/.riot/requirements/147bedb.txt index fc15ccb0b96..afe636940d2 100644 --- a/.riot/requirements/147bedb.txt +++ b/.riot/requirements/147bedb.txt @@ -5,22 +5,22 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/147bedb.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 importlib-metadata==8.7.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 zipp==3.23.0 diff --git a/.riot/requirements/14d7e8a.txt b/.riot/requirements/14d7e8a.txt deleted file mode 100644 index 979467f1e35..00000000000 --- a/.riot/requirements/14d7e8a.txt +++ /dev/null @@ -1,31 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# by the following command: -# -# pip-compile --allow-unsafe --no-annotate .riot/requirements/14d7e8a.in -# -aiohappyeyeballs==2.4.3 -aiohttp==3.10.9 -aiohttp-jinja2==1.6 -aiosignal==1.3.1 -attrs==24.2.0 -coverage[toml]==7.6.1 -frozenlist==1.4.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.0.0 -jinja2==3.1.4 -markupsafe==2.1.5 -mock==5.1.0 -multidict==6.1.0 -opentracing==2.4.0 -packaging==24.1 -pluggy==1.5.0 -pytest==8.3.3 -pytest-aiohttp==1.0.5 -pytest-asyncio==0.23.7 -pytest-cov==5.0.0 -pytest-mock==3.14.0 
-pytest-randomly==3.15.0 -sortedcontainers==2.4.0 -yarl==1.13.1 diff --git a/.riot/requirements/153b471.txt b/.riot/requirements/153b471.txt new file mode 100644 index 00000000000..140b3dd5c45 --- /dev/null +++ b/.riot/requirements/153b471.txt @@ -0,0 +1,33 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/153b471.in +# +aiohappyeyeballs==2.6.1 +aiohttp==3.12.15 +aiohttp-jinja2==1.5.1 +aiosignal==1.4.0 +attrs==25.3.0 +coverage[toml]==7.10.7 +frozenlist==1.7.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +jinja2==3.1.6 +markupsafe==3.0.2 +mock==5.2.0 +multidict==6.6.4 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pygments==2.19.2 +pytest==8.4.2 +pytest-aiohttp==1.1.0 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.1 +pytest-randomly==4.0.1 +sortedcontainers==2.4.0 +yarl==1.20.1 diff --git a/.riot/requirements/15b674e.txt b/.riot/requirements/15b674e.txt new file mode 100644 index 00000000000..511bf0efaad --- /dev/null +++ b/.riot/requirements/15b674e.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/15b674e.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==5.2.6 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/164c3ce.txt b/.riot/requirements/164c3ce.txt deleted file mode 100644 index 5acfc83a32e..00000000000 --- a/.riot/requirements/164c3ce.txt +++ /dev/null @@ -1,31 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# by the following command: -# -# pip-compile --allow-unsafe --no-annotate .riot/requirements/164c3ce.in -# -aiohappyeyeballs==2.4.3 -aiohttp==3.10.9 -aiohttp-jinja2==1.5.1 -aiosignal==1.3.1 -attrs==24.2.0 -coverage[toml]==7.6.1 -frozenlist==1.4.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.0.0 -jinja2==3.1.4 -markupsafe==2.1.5 -mock==5.1.0 -multidict==6.1.0 -opentracing==2.4.0 -packaging==24.1 -pluggy==1.5.0 -pytest==8.3.3 -pytest-aiohttp==1.0.5 -pytest-asyncio==0.23.7 -pytest-cov==5.0.0 -pytest-mock==3.14.0 -pytest-randomly==3.15.0 -sortedcontainers==2.4.0 -yarl==1.13.1 diff --git a/.riot/requirements/16fd7e4.txt b/.riot/requirements/16fd7e4.txt new file mode 100644 index 00000000000..d71e4a8044e --- /dev/null +++ b/.riot/requirements/16fd7e4.txt @@ -0,0 +1,43 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/16fd7e4.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==3.2.25 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 
+idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pytz==2025.2 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/1757cd0.txt b/.riot/requirements/1757cd0.txt new file mode 100644 index 00000000000..0ed8bc8063a --- /dev/null +++ b/.riot/requirements/1757cd0.txt @@ -0,0 +1,43 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1757cd0.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==3.2.25 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pytz==2025.2 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/176fdea.txt b/.riot/requirements/176fdea.txt deleted file mode 100644 index f0a6188167f..00000000000 --- a/.riot/requirements/176fdea.txt +++ /dev/null @@ -1,32 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/176fdea.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==4.2.23 -django-configurations==2.5.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -urllib3==2.5.0 diff --git a/.riot/requirements/13b38fc.txt b/.riot/requirements/17ac431.txt similarity index 52% rename from .riot/requirements/13b38fc.txt rename to .riot/requirements/17ac431.txt index 494be47e616..a33c0c8d0a1 100644 --- a/.riot/requirements/13b38fc.txt +++ b/.riot/requirements/17ac431.txt @@ -2,35 +2,42 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/13b38fc.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/17ac431.in # attrs==25.3.0 babel==2.17.0 -certifi==2025.7.14 -charset-normalizer==3.4.2 -coverage[toml]==7.9.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 execnet==2.1.1 +gevent==25.9.1 +greenlet==3.2.4 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 markupsafe==2.1.5 mock==5.2.0 -mysql-connector-python==9.3.0 +mysql-connector-python==9.4.0 mysqlclient==2.1.1 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 psycopg2-binary==2.9.10 pygments==2.19.2 -pymysql==1.1.1 -pytest==8.4.1 -pytest-asyncio==1.0.0 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pymysql==1.1.2 +pytest==8.4.2 
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
 pytest-xdist==3.8.0
-requests==2.32.4
+requests==2.32.5
 sortedcontainers==2.4.0
-sqlalchemy==2.0.41
-typing-extensions==4.14.1
+sqlalchemy==2.0.43
+typing-extensions==4.15.0
 urllib3==2.5.0
 werkzeug==3.0.6
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/17b0130.txt b/.riot/requirements/17b0130.txt
new file mode 100644
index 00000000000..c893b33f3ff
--- /dev/null
+++ b/.riot/requirements/17b0130.txt
@@ -0,0 +1,31 @@
+#
+# This file is autogenerated by pip-compile with Python 3.8
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/17b0130.in
+#
+attrs==25.3.0
+azure-core==1.33.0
+azure-functions==1.10.1
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.6.1
+exceptiongroup==1.3.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.5.0
+pytest==8.3.5
+pytest-cov==5.0.0
+pytest-mock==3.14.1
+requests==2.32.4
+six==1.17.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+typing-extensions==4.13.2
+urllib3==2.2.3
diff --git a/.riot/requirements/17f7f1d.txt b/.riot/requirements/17f7f1d.txt
new file mode 100644
index 00000000000..f70f9218b20
--- /dev/null
+++ b/.riot/requirements/17f7f1d.txt
@@ -0,0 +1,32 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/17f7f1d.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.10.1
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+exceptiongroup==1.3.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
diff --git a/.riot/requirements/181ead4.txt b/.riot/requirements/181ead4.txt
deleted file mode 100644
index 23f8112047f..00000000000
--- a/.riot/requirements/181ead4.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/181ead4.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.2.23
-django-configurations==2.5.1
-exceptiongroup==1.3.0
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-tomli==2.2.1
-typing-extensions==4.14.0
-urllib3==2.5.0
diff --git a/.riot/requirements/1ce81e3.txt b/.riot/requirements/1829a8a.txt
similarity index 50%
rename from .riot/requirements/1ce81e3.txt
rename to .riot/requirements/1829a8a.txt
index c0c8beccb31..045bfd5fa7e 100644
--- a/.riot/requirements/1ce81e3.txt
+++ b/.riot/requirements/1829a8a.txt
@@ -2,16 +2,20 @@
 # This file is autogenerated by pip-compile with Python 3.9
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/1ce81e3.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1829a8a.in
 #
 attrs==25.3.0
 bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
+dill==0.4.0
 django==2.2.28
 django-configurations==2.3.2
 exceptiongroup==1.3.0
+gevent==25.9.1
+greenlet==3.2.4
+gunicorn==23.0.0
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -21,15 +25,21 @@ packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
 pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
+pytest==8.4.2
+pytest-cov==7.0.0
 pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
+pytest-mock==3.15.1
 pytz==2025.2
-requests==2.32.4
+pyyaml==6.0.2
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
 sqlparse==0.5.3
 tomli==2.2.1
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/1e1c2b7.txt b/.riot/requirements/18b7202.txt
similarity index 56%
rename from .riot/requirements/1e1c2b7.txt
rename to .riot/requirements/18b7202.txt
index 74784e763d1..c43952110ce 100644
--- a/.riot/requirements/1e1c2b7.txt
+++ b/.riot/requirements/18b7202.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.13
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/1e1c2b7.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/18b7202.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -19,12 +19,12 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/10f86d9.txt b/.riot/requirements/18c82f1.txt
similarity index 56%
rename from .riot/requirements/10f86d9.txt
rename to .riot/requirements/18c82f1.txt
index b3d59480725..7589c24dd01 100644
--- a/.riot/requirements/10f86d9.txt
+++ b/.riot/requirements/18c82f1.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.12
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/10f86d9.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/18c82f1.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -19,12 +19,12 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/18d669b.txt b/.riot/requirements/18d669b.txt
new file mode 100644
index 00000000000..d57b5c05d82
--- /dev/null
+++ b/.riot/requirements/18d669b.txt
@@ -0,0 +1,106 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/18d669b.in
+#
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.2.1
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+fastapi==0.116.2
+google-adk==1.0.0
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform==1.114.0
+google-cloud-bigquery==3.37.0
+google-cloud-core==2.4.3
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+httpx-sse==0.4.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+mcp==1.14.0
+mock==5.2.0
+multidict==6.6.4
+numpy==2.3.3
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pydantic-settings==2.10.1
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+python-multipart==0.0.20
+pyyaml==6.0.2
+referencing==0.36.2
+requests==2.32.5
+rpds-py==0.27.1
+rsa==4.9.1
+shapely==2.1.1
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+sse-starlette==3.0.2
+starlette==0.48.0
+tenacity==9.1.2
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==2.5.0
+uvicorn==0.35.0
+vcrpy==7.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/1d35b33.txt b/.riot/requirements/18f9ba2.txt
similarity index 59%
rename from .riot/requirements/1d35b33.txt
rename to .riot/requirements/18f9ba2.txt
index 57d050a2404..bdfea7fe926 100644
--- a/.riot/requirements/1d35b33.txt
+++ b/.riot/requirements/18f9ba2.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.9
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/1d35b33.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/18f9ba2.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 exceptiongroup==1.3.0
 hypothesis==6.45.0
 idna==3.10
@@ -20,13 +20,13 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
 tomli==2.2.1
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/193c2a8.txt b/.riot/requirements/198266a.txt
similarity index 63%
rename from .riot/requirements/193c2a8.txt
rename to .riot/requirements/198266a.txt
index 87da9b79182..a0a7c21269e 100644
--- a/.riot/requirements/193c2a8.txt
+++ b/.riot/requirements/198266a.txt
@@ -2,18 +2,22 @@
 # This file is autogenerated by pip-compile with Python 3.8
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/193c2a8.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/198266a.in
 #
 asgiref==3.8.1
 attrs==25.3.0
 backports-zoneinfo==0.2.1
 bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
 coverage[toml]==7.6.1
+dill==0.4.0
 django==4.0.10
 django-configurations==2.5.1
 exceptiongroup==1.3.0
+gevent==24.2.1
+greenlet==3.1.1
+gunicorn==23.0.0
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -26,6 +30,7 @@ pytest==8.3.5
 pytest-cov==5.0.0
 pytest-django[testing]==3.10.0
 pytest-mock==3.14.1
+pyyaml==6.0.2
 requests==2.32.4
 six==1.17.0
 sortedcontainers==2.4.0
@@ -33,3 +38,8 @@ sqlparse==0.5.3
 tomli==2.2.1
 typing-extensions==4.13.2
 urllib3==2.2.3
+zope-event==5.0
+zope-interface==7.2
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==75.3.2
diff --git a/.riot/requirements/1995aee.txt b/.riot/requirements/1995aee.txt
new file mode 100644
index 00000000000..8da37e7643f
--- /dev/null
+++ b/.riot/requirements/1995aee.txt
@@ -0,0 +1,109 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1995aee.in
+#
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+backports-asyncio-runner==1.2.0
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.2.1
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+exceptiongroup==1.3.0
+fastapi==0.116.2
+google-adk==1.0.0
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform==1.114.0
+google-cloud-bigquery==3.37.0
+google-cloud-core==2.4.3
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+httpx-sse==0.4.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+mcp==1.14.0
+mock==5.2.0
+multidict==6.6.4
+numpy==2.2.6
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pydantic-settings==2.10.1
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+python-multipart==0.0.20
+pyyaml==6.0.2
+referencing==0.36.2
+requests==2.32.5
+rpds-py==0.27.1
+rsa==4.9.1
+shapely==2.1.1
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+sse-starlette==3.0.2
+starlette==0.48.0
+tenacity==9.1.2
+tomli==2.2.1
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==2.5.0
+uvicorn==0.35.0
+vcrpy==7.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/199a155.txt b/.riot/requirements/199a155.txt
new file mode 100644
index 00000000000..f190975ed10
--- /dev/null
+++ b/.riot/requirements/199a155.txt
@@ -0,0 +1,45 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/199a155.in
+#
+asgiref==3.9.1
+attrs==25.3.0
+bcrypt==4.2.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
+dill==0.4.0
+django==4.0.10
+django-configurations==2.5.1
+exceptiongroup==1.3.0
+gevent==25.9.1
+greenlet==3.2.4
+gunicorn==23.0.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pylibmc==1.6.3
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-django[testing]==3.10.0
+pytest-mock==3.15.1
+pyyaml==6.0.2
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+sqlparse==0.5.3
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/19d80a8.txt b/.riot/requirements/19d80a8.txt
new file mode 100644
index 00000000000..08a62605cb4
--- /dev/null
+++ b/.riot/requirements/19d80a8.txt
@@ -0,0 +1,120 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/19d80a8.in
+#
+absolufy-imports==0.3.1
+alembic==1.16.5
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.2.1
+cloudpickle==3.1.1
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+fastapi==0.116.2
+google-adk==1.14.1
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform[agent-engines]==1.114.0
+google-cloud-appengine-logging==1.6.2
+google-cloud-audit-log==0.3.2
+google-cloud-bigquery==3.37.0
+google-cloud-bigtable==2.32.0
+google-cloud-core==2.4.3
+google-cloud-logging==3.12.1
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-spanner==3.57.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpc-interceptor==0.15.4
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+httpx-sse==0.4.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+mako==1.3.10
+markupsafe==3.0.2
+mcp==1.14.0
+mock==5.2.0
+multidict==6.6.4
+numpy==2.3.3
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pydantic-settings==2.10.1
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+python-multipart==0.0.20
+pyyaml==6.0.2
+referencing==0.36.2
+requests==2.32.5
+rpds-py==0.27.1
+rsa==4.9.1
+shapely==2.1.1
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+sqlalchemy-spanner==1.16.0
+sqlparse==0.5.3
+sse-starlette==3.0.2
+starlette==0.48.0
+tenacity==8.5.0
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==2.5.0
+uvicorn==0.35.0
+vcrpy==7.0.0
+watchdog==6.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/fa40758.txt b/.riot/requirements/1a5fb66.txt
similarity index 52%
rename from .riot/requirements/fa40758.txt
rename to .riot/requirements/1a5fb66.txt
index ad7566358f3..1200bc65901 100644
--- a/.riot/requirements/fa40758.txt
+++ b/.riot/requirements/1a5fb66.txt
@@ -2,35 +2,42 @@
 # This file is autogenerated by pip-compile with Python 3.11
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/fa40758.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1a5fb66.in
 #
 attrs==25.3.0
 babel==2.17.0
-certifi==2025.7.14
-charset-normalizer==3.4.2
-coverage[toml]==7.9.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
 execnet==2.1.1
+gevent==25.9.1
+greenlet==3.2.4
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
 markupsafe==2.1.5
 mock==5.2.0
-mysql-connector-python==9.3.0
+mysql-connector-python==9.4.0
 mysqlclient==2.1.1
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 psycopg2-binary==2.9.10
 pygments==2.19.2
-pymysql==1.1.1
-pytest==8.4.1
-pytest-asyncio==1.0.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pymysql==1.1.2
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
 pytest-xdist==3.8.0
-requests==2.32.4
+requests==2.32.5
 sortedcontainers==2.4.0
-sqlalchemy==2.0.41
-typing-extensions==4.14.1
+sqlalchemy==2.0.43
+typing-extensions==4.15.0
 urllib3==2.5.0
 werkzeug==3.0.6
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/1a79bf4.txt b/.riot/requirements/1a79bf4.txt
index ca3a5df0faf..cd2cc79083d 100644
--- a/.riot/requirements/1a79bf4.txt
+++ b/.riot/requirements/1a79bf4.txt
@@ -2,29 +2,29 @@
 # This file is autogenerated by pip-compile with Python 3.13
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/1a79bf4.in
+# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/1a79bf4.in
 #
 aiohappyeyeballs==2.6.1
-aiohttp==3.12.11
-aiosignal==1.3.2
+aiohttp==3.12.15
+aiosignal==1.4.0
 attrs==25.3.0
-coverage[toml]==7.8.2
-frozenlist==1.6.2
+coverage[toml]==7.10.7
+frozenlist==1.7.0
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
 mock==5.2.0
-multidict==6.4.4
+multidict==6.6.4
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
-propcache==0.3.1
-pygments==2.19.1
-pytest==8.4.0
+propcache==0.3.2
+pygments==2.19.2
+pytest==8.4.2
 pytest-aiohttp==1.0.5
 pytest-asyncio==0.23.7
-pytest-cov==6.1.1
-pytest-mock==3.14.1
-pytest-randomly==3.16.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+pytest-randomly==4.0.1
 sortedcontainers==2.4.0
-yarl==1.20.0
+yarl==1.20.1
diff --git a/.riot/requirements/1b2b6cf.txt b/.riot/requirements/1b2b6cf.txt
new file mode 100644
index 00000000000..669c0d7d8e3
--- /dev/null
+++ b/.riot/requirements/1b2b6cf.txt
@@ -0,0 +1,34 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1b2b6cf.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.23.0
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+exceptiongroup==1.3.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+markupsafe==3.0.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
+werkzeug==3.1.3
diff --git a/.riot/requirements/1bee964.txt b/.riot/requirements/1bee964.txt
deleted file mode 100644
index 7c6cb3d756a..00000000000
--- a/.riot/requirements/1bee964.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.12
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/1bee964.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.0.10
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/1bf3da5.txt b/.riot/requirements/1bf3da5.txt
index 0ead3bd623e..da379d432f9 100644
--- a/.riot/requirements/1bf3da5.txt
+++ b/.riot/requirements/1bf3da5.txt
@@ -10,7 +10,7 @@ exceptiongroup==1.3.0
 hypothesis==6.45.0
 importlib-metadata==8.5.0
 iniconfig==2.1.0
-mariadb==1.1.12
+mariadb==1.1.13
 mock==5.2.0
 opentracing==2.4.0
 packaging==25.0
diff --git a/.riot/requirements/18a650f.txt b/.riot/requirements/1c299c5.txt
similarity index 56%
rename from .riot/requirements/18a650f.txt
rename to .riot/requirements/1c299c5.txt
index 616444bd687..14626b1e81d 100644
--- a/.riot/requirements/18a650f.txt
+++ b/.riot/requirements/1c299c5.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.12
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/18a650f.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1c299c5.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -19,12 +19,12 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/1c3d259.txt b/.riot/requirements/1c3d259.txt
index 69e20821ab6..4ed9e497f2c 100644
--- a/.riot/requirements/1c3d259.txt
+++ b/.riot/requirements/1c3d259.txt
@@ -2,29 +2,29 @@
 # This file is autogenerated by pip-compile with Python 3.13
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/1c3d259.in
+# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/1c3d259.in
 #
 aiohappyeyeballs==2.6.1
-aiohttp==3.12.11
-aiosignal==1.3.2
+aiohttp==3.12.15
+aiosignal==1.4.0
 attrs==25.3.0
-coverage[toml]==7.8.2
-frozenlist==1.6.2
+coverage[toml]==7.10.7
+frozenlist==1.7.0
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
 mock==5.2.0
-multidict==6.4.4
+multidict==6.6.4
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
-propcache==0.3.1
-pygments==2.19.1
-pytest==8.4.0
+propcache==0.3.2
+pygments==2.19.2
+pytest==8.4.2
 pytest-aiohttp==1.0.5
 pytest-asyncio==0.23.7
-pytest-cov==6.1.1
-pytest-mock==3.14.1
-pytest-randomly==3.16.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+pytest-randomly==4.0.1
 sortedcontainers==2.4.0
-yarl==1.20.0
+yarl==1.20.1
diff --git a/.riot/requirements/1ca8a0e.txt b/.riot/requirements/1ca8a0e.txt
deleted file mode 100644
index 3edd06bc51c..00000000000
--- a/.riot/requirements/1ca8a0e.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.13
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/1ca8a0e.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.2
-django==3.2.25
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-pytz==2025.2
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/1cddcb1.txt b/.riot/requirements/1cddcb1.txt
new file mode 100644
index 00000000000..203ed4a9dc4
--- /dev/null
+++ b/.riot/requirements/1cddcb1.txt
@@ -0,0 +1,100 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/1cddcb1.in
+#
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+backports-asyncio-runner==1.2.0
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.1.8
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+exceptiongroup==1.3.0
+fastapi==0.116.2
+google-adk==1.0.0
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform==1.114.0
+google-cloud-bigquery==3.37.0
+google-cloud-core==2.4.3
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+mock==5.2.0
+multidict==6.6.4
+numpy==2.0.2
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+pyyaml==6.0.2
+requests==2.32.5
+rsa==4.9.1
+shapely==2.0.7
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+starlette==0.48.0
+tenacity==9.1.2
+tomli==2.2.1
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==1.26.20
+uvicorn==0.35.0
+vcrpy==7.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/1d07e79.txt b/.riot/requirements/1d07e79.txt
new file mode 100644
index 00000000000..9a0af3995a6
--- /dev/null
+++ b/.riot/requirements/1d07e79.txt
@@ -0,0 +1,46 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1d07e79.in
+#
+asgiref==3.9.1
+attrs==25.3.0
+bcrypt==4.2.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
+dill==0.4.0
+django==3.2.25
+django-configurations==2.5.1
+exceptiongroup==1.3.0
+gevent==25.9.1
+greenlet==3.2.4
+gunicorn==23.0.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pylibmc==1.6.3
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-django[testing]==3.10.0
+pytest-mock==3.15.1
+pytz==2025.2
+pyyaml==6.0.2
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+sqlparse==0.5.3
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/1d36b1d.txt b/.riot/requirements/1d36b1d.txt
new file mode 100644
index 00000000000..9a7b3c84266
--- /dev/null
+++ b/.riot/requirements/1d36b1d.txt
@@ -0,0 +1,34 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1d36b1d.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.23.0
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+exceptiongroup==1.3.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+markupsafe==3.0.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
+werkzeug==3.1.3
diff --git a/.riot/requirements/1d36df8.txt b/.riot/requirements/1d36df8.txt
new file mode 100644
index 00000000000..83aa6f3d069
--- /dev/null
+++ b/.riot/requirements/1d36df8.txt
@@ -0,0 +1,33 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/1d36df8.in
+#
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.15
+aiohttp-jinja2==1.6
+aiosignal==1.4.0
+attrs==25.3.0
+coverage[toml]==7.10.7
+frozenlist==1.7.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+jinja2==3.1.6
+markupsafe==3.0.2
+mock==5.2.0
+multidict==6.6.4
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+pygments==2.19.2
+pytest==8.4.2
+pytest-aiohttp==1.1.0
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+pytest-randomly==4.0.1
+sortedcontainers==2.4.0
+yarl==1.20.1
diff --git a/.riot/requirements/1dc5917.txt b/.riot/requirements/1dc5917.txt
new file mode 100644
index 00000000000..448fe35e664
--- /dev/null
+++ b/.riot/requirements/1dc5917.txt
@@ -0,0 +1,33 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/1dc5917.in
+#
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.15
+aiohttp-jinja2==1.6
+aiosignal==1.4.0
+attrs==25.3.0
+coverage[toml]==7.10.7
+frozenlist==1.7.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+jinja2==3.1.6
+markupsafe==3.0.2
+mock==5.2.0
+multidict==6.6.4
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+pygments==2.19.2
+pytest==8.4.2
+pytest-aiohttp==1.1.0
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+pytest-randomly==4.0.1
+sortedcontainers==2.4.0
+yarl==1.20.1
diff --git a/.riot/requirements/1e0ec0b.txt b/.riot/requirements/1e0ec0b.txt
index b4b0dc67fb9..8fb561ea27d 100644
--- a/.riot/requirements/1e0ec0b.txt
+++ b/.riot/requirements/1e0ec0b.txt
@@ -5,20 +5,20 @@
 # pip-compile --allow-unsafe --no-annotate .riot/requirements/1e0ec0b.in
 #
 attrs==25.3.0
-coverage[toml]==7.9.1
+coverage[toml]==7.10.6
 exceptiongroup==1.3.0
 hypothesis==6.45.0
 iniconfig==2.1.0
-mariadb==1.1.12
+mariadb==1.1.13
 mock==5.2.0
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pytest==8.4.2
+pytest-cov==6.3.0
+pytest-mock==3.15.0
 pytest-randomly==3.16.0
 sortedcontainers==2.4.0
 tomli==2.2.1
-typing-extensions==4.14.0
+typing-extensions==4.15.0
diff --git a/.riot/requirements/1e35304.txt b/.riot/requirements/1e35304.txt
new file mode 100644
index 00000000000..afd81438756
--- /dev/null
+++ b/.riot/requirements/1e35304.txt
@@ -0,0 +1,33 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/1e35304.in
+#
+aiohappyeyeballs==2.6.1
+aiohttp==3.12.15
+aiohttp-jinja2==1.5.1
+aiosignal==1.4.0
+attrs==25.3.0
+coverage[toml]==7.10.7
+frozenlist==1.7.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+jinja2==3.1.6
+markupsafe==3.0.2
+mock==5.2.0
+multidict==6.6.4
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+pygments==2.19.2
+pytest==8.4.2
+pytest-aiohttp==1.1.0
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+pytest-randomly==4.0.1
+sortedcontainers==2.4.0
+yarl==1.20.1
diff --git a/.riot/requirements/1e4bf1b.txt b/.riot/requirements/1e4bf1b.txt
new file mode 100644
index 00000000000..1ded78a4bb5
--- /dev/null
+++ b/.riot/requirements/1e4bf1b.txt
@@ -0,0 +1,32 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e4bf1b.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.10.1
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+exceptiongroup==1.3.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
diff --git a/.riot/requirements/1e5039b.txt b/.riot/requirements/1e5039b.txt
new file mode 100644
index 00000000000..9ab87d7339e
--- /dev/null
+++ b/.riot/requirements/1e5039b.txt
@@ -0,0 +1,120 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e5039b.in
+#
+absolufy-imports==0.3.1
+alembic==1.16.5
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.2.1
+cloudpickle==3.1.1
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+fastapi==0.116.2
+google-adk==1.14.1
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform[agent-engines]==1.114.0
+google-cloud-appengine-logging==1.6.2
+google-cloud-audit-log==0.3.2
+google-cloud-bigquery==3.37.0
+google-cloud-bigtable==2.32.0
+google-cloud-core==2.4.3
+google-cloud-logging==3.12.1
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-spanner==3.57.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpc-interceptor==0.15.4
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+httpx-sse==0.4.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+mako==1.3.10
+markupsafe==3.0.2
+mcp==1.14.0
+mock==5.2.0
+multidict==6.6.4
+numpy==2.3.3
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pydantic-settings==2.10.1
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+python-multipart==0.0.20
+pyyaml==6.0.2
+referencing==0.36.2
+requests==2.32.5
+rpds-py==0.27.1
+rsa==4.9.1
+shapely==2.1.1
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+sqlalchemy-spanner==1.16.0
+sqlparse==0.5.3
+sse-starlette==3.0.2
+starlette==0.48.0
+tenacity==8.5.0
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==2.5.0
+uvicorn==0.35.0
+vcrpy==7.0.0
+watchdog==6.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/1e66025.txt b/.riot/requirements/1e66025.txt
new file mode 100644
index 00000000000..97b4d14ac4c
--- /dev/null
+++ b/.riot/requirements/1e66025.txt
@@ -0,0 +1,31 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e66025.in
+#
+attrs==25.3.0
+coverage[toml]==7.10.6
+gunicorn==23.0.0
+hypothesis==6.45.0
+iniconfig==2.1.0
+jsonschema==4.25.1
+jsonschema-specifications==2025.9.1
+lz4==4.4.4
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+py-cpuinfo==8.0.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-asyncio==0.21.1
+pytest-benchmark==5.1.0
+pytest-cov==6.3.0
+pytest-cpp==2.6.0
+pytest-mock==3.15.0
+pytest-randomly==3.16.0
+referencing==0.36.2
+rpds-py==0.27.1
+sortedcontainers==2.4.0
+uwsgi==2.0.29
diff --git a/.riot/requirements/1826cd3.txt b/.riot/requirements/1f1c431.txt
similarity index 56%
rename from .riot/requirements/1826cd3.txt
rename to .riot/requirements/1f1c431.txt
index f58ec8df5a0..c1cdf9e6c30 100644
--- a/.riot/requirements/1826cd3.txt
+++ b/.riot/requirements/1f1c431.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.11
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/1826cd3.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1f1c431.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -19,12 +19,12 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/1f3b209.txt b/.riot/requirements/1f3b209.txt
index 4874ae4a492..2dfe7af1ab8 100644
--- a/.riot/requirements/1f3b209.txt
+++ b/.riot/requirements/1f3b209.txt
@@ -5,17 +5,17 @@
 # pip-compile --allow-unsafe --no-annotate .riot/requirements/1f3b209.in
 #
 attrs==25.3.0
-coverage[toml]==7.9.1
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 iniconfig==2.1.0
-mariadb==1.1.12
+mariadb==1.1.13
 mock==5.2.0
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pytest==8.4.2
+pytest-cov==6.3.0
+pytest-mock==3.15.0
 pytest-randomly==3.16.0
 sortedcontainers==2.4.0
diff --git a/.riot/requirements/1f937c5.txt b/.riot/requirements/1f937c5.txt
new file mode 100644
index 00000000000..47d2ef0c3ec
--- /dev/null
+++ b/.riot/requirements/1f937c5.txt
@@ -0,0 +1,32 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/1f937c5.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.23.0
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+markupsafe==3.0.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+typing-extensions==4.15.0
+urllib3==2.5.0
+werkzeug==3.1.3
diff --git a/.riot/requirements/1fc9ecc.txt b/.riot/requirements/1fc9ecc.txt
index 475ad4c2f80..a47c8c7f867 100644
--- a/.riot/requirements/1fc9ecc.txt
+++ b/.riot/requirements/1fc9ecc.txt
@@ -5,17 +5,17 @@
 # pip-compile --allow-unsafe --no-annotate .riot/requirements/1fc9ecc.in
 #
 attrs==25.3.0
-coverage[toml]==7.9.1
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 iniconfig==2.1.0
-mariadb==1.1.12
+mariadb==1.1.13
 mock==5.2.0
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pytest==8.4.2
+pytest-cov==6.3.0
+pytest-mock==3.15.0
 pytest-randomly==3.16.0
 sortedcontainers==2.4.0
diff --git a/.riot/requirements/1fdd310.txt b/.riot/requirements/1fdd310.txt
deleted file mode 100644
index 9633a5c2580..00000000000
--- a/.riot/requirements/1fdd310.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.12
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/1fdd310.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.2.23
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/2a056ee.txt b/.riot/requirements/2a056ee.txt
new file mode 100644
index 00000000000..4bd599a7371
--- /dev/null
+++ b/.riot/requirements/2a056ee.txt
@@ -0,0 +1,46 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/2a056ee.in
+#
+asgiref==3.9.1
+attrs==25.3.0
+bcrypt==4.2.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
+dill==0.4.0
+django==3.2.25
+django-configurations==2.5.1
+exceptiongroup==1.3.0
+gevent==25.9.1
+greenlet==3.2.4
+gunicorn==23.0.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pylibmc==1.6.3
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-django[testing]==3.10.0
+pytest-mock==3.15.1
+pytz==2025.2
+pyyaml==6.0.2
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+sqlparse==0.5.3
+tomli==2.2.1
+typing-extensions==4.15.0
+urllib3==2.5.0
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/3adcfe7.txt b/.riot/requirements/3adcfe7.txt
new file mode 100644
index 00000000000..0499d9de939
--- /dev/null
+++ b/.riot/requirements/3adcfe7.txt
@@ -0,0 +1,30 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/3adcfe7.in
+#
+attrs==25.3.0
+azure-core==1.35.0
+azure-functions==1.10.1
+azure-servicebus==7.14.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+isodate==0.7.2
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+typing-extensions==4.15.0
+urllib3==2.5.0
diff --git a/.riot/requirements/3c0d0e9.txt b/.riot/requirements/3c0d0e9.txt
deleted file mode 100644
index 3f42c20193d..00000000000
--- a/.riot/requirements/3c0d0e9.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.13
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/3c0d0e9.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.0.10
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/3f0f7d8.txt b/.riot/requirements/3f0f7d8.txt
new file mode 100644
index 00000000000..2a30ccc1333
--- /dev/null
+++ b/.riot/requirements/3f0f7d8.txt
@@ -0,0 +1,114 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate --resolver=backtracking .riot/requirements/3f0f7d8.in
+#
+absolufy-imports==0.3.1
+alembic==1.16.5
+annotated-types==0.7.0
+anyio==4.10.0
+attrs==25.3.0
+authlib==1.6.4
+backports-asyncio-runner==1.2.0
+cachetools==5.5.2
+certifi==2025.8.3
+cffi==2.0.0
+charset-normalizer==3.4.3
+click==8.1.8
+cloudpickle==3.1.1
+coverage[toml]==7.10.6
+cryptography==46.0.1
+deprecated==1.2.18
+docstring-parser==0.17.0
+exceptiongroup==1.3.0
+fastapi==0.116.2
+google-adk==1.14.1
+google-api-core[grpc]==2.25.1
+google-api-python-client==2.182.0
+google-auth==2.40.3
+google-auth-httplib2==0.2.0
+google-cloud-aiplatform[agent-engines]==1.114.0
+google-cloud-appengine-logging==1.6.2
+google-cloud-audit-log==0.3.2
+google-cloud-bigquery==3.37.0
+google-cloud-bigtable==2.32.0
+google-cloud-core==2.4.3
+google-cloud-logging==3.12.1
+google-cloud-resource-manager==1.14.2
+google-cloud-secret-manager==2.24.0
+google-cloud-spanner==3.57.0
+google-cloud-speech==2.33.0
+google-cloud-storage==2.19.0
+google-cloud-trace==1.16.2
+google-crc32c==1.7.1
+google-genai==1.38.0
+google-resumable-media==2.7.2
+googleapis-common-protos[grpc]==1.70.0
+graphviz==0.21
+grpc-google-iam-v1==0.14.2
+grpc-interceptor==0.15.4
+grpcio==1.75.0
+grpcio-status==1.75.0
+h11==0.16.0
+httpcore==1.0.9
+httplib2==0.31.0
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.7.0
+iniconfig==2.1.0
+mako==1.3.10
+markupsafe==3.0.2
+mock==5.2.0
+multidict==6.6.4
+numpy==2.0.2
+opentelemetry-api==1.37.0
+opentelemetry-exporter-gcp-trace==1.9.0
+opentelemetry-resourcedetector-gcp==1.9.0a0
+opentelemetry-sdk==1.37.0
+opentelemetry-semantic-conventions==0.58b0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+propcache==0.3.2
+proto-plus==1.26.1
+protobuf==6.32.1
+pyasn1==0.6.1
+pyasn1-modules==0.4.2
+pycparser==2.23
+pydantic==2.11.9
+pydantic-core==2.33.2
+pygments==2.19.2
+pyparsing==3.2.4
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
+python-dateutil==2.9.0.post0
+python-dotenv==1.1.1
+pyyaml==6.0.2
+requests==2.32.5
+rsa==4.9.1
+shapely==2.0.7
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+sqlalchemy==2.0.43
+sqlalchemy-spanner==1.16.0
+sqlparse==0.5.3
+starlette==0.48.0
+tenacity==8.5.0
+tomli==2.2.1
+typing-extensions==4.15.0
+typing-inspection==0.4.1
+tzlocal==5.3.1
+uritemplate==4.2.0
+urllib3==1.26.20
+uvicorn==0.35.0
+vcrpy==7.0.0
+watchdog==6.0.0
+websockets==15.0.1
+wrapt==1.17.3
+yarl==1.20.1
+zipp==3.23.0
diff --git a/.riot/requirements/f16bd61.txt b/.riot/requirements/40eff7a.txt
similarity index 52%
rename from .riot/requirements/f16bd61.txt
rename to .riot/requirements/40eff7a.txt
index 85a7f4602cd..4947301e733 100644
--- a/.riot/requirements/f16bd61.txt
+++ b/.riot/requirements/40eff7a.txt
@@ -2,37 +2,45 @@
 # This file is autogenerated by pip-compile with Python 3.9
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/f16bd61.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/40eff7a.in
 #
 attrs==25.3.0
 babel==2.17.0
-certifi==2025.7.14
-charset-normalizer==3.4.2
-coverage[toml]==7.9.2
+backports-asyncio-runner==1.2.0
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
 exceptiongroup==1.3.0
 execnet==2.1.1
+gevent==25.9.1
+greenlet==3.2.4
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
 markupsafe==2.1.5
 mock==5.2.0
-mysql-connector-python==9.3.0
+mysql-connector-python==9.4.0
 mysqlclient==2.1.1
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 psycopg2-binary==2.9.10
 pygments==2.19.2
-pymysql==1.1.1
-pytest==8.4.1
-pytest-asyncio==1.0.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pymysql==1.1.2
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
 pytest-xdist==3.8.0
-requests==2.32.4
+requests==2.32.5
 sortedcontainers==2.4.0
-sqlalchemy==2.0.41
+sqlalchemy==2.0.43
 tomli==2.2.1
-typing-extensions==4.14.1
+typing-extensions==4.15.0
 urllib3==2.5.0
 werkzeug==3.0.6
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/492b83f.txt b/.riot/requirements/492b83f.txt
new file mode 100644
index 00000000000..52c200e905f
--- /dev/null
+++ b/.riot/requirements/492b83f.txt
@@ -0,0 +1,42 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/492b83f.in
+#
+asgiref==3.9.1
+attrs==25.3.0
+bcrypt==4.2.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
+dill==0.4.0
+django==4.2.24
+django-configurations==2.5.1
+gevent==25.9.1
+greenlet==3.2.4
+gunicorn==23.0.0
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.1.0
+mock==5.2.0
+opentracing==2.4.0
+packaging==25.0
+pluggy==1.6.0
+pygments==2.19.2
+pylibmc==1.6.3
+pytest==8.4.2
+pytest-cov==7.0.0
+pytest-django[testing]==3.10.0
+pytest-mock==3.15.1
+pyyaml==6.0.2
+requests==2.32.5
+six==1.17.0
+sortedcontainers==2.4.0
+sqlparse==0.5.3
+urllib3==2.5.0
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/b43c003.txt b/.riot/requirements/4cdef4b.txt
similarity index 59%
rename from .riot/requirements/b43c003.txt
rename to .riot/requirements/4cdef4b.txt
index 4a6b0100c45..bb51e3c46ab 100644
--- a/.riot/requirements/b43c003.txt
+++ b/.riot/requirements/4cdef4b.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.9
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/b43c003.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/4cdef4b.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 exceptiongroup==1.3.0
 hypothesis==6.45.0
 idna==3.10
@@ -20,13 +20,13 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
 tomli==2.2.1
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/4ed631d.txt b/.riot/requirements/4ed631d.txt
index 8b05b059d31..bb4a2c5ecbf 100644
--- a/.riot/requirements/4ed631d.txt
+++ b/.riot/requirements/4ed631d.txt
@@ -5,17 +5,17 @@
 # pip-compile --allow-unsafe --no-annotate .riot/requirements/4ed631d.in
 #
 attrs==25.3.0
-coverage[toml]==7.9.1
+coverage[toml]==7.10.6
 hypothesis==6.45.0
 iniconfig==2.1.0
-mariadb==1.1.12
+mariadb==1.1.13
 mock==5.2.0
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pytest==8.4.2
+pytest-cov==6.3.0
+pytest-mock==3.15.0
 pytest-randomly==3.16.0
 sortedcontainers==2.4.0
diff --git a/.riot/requirements/4ee4ae4.txt b/.riot/requirements/4ee4ae4.txt
deleted file mode 100644
index a25251831c1..00000000000
--- a/.riot/requirements/4ee4ae4.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/4ee4ae4.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.0.10
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/72029dd.txt b/.riot/requirements/4f4caf8.txt
similarity index 72%
rename from .riot/requirements/72029dd.txt
rename to .riot/requirements/4f4caf8.txt
index 81aa4ff153a..7441d0631a9 100644
--- a/.riot/requirements/72029dd.txt
+++ b/.riot/requirements/4f4caf8.txt
@@ -2,15 +2,17 @@
 # This file is autogenerated by pip-compile with Python 3.8
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/72029dd.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/4f4caf8.in
 #
 attrs==25.3.0
 babel==2.17.0
-certifi==2025.7.14
-charset-normalizer==3.4.2
+certifi==2025.8.3
+charset-normalizer==3.4.3
 coverage[toml]==7.6.1
 exceptiongroup==1.3.0
 execnet==2.1.1
+gevent==24.2.1
+greenlet==3.1.1
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
@@ -22,7 +24,7 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.5.0
 psycopg2-binary==2.9.10
-pymysql==1.1.1
+pymysql==1.1.2
 pytest==8.3.5
 pytest-asyncio==0.24.0
 pytest-cov==5.0.0
@@ -31,8 +33,13 @@ pytest-xdist==3.6.1
 pytz==2025.2
 requests==2.32.4
 sortedcontainers==2.4.0
-sqlalchemy==2.0.41
+sqlalchemy==2.0.43
 tomli==2.2.1
 typing-extensions==4.13.2
 urllib3==2.2.3
 werkzeug==3.0.6
+zope-event==5.0
+zope-interface==7.2
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==75.3.2
diff --git a/.riot/requirements/57eb2c8.txt b/.riot/requirements/57eb2c8.txt
deleted file mode 100644
index df0eb43f0ec..00000000000
--- a/.riot/requirements/57eb2c8.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.13
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/57eb2c8.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==5.2.4
-django-configurations==2.5.1
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-urllib3==2.5.0
diff --git a/.riot/requirements/5cb6b77.txt b/.riot/requirements/5cb6b77.txt
deleted file mode 100644
index 4add644b27c..00000000000
--- a/.riot/requirements/5cb6b77.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-# pip-compile --no-annotate .riot/requirements/5cb6b77.in
-#
-asgiref==3.8.1
-attrs==25.3.0
-bcrypt==4.2.1
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
-django==4.0.10
-django-configurations==2.5.1
-exceptiongroup==1.3.0
-hypothesis==6.45.0
-idna==3.10
-iniconfig==2.1.0
-mock==5.2.0
-opentracing==2.4.0
-packaging==25.0
-pluggy==1.6.0
-pygments==2.19.2
-pylibmc==1.6.3
-pytest==8.4.1
-pytest-cov==6.2.1
-pytest-django[testing]==3.10.0
-pytest-mock==3.14.1
-requests==2.32.4
-six==1.17.0
-sortedcontainers==2.4.0
-sqlparse==0.5.3
-tomli==2.2.1
-typing-extensions==4.14.0
-urllib3==2.5.0
diff --git a/.riot/requirements/1670b58.txt b/.riot/requirements/6851a3c.txt
similarity index 59%
rename from .riot/requirements/1670b58.txt
rename to .riot/requirements/6851a3c.txt
index c2944b513c4..de6dec1ed4f 100644
--- a/.riot/requirements/1670b58.txt
+++ b/.riot/requirements/6851a3c.txt
@@ -2,14 +2,14 @@
 # This file is autogenerated by pip-compile with Python 3.10
 # by the following command:
 #
-# pip-compile --no-annotate .riot/requirements/1670b58.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/6851a3c.in
 #
 attrs==25.3.0
 azure-core==1.35.0
 azure-servicebus==7.14.2
-certifi==2025.6.15
-charset-normalizer==3.4.2
-coverage[toml]==7.9.1
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.6
 exceptiongroup==1.3.0
 hypothesis==6.45.0
 idna==3.10
@@ -20,13 +20,13 @@ opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 pygments==2.19.2
-pytest==8.4.1
-pytest-asyncio==0.24.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
-requests==2.32.4
+pytest==8.4.2
+pytest-asyncio==0.23.7
+pytest-cov==7.0.0
+pytest-mock==3.15.0
+requests==2.32.5
 six==1.17.0
 sortedcontainers==2.4.0
 tomli==2.2.1
-typing-extensions==4.14.0
+typing-extensions==4.15.0
 urllib3==2.5.0
diff --git a/.riot/requirements/eca9f2c.txt b/.riot/requirements/6b9f69a.txt
similarity index 52%
rename from .riot/requirements/eca9f2c.txt
rename to .riot/requirements/6b9f69a.txt
index 40d142af814..270062836de 100644
--- a/.riot/requirements/eca9f2c.txt
+++ b/.riot/requirements/6b9f69a.txt
@@ -2,37 +2,45 @@
 # This file is autogenerated by pip-compile with Python 3.10
 # by the following command:
 #
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/eca9f2c.in
+# pip-compile --allow-unsafe --no-annotate .riot/requirements/6b9f69a.in
 #
 attrs==25.3.0
 babel==2.17.0
-certifi==2025.7.14
-charset-normalizer==3.4.2
-coverage[toml]==7.9.2
+backports-asyncio-runner==1.2.0
+certifi==2025.8.3
+charset-normalizer==3.4.3
+coverage[toml]==7.10.7
 exceptiongroup==1.3.0
 execnet==2.1.1
+gevent==25.9.1
+greenlet==3.2.4
 hypothesis==6.45.0
 idna==3.10
 iniconfig==2.1.0
 markupsafe==2.1.5
 mock==5.2.0
-mysql-connector-python==9.3.0
+mysql-connector-python==9.4.0
 mysqlclient==2.1.1
 opentracing==2.4.0
 packaging==25.0
 pluggy==1.6.0
 psycopg2-binary==2.9.10
 pygments==2.19.2
-pymysql==1.1.1
-pytest==8.4.1
-pytest-asyncio==1.0.0
-pytest-cov==6.2.1
-pytest-mock==3.14.1
+pymysql==1.1.2
+pytest==8.4.2
+pytest-asyncio==1.2.0
+pytest-cov==7.0.0
+pytest-mock==3.15.1
 pytest-xdist==3.8.0
-requests==2.32.4
+requests==2.32.5
 sortedcontainers==2.4.0
-sqlalchemy==2.0.41
+sqlalchemy==2.0.43
 tomli==2.2.1
-typing-extensions==4.14.1
+typing-extensions==4.15.0
 urllib3==2.5.0
 werkzeug==3.0.6
+zope-event==6.0
+zope-interface==8.0
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==80.9.0
diff --git a/.riot/requirements/6d52d9c.txt b/.riot/requirements/6d52d9c.txt
a/.riot/requirements/6d52d9c.txt b/.riot/requirements/6d52d9c.txt deleted file mode 100644 index 2c542858fdc..00000000000 --- a/.riot/requirements/6d52d9c.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/6d52d9c.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==3.2.25 -django-configurations==2.5.1 -exceptiongroup==1.3.0 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -pytz==2025.2 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -tomli==2.2.1 -typing-extensions==4.14.0 -urllib3==2.5.0 diff --git a/.riot/requirements/72027cc.txt b/.riot/requirements/72027cc.txt deleted file mode 100644 index 7e3fdf8d9f7..00000000000 --- a/.riot/requirements/72027cc.txt +++ /dev/null @@ -1,33 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/72027cc.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.2 -django==3.2.25 -django-configurations==2.5.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -pytz==2025.2 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -urllib3==2.5.0 diff --git a/.riot/requirements/1170953.txt b/.riot/requirements/7670259.txt similarity index 59% rename from .riot/requirements/1170953.txt rename to .riot/requirements/7670259.txt index 540786d7221..3bdc8bcb3d5 100644 --- a/.riot/requirements/1170953.txt +++ b/.riot/requirements/7670259.txt @@ -2,14 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --no-annotate .riot/requirements/1170953.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7670259.in # attrs==25.3.0 azure-core==1.35.0 azure-servicebus==7.14.2 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 idna==3.10 @@ -20,13 +20,13 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-asyncio==0.24.0 -pytest-cov==6.2.1 -pytest-mock==3.14.1 -requests==2.32.4 +pytest==8.4.2 +pytest-asyncio==0.23.7 +pytest-cov==7.0.0 +pytest-mock==3.15.0 +requests==2.32.5 six==1.17.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 urllib3==2.5.0 diff --git a/.riot/requirements/769aa27.txt b/.riot/requirements/769aa27.txt index 38cb00ebd77..e8047d1e8ca 100644 --- a/.riot/requirements/769aa27.txt +++ b/.riot/requirements/769aa27.txt @@ -5,17 +5,17 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/769aa27.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 hypothesis==6.45.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 
+pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/156842b.txt b/.riot/requirements/7b02bf5.txt similarity index 76% rename from .riot/requirements/156842b.txt rename to .riot/requirements/7b02bf5.txt index 858d452e704..399b31b7be8 100644 --- a/.riot/requirements/156842b.txt +++ b/.riot/requirements/7b02bf5.txt @@ -2,13 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/156842b.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7b02bf5.in # attrs==25.3.0 azure-core==1.33.0 azure-servicebus==7.14.2 -certifi==2025.6.15 -charset-normalizer==3.4.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 coverage[toml]==7.6.1 exceptiongroup==1.3.0 hypothesis==6.45.0 @@ -20,7 +20,7 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.5.0 pytest==8.3.5 -pytest-asyncio==0.24.0 +pytest-asyncio==0.23.7 pytest-cov==5.0.0 pytest-mock==3.14.1 requests==2.32.4 diff --git a/.riot/requirements/85acf6e.txt b/.riot/requirements/85acf6e.txt index ccbbf2992ca..580c21d5303 100644 --- a/.riot/requirements/85acf6e.txt +++ b/.riot/requirements/85acf6e.txt @@ -5,7 +5,7 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/85acf6e.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 iniconfig==2.1.0 @@ -15,10 +15,10 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 diff --git a/.riot/requirements/ade49fb.txt b/.riot/requirements/85deb9a.txt similarity index 56% rename from .riot/requirements/ade49fb.txt rename to .riot/requirements/85deb9a.txt index 1c2e43a2fa5..625bc98ed4b 100644 --- a/.riot/requirements/ade49fb.txt +++ b/.riot/requirements/85deb9a.txt @@ -2,14 +2,14 @@ # This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --no-annotate .riot/requirements/ade49fb.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/85deb9a.in # attrs==25.3.0 azure-core==1.35.0 azure-servicebus==7.14.2 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.6 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 @@ -19,12 +19,12 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-asyncio==0.24.0 -pytest-cov==6.2.1 -pytest-mock==3.14.1 -requests==2.32.4 +pytest==8.4.2 +pytest-asyncio==0.23.7 +pytest-cov==7.0.0 +pytest-mock==3.15.0 +requests==2.32.5 six==1.17.0 sortedcontainers==2.4.0 -typing-extensions==4.14.0 +typing-extensions==4.15.0 urllib3==2.5.0 diff --git a/.riot/requirements/891b825.txt b/.riot/requirements/891b825.txt deleted file mode 100644 index 2fab2a24c10..00000000000 --- a/.riot/requirements/891b825.txt +++ /dev/null @@ -1,32 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/891b825.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==4.2.23 -django-configurations==2.5.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 
-pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -urllib3==2.5.0 diff --git a/.riot/requirements/8a17cb2.txt b/.riot/requirements/8a17cb2.txt index 10b59cc5782..c692572e88b 100644 --- a/.riot/requirements/8a17cb2.txt +++ b/.riot/requirements/8a17cb2.txt @@ -10,7 +10,7 @@ exceptiongroup==1.3.0 hypothesis==6.45.0 importlib-metadata==8.5.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 diff --git a/.riot/requirements/8a26afe.txt b/.riot/requirements/8a26afe.txt new file mode 100644 index 00000000000..6cd712fd37a --- /dev/null +++ b/.riot/requirements/8a26afe.txt @@ -0,0 +1,36 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/8a26afe.in +# +attrs==25.3.0 +coverage[toml]==7.10.6 +exceptiongroup==1.3.0 +gunicorn==23.0.0 +hypothesis==6.45.0 +importlib-metadata==8.7.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +lz4==4.4.4 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +py-cpuinfo==8.0.0 +pygments==2.19.2 +pytest==8.4.2 +pytest-asyncio==0.21.1 +pytest-benchmark==5.1.0 +pytest-cov==6.3.0 +pytest-cpp==2.6.0 +pytest-mock==3.15.0 +pytest-randomly==3.16.0 +referencing==0.36.2 +rpds-py==0.27.1 +sortedcontainers==2.4.0 +tomli==2.2.1 +typing-extensions==4.15.0 +uwsgi==2.0.29 +zipp==3.23.0 diff --git a/.riot/requirements/8c5fae9.txt b/.riot/requirements/8c5fae9.txt deleted file mode 100644 index 8dec07f7c1c..00000000000 --- a/.riot/requirements/8c5fae9.txt +++ /dev/null @@ -1,35 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/8c5fae9.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==4.2.23 -django-configurations==2.5.1 -exceptiongroup==1.3.0 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -tomli==2.2.1 -typing-extensions==4.14.0 -urllib3==2.5.0 diff --git a/.riot/requirements/a12d611.txt b/.riot/requirements/a12d611.txt deleted file mode 100644 index 14892ac4a89..00000000000 --- a/.riot/requirements/a12d611.txt +++ /dev/null @@ -1,33 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/a12d611.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.2 -django==3.2.25 -django-configurations==2.5.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -pytz==2025.2 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -urllib3==2.5.0 diff --git a/.riot/requirements/a9f396a.txt b/.riot/requirements/a9f396a.txt deleted file mode 100644 index 4505eee48b0..00000000000 --- a/.riot/requirements/a9f396a.txt +++ /dev/null @@ -1,31 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# 
by the following command: -# -# pip-compile --allow-unsafe --no-annotate .riot/requirements/a9f396a.in -# -aiohappyeyeballs==2.4.3 -aiohttp==3.10.9 -aiohttp-jinja2==1.6 -aiosignal==1.3.1 -attrs==24.2.0 -coverage[toml]==7.6.1 -frozenlist==1.4.1 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.0.0 -jinja2==3.1.4 -markupsafe==2.1.5 -mock==5.1.0 -multidict==6.1.0 -opentracing==2.4.0 -packaging==24.1 -pluggy==1.5.0 -pytest==8.3.3 -pytest-aiohttp==1.0.5 -pytest-asyncio==0.23.7 -pytest-cov==5.0.0 -pytest-mock==3.14.0 -pytest-randomly==3.15.0 -sortedcontainers==2.4.0 -yarl==1.13.1 diff --git a/.riot/requirements/ae0fa28.txt b/.riot/requirements/ae0fa28.txt new file mode 100644 index 00000000000..a54e28db974 --- /dev/null +++ b/.riot/requirements/ae0fa28.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ae0fa28.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==5.2.6 +django-configurations==2.5.1 +exceptiongroup==1.3.0 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +tomli==2.2.1 +typing-extensions==4.15.0 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/b0ca79b.txt b/.riot/requirements/b0ca79b.txt deleted file mode 100644 index bdb600d4a57..00000000000 --- a/.riot/requirements/b0ca79b.txt +++ /dev/null @@ -1,36 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/b0ca79b.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 -coverage[toml]==7.9.1 -django==3.2.25 -django-configurations==2.5.1 -exceptiongroup==1.3.0 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -pytz==2025.2 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -tomli==2.2.1 -typing-extensions==4.14.0 -urllib3==2.5.0 diff --git a/.riot/requirements/b42974d.txt b/.riot/requirements/b42974d.txt new file mode 100644 index 00000000000..442507b5ca7 --- /dev/null +++ b/.riot/requirements/b42974d.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/b42974d.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.2.24 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 
+sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/ba22367.txt b/.riot/requirements/ba22367.txt new file mode 100644 index 00000000000..ee6b1cc17ae --- /dev/null +++ b/.riot/requirements/ba22367.txt @@ -0,0 +1,32 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ba22367.in +# +attrs==25.3.0 +coverage[toml]==7.10.6 +gunicorn==23.0.0 +hypothesis==6.45.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +lz4==4.4.4 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +py-cpuinfo==8.0.0 +pygments==2.19.2 +pytest==8.4.2 +pytest-asyncio==0.21.1 +pytest-benchmark==5.1.0 +pytest-cov==6.3.0 +pytest-cpp==2.6.0 +pytest-mock==3.15.0 +pytest-randomly==3.16.0 +referencing==0.36.2 +rpds-py==0.27.1 +sortedcontainers==2.4.0 +typing-extensions==4.15.0 +uwsgi==2.0.29 diff --git a/.riot/requirements/bebf559.txt b/.riot/requirements/bebf559.txt new file mode 100644 index 00000000000..c88dcfdf8da --- /dev/null +++ b/.riot/requirements/bebf559.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/bebf559.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.0.10 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/c2ff780.txt b/.riot/requirements/c2ff780.txt new file mode 100644 index 00000000000..efa6fe3347a --- /dev/null +++ b/.riot/requirements/c2ff780.txt @@ -0,0 +1,123 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c2ff780.in +# +absolufy-imports==0.3.1 +alembic==1.16.5 +annotated-types==0.7.0 +anyio==4.10.0 +attrs==25.3.0 +authlib==1.6.4 +backports-asyncio-runner==1.2.0 +cachetools==5.5.2 +certifi==2025.8.3 +cffi==2.0.0 +charset-normalizer==3.4.3 +click==8.2.1 +cloudpickle==3.1.1 +coverage[toml]==7.10.6 +cryptography==46.0.1 +deprecated==1.2.18 +docstring-parser==0.17.0 +exceptiongroup==1.3.0 +fastapi==0.116.2 +google-adk==1.14.1 +google-api-core[grpc]==2.25.1 +google-api-python-client==2.182.0 +google-auth==2.40.3 +google-auth-httplib2==0.2.0 +google-cloud-aiplatform[agent-engines]==1.114.0 +google-cloud-appengine-logging==1.6.2 +google-cloud-audit-log==0.3.2 +google-cloud-bigquery==3.37.0 +google-cloud-bigtable==2.32.0 +google-cloud-core==2.4.3 +google-cloud-logging==3.12.1 +google-cloud-resource-manager==1.14.2 +google-cloud-secret-manager==2.24.0 +google-cloud-spanner==3.57.0 +google-cloud-speech==2.33.0 +google-cloud-storage==2.19.0 +google-cloud-trace==1.16.2 +google-crc32c==1.7.1 +google-genai==1.38.0 +google-resumable-media==2.7.2 
+googleapis-common-protos[grpc]==1.70.0 +graphviz==0.21 +grpc-google-iam-v1==0.14.2 +grpc-interceptor==0.15.4 +grpcio==1.75.0 +grpcio-status==1.75.0 +h11==0.16.0 +httpcore==1.0.9 +httplib2==0.31.0 +httpx==0.28.1 +httpx-sse==0.4.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.7.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +mako==1.3.10 +markupsafe==3.0.2 +mcp==1.14.0 +mock==5.2.0 +multidict==6.6.4 +numpy==2.2.6 +opentelemetry-api==1.37.0 +opentelemetry-exporter-gcp-trace==1.9.0 +opentelemetry-resourcedetector-gcp==1.9.0a0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +proto-plus==1.26.1 +protobuf==6.32.1 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pycparser==2.23 +pydantic==2.11.9 +pydantic-core==2.33.2 +pydantic-settings==2.10.1 +pygments==2.19.2 +pyparsing==3.2.4 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.1 +python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 +python-multipart==0.0.20 +pyyaml==6.0.2 +referencing==0.36.2 +requests==2.32.5 +rpds-py==0.27.1 +rsa==4.9.1 +shapely==2.1.1 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +sqlalchemy==2.0.43 +sqlalchemy-spanner==1.16.0 +sqlparse==0.5.3 +sse-starlette==3.0.2 +starlette==0.48.0 +tenacity==8.5.0 +tomli==2.2.1 +typing-extensions==4.15.0 +typing-inspection==0.4.1 +tzlocal==5.3.1 +uritemplate==4.2.0 +urllib3==2.5.0 +uvicorn==0.35.0 +vcrpy==7.0.0 +watchdog==6.0.0 +websockets==15.0.1 +wrapt==1.17.3 +yarl==1.20.1 +zipp==3.23.0 diff --git a/.riot/requirements/c48b250.txt b/.riot/requirements/c48b250.txt new file mode 100644 index 00000000000..2d957b44797 --- /dev/null +++ b/.riot/requirements/c48b250.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c48b250.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.0.10 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/c4d4455.txt b/.riot/requirements/c4d4455.txt deleted file mode 100644 index f6d50daf086..00000000000 --- a/.riot/requirements/c4d4455.txt +++ /dev/null @@ -1,20 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.13 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/c4d4455.in -# -attrs==25.1.0 -coverage[toml]==7.6.10 -hypothesis==6.45.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==24.2 -pluggy==1.5.0 -pytest==8.3.4 -pytest-asyncio==0.21.1 -pytest-cov==6.0.0 -pytest-mock==3.14.0 -pytest-randomly==3.16.0 -sortedcontainers==2.4.0 diff --git a/.riot/requirements/d65bf1a.txt b/.riot/requirements/d65bf1a.txt new file mode 100644 index 00000000000..71a02aa041d --- /dev/null +++ b/.riot/requirements/d65bf1a.txt @@ -0,0 +1,43 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the 
following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/d65bf1a.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==3.2.25 +django-configurations==2.5.1 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pytz==2025.2 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/db343a1.txt b/.riot/requirements/db343a1.txt new file mode 100644 index 00000000000..21b14788b02 --- /dev/null +++ b/.riot/requirements/db343a1.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --cert=None --client-cert=None --index-url=None --no-annotate --pip-args=None .riot/requirements/db343a1.in +# +attrs==25.3.0 +coverage[toml]==7.10.7 +hypothesis==6.45.0 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.1 +pytest-randomly==4.0.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/dc2fb54.txt b/.riot/requirements/dc2fb54.txt new file mode 100644 index 00000000000..ed20fd0b219 --- /dev/null +++ b/.riot/requirements/dc2fb54.txt @@ -0,0 +1,106 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/dc2fb54.in +# +annotated-types==0.7.0 +anyio==4.10.0 +attrs==25.3.0 +authlib==1.6.4 +cachetools==5.5.2 +certifi==2025.8.3 +cffi==2.0.0 +charset-normalizer==3.4.3 +click==8.2.1 +coverage[toml]==7.10.6 +cryptography==46.0.1 +deprecated==1.2.18 +docstring-parser==0.17.0 +fastapi==0.116.2 +google-adk==1.0.0 +google-api-core[grpc]==2.25.1 +google-api-python-client==2.182.0 +google-auth==2.40.3 +google-auth-httplib2==0.2.0 +google-cloud-aiplatform==1.114.0 +google-cloud-bigquery==3.37.0 +google-cloud-core==2.4.3 +google-cloud-resource-manager==1.14.2 +google-cloud-secret-manager==2.24.0 +google-cloud-speech==2.33.0 +google-cloud-storage==2.19.0 +google-cloud-trace==1.16.2 +google-crc32c==1.7.1 +google-genai==1.38.0 +google-resumable-media==2.7.2 +googleapis-common-protos[grpc]==1.70.0 +graphviz==0.21 +grpc-google-iam-v1==0.14.2 +grpcio==1.75.0 +grpcio-status==1.75.0 +h11==0.16.0 +httpcore==1.0.9 +httplib2==0.31.0 +httpx==0.28.1 +httpx-sse==0.4.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.7.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +mcp==1.14.0 +mock==5.2.0 +multidict==6.6.4 +numpy==2.3.3 +opentelemetry-api==1.37.0 +opentelemetry-exporter-gcp-trace==1.9.0 +opentelemetry-resourcedetector-gcp==1.9.0a0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +proto-plus==1.26.1 +protobuf==6.32.1 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pycparser==2.23 +pydantic==2.11.9 +pydantic-core==2.33.2 +pydantic-settings==2.10.1 +pygments==2.19.2 +pyparsing==3.2.4 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 
+pytest-mock==3.15.1 +python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 +python-multipart==0.0.20 +pyyaml==6.0.2 +referencing==0.36.2 +requests==2.32.5 +rpds-py==0.27.1 +rsa==4.9.1 +shapely==2.1.1 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +sqlalchemy==2.0.43 +sse-starlette==3.0.2 +starlette==0.48.0 +tenacity==9.1.2 +typing-extensions==4.15.0 +typing-inspection==0.4.1 +tzlocal==5.3.1 +uritemplate==4.2.0 +urllib3==2.5.0 +uvicorn==0.35.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.3 +yarl==1.20.1 +zipp==3.23.0 diff --git a/.riot/requirements/e0744c3.txt b/.riot/requirements/e0744c3.txt new file mode 100644 index 00000000000..df00f841009 --- /dev/null +++ b/.riot/requirements/e0744c3.txt @@ -0,0 +1,107 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e0744c3.in +# +annotated-types==0.7.0 +anyio==4.10.0 +attrs==25.3.0 +authlib==1.6.3 +cachetools==5.5.2 +certifi==2025.8.3 +cffi==2.0.0 +charset-normalizer==3.4.3 +click==8.2.1 +coverage[toml]==7.10.6 +cryptography==45.0.7 +deprecated==1.2.18 +docstring-parser==0.17.0 +fastapi==0.116.1 +google-adk==1.0.0 +google-api-core[grpc]==2.25.1 +google-api-python-client==2.181.0 +google-auth==2.40.3 +google-auth-httplib2==0.2.0 +google-cloud-aiplatform==1.113.0 +google-cloud-bigquery==3.37.0 +google-cloud-core==2.4.3 +google-cloud-resource-manager==1.14.2 +google-cloud-secret-manager==2.24.0 +google-cloud-speech==2.33.0 +google-cloud-storage==2.19.0 +google-cloud-trace==1.16.2 +google-crc32c==1.7.1 +google-genai==1.36.0 +google-resumable-media==2.7.2 +googleapis-common-protos[grpc]==1.70.0 +graphviz==0.21 +greenlet==3.2.4 +grpc-google-iam-v1==0.14.2 +grpcio==1.74.0 +grpcio-status==1.74.0 +h11==0.16.0 +httpcore==1.0.9 +httplib2==0.31.0 +httpx==0.28.1 +httpx-sse==0.4.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.7.0 +iniconfig==2.1.0 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +mcp==1.14.0 +mock==5.2.0 +multidict==6.6.4 +numpy==2.3.3 +opentelemetry-api==1.37.0 +opentelemetry-exporter-gcp-trace==1.9.0 +opentelemetry-resourcedetector-gcp==1.9.0a0 +opentelemetry-sdk==1.37.0 +opentelemetry-semantic-conventions==0.58b0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +proto-plus==1.26.1 +protobuf==6.32.1 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pycparser==2.23 +pydantic==2.11.9 +pydantic-core==2.33.2 +pydantic-settings==2.10.1 +pygments==2.19.2 +pyparsing==3.2.4 +pytest==8.4.2 +pytest-asyncio==1.2.0 +pytest-cov==7.0.0 +pytest-mock==3.15.0 +python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 +python-multipart==0.0.20 +pyyaml==6.0.2 +referencing==0.36.2 +requests==2.32.5 +rpds-py==0.27.1 +rsa==4.9.1 +shapely==2.1.1 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +sqlalchemy==2.0.43 +sse-starlette==3.0.2 +starlette==0.47.3 +tenacity==9.1.2 +typing-extensions==4.15.0 +typing-inspection==0.4.1 +tzlocal==5.3.1 +uritemplate==4.2.0 +urllib3==2.5.0 +uvicorn==0.35.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.3 +yarl==1.20.1 +zipp==3.23.0 diff --git a/.riot/requirements/e151245.txt b/.riot/requirements/e151245.txt deleted file mode 100644 index 925b513e0fa..00000000000 --- a/.riot/requirements/e151245.txt +++ /dev/null @@ -1,35 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/e151245.in -# -asgiref==3.8.1 -attrs==25.3.0 -bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 
-coverage[toml]==7.9.1 -django==5.2.4 -django-configurations==2.5.1 -exceptiongroup==1.3.0 -hypothesis==6.45.0 -idna==3.10 -iniconfig==2.1.0 -mock==5.2.0 -opentracing==2.4.0 -packaging==25.0 -pluggy==1.6.0 -pygments==2.19.2 -pylibmc==1.6.3 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-django[testing]==3.10.0 -pytest-mock==3.14.1 -requests==2.32.4 -six==1.17.0 -sortedcontainers==2.4.0 -sqlparse==0.5.3 -tomli==2.2.1 -typing-extensions==4.14.0 -urllib3==2.5.0 diff --git a/.riot/requirements/e75aea6.txt b/.riot/requirements/e75aea6.txt index b9e4de1cd71..328a6666c11 100644 --- a/.riot/requirements/e75aea6.txt +++ b/.riot/requirements/e75aea6.txt @@ -5,7 +5,7 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/e75aea6.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 importlib-metadata==8.7.0 @@ -16,11 +16,11 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 zipp==3.23.0 diff --git a/.riot/requirements/e776347.txt b/.riot/requirements/e776347.txt new file mode 100644 index 00000000000..86296e1883c --- /dev/null +++ b/.riot/requirements/e776347.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e776347.in +# +asgiref==3.9.1 +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.10.7 +dill==0.4.0 +django==4.2.24 +django-configurations==2.5.1 +exceptiongroup==1.3.0 +gevent==25.9.1 +greenlet==3.2.4 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +pygments==2.19.2 +pylibmc==1.6.3 +pytest==8.4.2 +pytest-cov==7.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.15.1 +pyyaml==6.0.2 +requests==2.32.5 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +tomli==2.2.1 +typing-extensions==4.15.0 +urllib3==2.5.0 +zope-event==6.0 +zope-interface==8.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==80.9.0 diff --git a/.riot/requirements/cef811a.txt b/.riot/requirements/eb4440f.txt similarity index 63% rename from .riot/requirements/cef811a.txt rename to .riot/requirements/eb4440f.txt index b467a126c72..9420d403230 100644 --- a/.riot/requirements/cef811a.txt +++ b/.riot/requirements/eb4440f.txt @@ -2,17 +2,21 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/cef811a.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/eb4440f.in # asgiref==3.8.1 attrs==25.3.0 bcrypt==4.2.1 -certifi==2025.6.15 -charset-normalizer==3.4.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 coverage[toml]==7.6.1 +dill==0.4.0 django==3.2.25 django-configurations==2.5.1 exceptiongroup==1.3.0 +gevent==24.2.1 +greenlet==3.1.1 +gunicorn==23.0.0 hypothesis==6.45.0 idna==3.10 iniconfig==2.1.0 @@ -26,6 +30,7 @@ pytest-cov==5.0.0 pytest-django[testing]==3.10.0 pytest-mock==3.14.1 pytz==2025.2 +pyyaml==6.0.2 requests==2.32.4 six==1.17.0 sortedcontainers==2.4.0 @@ -33,3 +38,8 @@ sqlparse==0.5.3 tomli==2.2.1 typing-extensions==4.13.2 urllib3==2.2.3 +zope-event==5.0 +zope-interface==7.2 + +# The following packages are considered to be unsafe in a 
requirements file: +setuptools==75.3.2 diff --git a/.riot/requirements/f61cdff.txt b/.riot/requirements/f61cdff.txt new file mode 100644 index 00000000000..853373c6a43 --- /dev/null +++ b/.riot/requirements/f61cdff.txt @@ -0,0 +1,44 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f61cdff.in +# +attrs==25.3.0 +bcrypt==4.2.1 +certifi==2025.8.3 +charset-normalizer==3.4.3 +coverage[toml]==7.6.1 +dill==0.4.0 +django==2.2.28 +django-configurations==2.3.2 +exceptiongroup==1.3.0 +gevent==24.2.1 +greenlet==3.1.1 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.5.0 +pylibmc==1.6.3 +pytest==8.3.5 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.1 +pytz==2025.2 +pyyaml==6.0.2 +requests==2.32.4 +six==1.17.0 +sortedcontainers==2.4.0 +sqlparse==0.5.3 +tomli==2.2.1 +typing-extensions==4.13.2 +urllib3==2.2.3 +zope-event==5.0 +zope-interface==7.2 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.3.2 diff --git a/.riot/requirements/803d32f.txt b/.riot/requirements/f8e5119.txt similarity index 76% rename from .riot/requirements/803d32f.txt rename to .riot/requirements/f8e5119.txt index eae5d0fed3f..a06b17c8085 100644 --- a/.riot/requirements/803d32f.txt +++ b/.riot/requirements/f8e5119.txt @@ -2,13 +2,13 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/803d32f.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f8e5119.in # attrs==25.3.0 azure-core==1.33.0 azure-servicebus==7.14.2 -certifi==2025.6.15 -charset-normalizer==3.4.2 +certifi==2025.8.3 +charset-normalizer==3.4.3 coverage[toml]==7.6.1 exceptiongroup==1.3.0 hypothesis==6.45.0 @@ -20,7 +20,7 @@ opentracing==2.4.0 packaging==25.0 pluggy==1.5.0 pytest==8.3.5 -pytest-asyncio==0.24.0 +pytest-asyncio==0.23.7 pytest-cov==5.0.0 pytest-mock==3.14.1 requests==2.32.4 diff --git a/.riot/requirements/fb50881.txt b/.riot/requirements/fb50881.txt index 0c30a0483de..fc935b5e121 100644 --- a/.riot/requirements/fb50881.txt +++ b/.riot/requirements/fb50881.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/fb50881.in # attrs==25.3.0 -coverage[toml]==7.9.1 +coverage[toml]==7.10.6 exceptiongroup==1.3.0 hypothesis==6.45.0 iniconfig==2.1.0 -mariadb==1.1.12 +mariadb==1.1.13 mock==5.2.0 opentracing==2.4.0 packaging==25.0 pluggy==1.6.0 pygments==2.19.2 -pytest==8.4.1 -pytest-cov==6.2.1 -pytest-mock==3.14.1 +pytest==8.4.2 +pytest-cov==6.3.0 +pytest-mock==3.15.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 tomli==2.2.1 -typing-extensions==4.14.0 +typing-extensions==4.15.0 diff --git a/benchmarks/bm/flask_utils.py b/benchmarks/bm/flask_utils.py index 689be13a28e..79d2c3c5eb9 100644 --- a/benchmarks/bm/flask_utils.py +++ b/benchmarks/bm/flask_utils.py @@ -92,6 +92,7 @@ def setup(self): "DD_APPSEC_ENABLED": str(self.appsec_enabled), "DD_IAST_ENABLED": str(self.iast_enabled), "DD_TELEMETRY_METRICS_ENABLED": str(self.telemetry_metrics_enabled), + "DD_TRACE_RESOURCE_RENAMING_ENABLED": str(self.resource_renaming_enabled), } ) diff --git a/benchmarks/core_api/config.yaml b/benchmarks/core_api/config.yaml index 00356258e0c..16bd87f1b19 100644 --- a/benchmarks/core_api/config.yaml +++ b/benchmarks/core_api/config.yaml @@ -1,75 +1,36 @@ core_dispatch_no_listeners: listeners: 0 - all_listeners: 
0 set_item_count: 0 get_item_exists: false core_dispatch_listeners: listeners: 10 - all_listeners: 0 - set_item_count: 0 - get_item_exists: false -core_dispatch_listeners_and_all_listeners: - listeners: 10 - all_listeners: 10 - set_item_count: 0 - get_item_exists: false -core_dispatch_only_all_listeners: - listeners: 0 - all_listeners: 10 set_item_count: 0 get_item_exists: false core_dispatch_with_results_no_listeners: listeners: 0 - all_listeners: 0 set_item_count: 0 get_item_exists: false core_dispatch_with_results_listeners: listeners: 10 - all_listeners: 0 - set_item_count: 0 - get_item_exists: false -core_dispatch_with_results_listeners_and_all_listeners: - listeners: 10 - all_listeners: 10 - set_item_count: 0 - get_item_exists: false -core_dispatch_with_results_only_all_listeners: - listeners: 0 - all_listeners: 10 set_item_count: 0 get_item_exists: false context_with_data_no_listeners: listeners: 0 - all_listeners: 0 set_item_count: 0 get_item_exists: false context_with_data_listeners: listeners: 10 - all_listeners: 0 - set_item_count: 0 - get_item_exists: false -context_with_data_listeners_and_all_listeners: - listeners: 10 - all_listeners: 10 - set_item_count: 0 - get_item_exists: false -context_with_data_only_all_listeners: - listeners: 10 - all_listeners: 0 set_item_count: 0 get_item_exists: false set_item: listeners: 0 - all_listeners: 0 set_item_count: 100 get_item_exists: false get_item_missing: listeners: 0 - all_listeners: 0 set_item_count: 0 get_item_exists: false get_item_exists: listeners: 0 - all_listeners: 0 set_item_count: 0 get_item_exists: true diff --git a/benchmarks/core_api/scenario.py b/benchmarks/core_api/scenario.py index 1ebd5d3554a..9d91ffcfd6d 100644 --- a/benchmarks/core_api/scenario.py +++ b/benchmarks/core_api/scenario.py @@ -11,7 +11,6 @@ class CoreAPIScenario(bm.Scenario): listeners: int - all_listeners: int set_item_count: int get_item_exists: bool @@ -26,24 +25,6 @@ def listener(*_): core.on("context.started.with_data", listener) core.on("context.ended.with_data", listener) - for _ in range(self.all_listeners): - if hasattr(core, "on_all"): - - def all_listener(event_id, args): - pass - - core.on_all(all_listener) - else: - - def listener(*_): - pass - - # If we don't support "core.on_all", just double up the registered listeners to try - # and make the comparison semi-equal - core.on(CUSTOM_EVENT_NAME, listener) - core.on("context.started.with_data", listener) - core.on("context.ended.with_data", listener) - if self.get_item_exists: core.set_item("key", "value") diff --git a/benchmarks/django_simple/config.yaml b/benchmarks/django_simple/config.yaml index bffc4db8337..9c1de7c2265 100644 --- a/benchmarks/django_simple/config.yaml +++ b/benchmarks/django_simple/config.yaml @@ -10,6 +10,8 @@ baseline: &baseline always_create_database_spans: true django_instrument_templates: true native_writer: false + django_minimal: false + resource_renaming_enabled: false tracer: &tracer <<: *baseline tracer_enabled: true @@ -52,3 +54,9 @@ tracer-no-templates: tracer-native: <<: *tracer native_writer: true +tracer-minimal: + <<: *tracer + django_minimal: true +resource-renaming: + <<: *tracer + resource_renaming_enabled: true diff --git a/benchmarks/django_simple/scenario.py b/benchmarks/django_simple/scenario.py index 68c7c7cdf6c..fb848b36bea 100644 --- a/benchmarks/django_simple/scenario.py +++ b/benchmarks/django_simple/scenario.py @@ -17,6 +17,8 @@ class DjangoSimple(bm.Scenario): always_create_database_spans: bool django_instrument_templates: bool native_writer: 
bool + django_minimal: bool + resource_renaming_enabled: bool def run(self): os.environ["DJANGO_SETTINGS_MODULE"] = "app" @@ -26,6 +28,14 @@ def run(self): os.environ["DD_DJANGO_ALWAYS_CREATE_DATABASE_SPANS"] = "1" if self.always_create_database_spans else "0" os.environ["DD_DJANGO_INSTRUMENT_TEMPLATES"] = "1" if self.django_instrument_templates else "0" + # Use only the minimal setting and the defaults for everything else (mostly based on the value of "minimal") + if self.django_minimal: + os.environ["DD_DJANGO_TRACING_MINIMAL"] = "1" + del os.environ["DD_DJANGO_INSTRUMENT_CACHES"] + del os.environ["DD_DJANGO_INSTRUMENT_DATABASES"] + del os.environ["DD_DJANGO_ALWAYS_CREATE_DATABASE_SPANS"] + del os.environ["DD_DJANGO_INSTRUMENT_TEMPLATES"] + if self.profiler_enabled: os.environ.update( {"DD_PROFILING_ENABLED": "1", "DD_PROFILING_API_TIMEOUT": "0.1", "DD_PROFILING_UPLOAD_INTERVAL": "10"} @@ -40,6 +50,8 @@ def run(self): os.environ.update({"DD_EXCEPTION_REPLAY_ENABLED": "1"}) if self.native_writer: os.environ.update({"_DD_TRACE_WRITER_NATIVE": "1"}) + if self.resource_renaming_enabled: + os.environ.update({"DD_TRACE_RESOURCE_RENAMING_ENABLED": "1"}) # This will not work with gevent workers as the gevent hub has not been # initialized when this hook is called. diff --git a/benchmarks/errortracking_flask_sqli/scenario.py b/benchmarks/errortracking_flask_sqli/scenario.py index abb1468935e..6d63c0672e2 100644 --- a/benchmarks/errortracking_flask_sqli/scenario.py +++ b/benchmarks/errortracking_flask_sqli/scenario.py @@ -14,6 +14,7 @@ class ErrorTrackingFlaskSQLi(bm.Scenario, FlaskScenarioMixin): telemetry_metrics_enabled: bool errortracking_enabled: str native_writer: bool + resource_renaming_enabled: bool def run(self): app = self.create_app() diff --git a/benchmarks/flask_simple/config.yaml b/benchmarks/flask_simple/config.yaml index a846a73cffd..4bc23dc62f2 100644 --- a/benchmarks/flask_simple/config.yaml +++ b/benchmarks/flask_simple/config.yaml @@ -7,6 +7,7 @@ baseline: &baseline post_request: false telemetry_metrics_enabled: false native_writer: false + resource_renaming_enabled: false tracer: &tracer <<: *baseline tracer_enabled: true @@ -32,7 +33,10 @@ appsec-telemetry: tracer-native: <<: *tracer native_writer: true -# The scenarios below produce inconsistent results. We plan to +resource-renaming: + <<: *tracer + resource_renaming_enabled: true +# The scenarios below produce inconsistent results. 
We plan to # disable these scenarios until the root cause is identified: # tracer-and-profiler: # <<: *baseline diff --git a/benchmarks/flask_simple/scenario.py b/benchmarks/flask_simple/scenario.py index cdf2a1aaffb..8f34ed11058 100644 --- a/benchmarks/flask_simple/scenario.py +++ b/benchmarks/flask_simple/scenario.py @@ -15,6 +15,7 @@ class FlaskSimple(bm.Scenario, FlaskScenarioMixin): telemetry_metrics_enabled: bool errortracking_enabled: str native_writer: bool + resource_renaming_enabled: bool def run(self): app = self.create_app() diff --git a/benchmarks/flask_sqli/scenario.py b/benchmarks/flask_sqli/scenario.py index f09ea0e0ba5..313911f780b 100644 --- a/benchmarks/flask_sqli/scenario.py +++ b/benchmarks/flask_sqli/scenario.py @@ -14,6 +14,7 @@ class FlaskSQLi(bm.Scenario, FlaskScenarioMixin): telemetry_metrics_enabled: bool errortracking_enabled: str native_writer: bool + resource_renaming_enabled: bool def run(self): app = self.create_app() diff --git a/ddtrace/_logger.py b/ddtrace/_logger.py index 3d282367d59..3a6a5c5a0f5 100644 --- a/ddtrace/_logger.py +++ b/ddtrace/_logger.py @@ -2,10 +2,13 @@ from os import path from typing import Optional +from ddtrace.internal.logger import get_logger from ddtrace.internal.telemetry import get_config from ddtrace.internal.utils.formats import asbool +log = get_logger(__name__) + DD_LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s".format( "[dd.service=%(dd.service)s dd.env=%(dd.env)s dd.version=%(dd.version)s" " dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] " ) @@ -53,6 +56,8 @@ def configure_ddtrace_logger(): _configure_ddtrace_debug_logger(ddtrace_logger) _configure_ddtrace_file_logger(ddtrace_logger) + # Calling _configure_ddtrace_native_logger should come after Python logging has been configured.
+ _configure_ddtrace_native_logger() def _configure_ddtrace_debug_logger(logger): @@ -125,3 +130,24 @@ def get_log_injection_state(raw_config: Optional[str]) -> bool: normalized, ) return False + + +def _configure_ddtrace_native_logger(): + try: + from ddtrace.internal.native._native import logger + + native_writer_enabled = get_config("_DD_TRACE_WRITER_NATIVE", False, asbool, report_telemetry=True) + if native_writer_enabled: + backend = get_config("_DD_NATIVE_LOGGING_BACKEND", "file", report_telemetry=True) + kwargs = {"output": backend} + if backend == "file": + kwargs["path"] = get_config("_DD_NATIVE_LOGGING_FILE_PATH", "native.log", report_telemetry=True) + kwargs["max_size_bytes"] = get_config( + "_DD_NATIVE_LOGGING_FILE_SIZE_BYTES", 4096, int, report_telemetry=True + ) + kwargs["max_files"] = get_config("_DD_NATIVE_LOGGING_FILE_ROTATION_LEN", 1, int, report_telemetry=True) + + logger.configure(**kwargs) + logger.set_log_level(get_config("_DD_NATIVE_LOGGING_LOG_LEVEL", "warn", report_telemetry=True)) + except Exception: + log.warning("Failed to initialize native logger", exc_info=True) diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index d1559df9cf7..d3f1853eaeb 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -49,6 +49,7 @@ "algoliasearch": True, "futures": True, "freezegun": False, # deprecated, to be removed in ddtrace 4.x + "google_adk": True, "google_generativeai": True, "google_genai": True, "gevent": True, @@ -163,6 +164,7 @@ "azure_servicebus": ("azure.servicebus",), "httplib": ("http.client",), "kafka": ("confluent_kafka",), + "google_adk": ("google.adk",), "google_generativeai": ("google.generativeai",), "google_genai": ("google.genai",), "langgraph": ( diff --git a/ddtrace/_trace/apm_filter.py b/ddtrace/_trace/apm_filter.py new file mode 100644 index 00000000000..89ba4e93dd0 --- /dev/null +++ b/ddtrace/_trace/apm_filter.py @@ -0,0 +1,22 @@ +import os +from typing import List +from typing import Optional + +from ddtrace._trace.processor import TraceProcessor +from ddtrace._trace.span import Span +from ddtrace.internal.utils.formats import asbool + + +class APMTracingEnabledFilter(TraceProcessor): + """ + Trace processor that drops all APM traces when DD_APM_TRACING_ENABLED is set to a falsy value. 
+ """ + + def __init__(self) -> None: + super().__init__() + self._apm_tracing_enabled = asbool(os.getenv("DD_APM_TRACING_ENABLED", "true")) + + def process_trace(self, trace: List[Span]) -> Optional[List[Span]]: + if not self._apm_tracing_enabled: + return None + return trace diff --git a/ddtrace/_trace/processor/resource_renaming.py b/ddtrace/_trace/processor/resource_renaming.py new file mode 100644 index 00000000000..6ee6c664f95 --- /dev/null +++ b/ddtrace/_trace/processor/resource_renaming.py @@ -0,0 +1,77 @@ +import re +from typing import List +from typing import Optional +from urllib.parse import urlparse + +from ddtrace._trace.processor import SpanProcessor +from ddtrace.ext import SpanTypes +from ddtrace.ext import http +from ddtrace.internal.logger import get_logger +from ddtrace.settings._config import config + + +log = get_logger(__name__) + + +class ResourceRenamingProcessor(SpanProcessor): + def __init__(self): + self._INT_RE = re.compile(r"^[1-9][0-9]+$") + self._INT_ID_RE = re.compile(r"^(?=.*[0-9].*)[0-9._-]{3,}$") + self._HEX_RE = re.compile(r"^(?=.*[0-9].*)[A-Fa-f0-9]{6,}$") + self._HEX_ID_RE = re.compile(r"^(?=.*[0-9].*)[A-Fa-f0-9._-]{6,}$") + self._STR_RE = re.compile(r"^(.{20,}|.*[%&'()*+,:=@].*)$") + + def _compute_simplified_endpoint_path_element(self, elem: str) -> str: + """Applies the parameter replacement rules to a single path element.""" + if self._INT_RE.fullmatch(elem): + return "{param:int}" + if self._INT_ID_RE.fullmatch(elem): + return "{param:int_id}" + if self._HEX_RE.fullmatch(elem): + return "{param:hex}" + if self._HEX_ID_RE.fullmatch(elem): + return "{param:hex_id}" + if self._STR_RE.fullmatch(elem): + return "{param:str}" + return elem + + def _compute_simplified_endpoint(self, url: Optional[str]) -> str: + """Extracts and simplifies the path from an HTTP URL.""" + if not url: + return "/" + + try: + parsed_url = urlparse(url) + except ValueError as e: + log.error("Failed to parse http.url tag when processing span for resource renaming: %s", e) + return "/" + path = parsed_url.path + if not path or path == "/": + return "/" + + elements: List[str] = [] + for part in path.split("/"): + if part: + elements.append(part) + if len(elements) >= 8: + break + + if not elements: + return "/" + + elements = [self._compute_simplified_endpoint_path_element(elem) for elem in elements] + return "/" + "/".join(elements) + + def on_span_start(self, span): + pass + + def on_span_finish(self, span): + if not span._is_top_level or span.span_type not in (SpanTypes.WEB, SpanTypes.HTTP, SpanTypes.SERVERLESS): + return + + route = span.get_tag(http.ROUTE) + + if not route or config._trace_resource_renaming_always_simplified_endpoint: + url = span.get_tag(http.URL) + endpoint = self._compute_simplified_endpoint(url) + span.set_tag_str(http.ENDPOINT, endpoint) diff --git a/ddtrace/_trace/trace_handlers.py b/ddtrace/_trace/trace_handlers.py index d593ca126ab..0ec66f8e9aa 100644 --- a/ddtrace/_trace/trace_handlers.py +++ b/ddtrace/_trace/trace_handlers.py @@ -29,6 +29,7 @@ from ddtrace.ext import azure_servicebus as azure_servicebusx from ddtrace.ext import db from ddtrace.ext import http +from ddtrace.ext import net from ddtrace.ext import redis as redisx from ddtrace.internal import core from ddtrace.internal.compat import maybe_stringify @@ -36,11 +37,11 @@ from ddtrace.internal.constants import FLASK_ENDPOINT from ddtrace.internal.constants import FLASK_URL_RULE from ddtrace.internal.constants import FLASK_VIEW_ARGS +from ddtrace.internal.constants import 
MESSAGING_BATCH_COUNT from ddtrace.internal.constants import MESSAGING_DESTINATION_NAME from ddtrace.internal.constants import MESSAGING_MESSAGE_ID from ddtrace.internal.constants import MESSAGING_OPERATION from ddtrace.internal.constants import MESSAGING_SYSTEM -from ddtrace.internal.constants import NETWORK_DESTINATION_NAME from ddtrace.internal.logger import get_logger from ddtrace.internal.schema.span_attribute_schema import SpanDirection from ddtrace.propagation.http import HTTPPropagator @@ -855,6 +856,22 @@ def _set_azure_function_tags(span, azure_functions_config, function_name, trigge span.set_tag_str("aas.function.trigger", trigger) # codespell:ignore +def _set_azure_messaging_tags(ctx, entity_name, operation, system, fully_qualified_namespace, message_id, batch_count): + span = ctx.span + span.set_tag_str(MESSAGING_DESTINATION_NAME, entity_name) + span.set_tag_str(MESSAGING_OPERATION, operation) + span.set_tag_str(MESSAGING_SYSTEM, system) + + if fully_qualified_namespace is not None: + span.set_tag_str(net.TARGET_NAME, fully_qualified_namespace) + + if batch_count is not None: + span.set_tag_str(MESSAGING_BATCH_COUNT, batch_count) + + if message_id is not None: + span.set_tag_str(MESSAGING_MESSAGE_ID, message_id) + + def _on_azure_functions_request_span_modifier(ctx, azure_functions_config, req): span = ctx.span parsed_url = parse.urlparse(req.url) @@ -888,27 +905,40 @@ def _on_azure_functions_trigger_span_modifier(ctx, azure_functions_config, funct def _on_azure_functions_service_bus_trigger_span_modifier( - ctx, azure_functions_config, function_name, trigger, span_kind, entity_name, message_id + ctx, + azure_functions_config, + function_name, + trigger, + span_kind, + entity_name, + fully_qualified_namespace, + message_id=None, + batch_count=None, ): span = ctx.span _set_azure_function_tags(span, azure_functions_config, function_name, trigger, span_kind) - span.set_tag_str(MESSAGING_DESTINATION_NAME, entity_name) - span.set_tag_str(MESSAGING_OPERATION, "receive") - span.set_tag_str(MESSAGING_SYSTEM, azure_servicebusx.SERVICE) - - if message_id is not None: - span.set_tag_str(MESSAGING_MESSAGE_ID, message_id) + _set_azure_messaging_tags( + ctx, + entity_name, + azure_servicebusx.RECEIVE, + azure_servicebusx.SERVICE, + fully_qualified_namespace, + message_id, + batch_count, + ) -def _on_azure_servicebus_send_message_modifier(ctx, azure_servicebus_config, entity_name, fully_qualified_namespace): +def _on_azure_servicebus_message_modifier( + ctx, azure_servicebus_config, operation, entity_name, fully_qualified_namespace, message_id, batch_count +): span = ctx.span span.set_tag_str(COMPONENT, azure_servicebus_config.integration_name) - span.set_tag_str(MESSAGING_DESTINATION_NAME, entity_name) - span.set_tag_str(MESSAGING_OPERATION, "send") - span.set_tag_str(MESSAGING_SYSTEM, azure_servicebusx.SERVICE) - span.set_tag_str(NETWORK_DESTINATION_NAME, fully_qualified_namespace) span.set_tag_str(SPAN_KIND, SpanKind.PRODUCER) + _set_azure_messaging_tags( + ctx, entity_name, operation, azure_servicebusx.SERVICE, fully_qualified_namespace, message_id, batch_count + ) + def _on_router_match(route): req_span = core.get_item("req_span") @@ -979,7 +1009,7 @@ def listen(): core.on("azure.functions.start_response", _on_azure_functions_start_response) core.on("azure.functions.trigger_call_modifier", _on_azure_functions_trigger_span_modifier) core.on("azure.functions.service_bus_trigger_modifier", _on_azure_functions_service_bus_trigger_span_modifier) - 
core.on("azure.servicebus.send_message_modifier", _on_azure_servicebus_send_message_modifier) + core.on("azure.servicebus.message_modifier", _on_azure_servicebus_message_modifier) # web frameworks general handlers core.on("web.request.start", _on_web_framework_start_request) @@ -1048,7 +1078,9 @@ def listen(): "azure.functions.patched_route_request", "azure.functions.patched_service_bus", "azure.functions.patched_timer", - "azure.servicebus.patched_producer", + "azure.servicebus.patched_producer_batch", + "azure.servicebus.patched_producer_schedule", + "azure.servicebus.patched_producer_send", "psycopg.patched_connect", ): core.on(f"context.started.{context_name}", _start_span) diff --git a/ddtrace/_trace/tracer.py b/ddtrace/_trace/tracer.py index 41d8e79d84c..5adf3c14d9a 100644 --- a/ddtrace/_trace/tracer.py +++ b/ddtrace/_trace/tracer.py @@ -20,6 +20,7 @@ from ddtrace._trace.processor import SpanProcessor from ddtrace._trace.processor import TopLevelSpanProcessor from ddtrace._trace.processor import TraceProcessor +from ddtrace._trace.processor.resource_renaming import ResourceRenamingProcessor from ddtrace._trace.provider import BaseContextProvider from ddtrace._trace.provider import DefaultContextProvider from ddtrace._trace.span import Span @@ -74,6 +75,9 @@ def _default_span_processors_factory( span_processors: List[SpanProcessor] = [] span_processors += [TopLevelSpanProcessor()] + if config._trace_resource_renaming_enabled: + span_processors.append(ResourceRenamingProcessor()) + # When using the NativeWriter stats are computed by the native code. if config._trace_compute_stats and not config._trace_writer_native: # Inline the import to avoid pulling in ddsketch or protobuf diff --git a/ddtrace/_trace/utils_botocore/span_tags.py b/ddtrace/_trace/utils_botocore/span_tags.py index e1865e6ba9b..b44f5eaad70 100644 --- a/ddtrace/_trace/utils_botocore/span_tags.py +++ b/ddtrace/_trace/utils_botocore/span_tags.py @@ -92,6 +92,7 @@ def set_botocore_patched_api_call_span_tags(span: Span, instance, args, params, if region_name is not None: span.set_tag_str("aws.region", region_name) span.set_tag_str("region", region_name) + span.set_tag_str("aws.partition", aws.get_aws_partition(region_name)) # Derive peer hostname only in serverless environments to avoid # unnecessary tag noise in traditional hosts/containers. 
diff --git a/ddtrace/appsec/_common_module_patches.py b/ddtrace/appsec/_common_module_patches.py index e5cd0fbebea..702ffc25bab 100644 --- a/ddtrace/appsec/_common_module_patches.py +++ b/ddtrace/appsec/_common_module_patches.py @@ -1,4 +1,3 @@ -# This module must not import other modules unconditionally that require iast import ctypes import io import json @@ -195,9 +194,6 @@ def wrapped_open_ED4CF71136E15EBF(original_open_callable, instance, args, kwargs """ wrapper for open url function """ - if asm_config._iast_enabled: - # TODO: IAST SSRF sink to be added - pass if _get_rasp_capability("ssrf"): try: from ddtrace.appsec._asm_request_context import _get_asm_context @@ -290,14 +286,6 @@ def wrapped_request_D8CB81E472AF98A2(original_request_callable, instance, args, wrapper for third party requests.request function https://requests.readthedocs.io """ - if asm_config._iast_enabled: - from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled - - if is_iast_request_enabled(): - from ddtrace.appsec._iast.taint_sinks.ssrf import _iast_report_ssrf - - _iast_report_ssrf(original_request_callable, *args, **kwargs) - if _get_rasp_capability("ssrf"): try: from ddtrace.appsec._asm_request_context import _get_asm_context diff --git a/ddtrace/appsec/_ddwaf/waf.py b/ddtrace/appsec/_ddwaf/waf.py index 002cc8b1372..aa96d9b8861 100644 --- a/ddtrace/appsec/_ddwaf/waf.py +++ b/ddtrace/appsec/_ddwaf/waf.py @@ -97,11 +97,14 @@ def _set_info(self, diagnostics: ddwaf_object, action: str) -> None: self._cached_version = version for key, value in info_struct.items(): if isinstance(value, dict): - if value.get("error", False): - self.report_error(f"appsec.waf.error::{action}::{key}::{value['error']}", self._cached_version) - elif value.get("errors", False): + if error := value.get("error", False): + self.report_error(f"appsec.waf.error::{action}::{key}::{error}", self._cached_version, action) + elif errors := value.get("errors", False): self.report_error( - f"appsec.waf.error::{action}::{key}::{str(value['errors'])}", self._cached_version, False + f"appsec.waf.error::{action}::{key}::{str(errors)}", + self._cached_version, + action, + False, ) self._info = DDWaf_info( len(rules.get("loaded", [])), diff --git a/ddtrace/appsec/_handlers.py b/ddtrace/appsec/_handlers.py index 43dc1a0216d..806e787cf93 100644 --- a/ddtrace/appsec/_handlers.py +++ b/ddtrace/appsec/_handlers.py @@ -1,4 +1,3 @@ -import asyncio import io import json from typing import Any @@ -171,6 +170,9 @@ def _on_lambda_parse_body( async def _on_asgi_request_parse_body(receive, headers): if asm_config._asm_enabled: + # This must not be imported globally due to 3rd party patching timeline + import asyncio + more_body = True body_parts = [] try: diff --git a/ddtrace/appsec/_iast/_ast/iastpatch.c b/ddtrace/appsec/_iast/_ast/iastpatch.c index 83bd88e710a..6cb755f309e 100644 --- a/ddtrace/appsec/_iast/_ast/iastpatch.c +++ b/ddtrace/appsec/_iast/_ast/iastpatch.c @@ -17,11 +17,22 @@ static char** cached_packages = NULL; static size_t cached_packages_count = 0; /* Static Lists */ -static const char* static_allowlist[] = { - "jinja2.", "pygments.", "multipart.", "sqlalchemy.", "python_multipart.", - "attrs.", "jsonschema.", "s3fs.", "mysql.", "pymysql.", - "markupsafe.", "werkzeug.utils.", "langchain.", "langchain_core.", "django.http.response." 
-}; +static const char* static_allowlist[] = { "jinja2.", + "pygments.", + "multipart.", + "sqlalchemy.", + "python_multipart.", + "attrs.", + "jsonschema.", + "s3fs.", + "mysql.", + "pymysql.", + "markupsafe.", + "werkzeug.utils.", + "langchain.", + "langchain_core.", + "django.http.response.", + "langchain_experimental." }; static const size_t static_allowlist_count = sizeof(static_allowlist) / sizeof(static_allowlist[0]); static const char* static_denylist[] = { diff --git a/ddtrace/appsec/_iast/_ast/visitor.py b/ddtrace/appsec/_iast/_ast/visitor.py index 895155a6ee8..a7e1474f5f9 100644 --- a/ddtrace/appsec/_iast/_ast/visitor.py +++ b/ddtrace/appsec/_iast/_ast/visitor.py @@ -14,8 +14,10 @@ from ..._constants import IAST from .._metrics import _set_metric_iast_instrumented_propagation +from ..constants import DEFAULT_COMMAND_INJECTION_FUNCTIONS from ..constants import DEFAULT_PATH_TRAVERSAL_FUNCTIONS from ..constants import DEFAULT_SOURCE_IO_FUNCTIONS +from ..constants import DEFAULT_SSRF_FUNCTIONS from ..constants import DEFAULT_WEAK_RANDOMNESS_FUNCTIONS @@ -134,6 +136,8 @@ def _mark_avoid_convert_recursively(node): "taint_sinks": { "weak_randomness": DEFAULT_WEAK_RANDOMNESS_FUNCTIONS, "path_traversal": DEFAULT_PATH_TRAVERSAL_FUNCTIONS, + "cmd_injection": DEFAULT_COMMAND_INJECTION_FUNCTIONS, + "ssrf": DEFAULT_SSRF_FUNCTIONS, # These explicitly WON'T be replaced by taint_sink_function: "disabled": { "__new__", @@ -182,6 +186,8 @@ def __init__( self._taint_sink_replace_any = self._merge_dicts( _ASPECTS_SPEC["taint_sinks"]["weak_randomness"], *[functions for module, functions in _ASPECTS_SPEC["taint_sinks"]["path_traversal"].items()], + *[functions for module, functions in _ASPECTS_SPEC["taint_sinks"]["cmd_injection"].items()], + *[functions for module, functions in _ASPECTS_SPEC["taint_sinks"]["ssrf"].items()], ) self._source_replace_any = self._merge_dicts( *[functions for module, functions in _ASPECTS_SPEC["sources"]["io"].items()], @@ -190,6 +196,7 @@ def __init__( self._taint_sink_replace_disabled = _ASPECTS_SPEC["taint_sinks"]["disabled"] self.update_location(filename, module_name) + self.allowed_replacements = {CODE_TYPE_FIRST_PARTY, CODE_TYPE_SITE_PACKAGES} def update_location(self, filename: str = "", module_name: str = ""): self.filename = filename @@ -604,7 +611,7 @@ def visit_Call(self, call_node: ast.Call) -> Any: call_node.func = self._attr_node(call_node, SOURCES_FUNCTION_REPLACEMENT) self.ast_modified = call_modified = True - if self.codetype == CODE_TYPE_FIRST_PARTY: + if self.codetype in self.allowed_replacements: # Function replacement case if isinstance(call_node.func, ast.Name): aspect = self._should_replace_with_taint_sink(call_node, True) diff --git a/ddtrace/appsec/_iast/_langchain.py b/ddtrace/appsec/_iast/_langchain.py index 60d699297b8..49456bf370c 100644 --- a/ddtrace/appsec/_iast/_langchain.py +++ b/ddtrace/appsec/_iast/_langchain.py @@ -132,7 +132,7 @@ def _langchain_llm_generate_after(prompts, completions): ) setattr(gen, text_attr, new_text) except Exception as e: - iast_error(f"propagation::source::langchain _langchain_llm_generate_after. 
{e}") + iast_error("propagation::source::langchain _langchain_llm_generate_after", e) def _langchain_chatmodel_generate_after(messages, completions): @@ -200,7 +200,7 @@ def _langchain_chatmodel_generate_after(messages, completions): if isinstance(arguments, str): function_call["arguments"] = _iast_taint_if_str(source, arguments) except Exception as e: - iast_error(f"propagation::source::langchain _langchain_chatmodel_generate_after. {e}") + iast_error("propagation::source::langchain _langchain_chatmodel_generate_after", e) def _langchain_stream_chunk_callback(interface_type, args, kwargs): @@ -218,7 +218,7 @@ def _iast_chunk_taint(chunk): try: _langchain_iast_taint_chunk(source, chunk) except Exception as e: - iast_error(f"propagation::source::langchain _langchain_iast_taint_chunk. {e}") + iast_error("propagation::source::langchain _langchain_iast_taint_chunk", e) return _iast_chunk_taint @@ -333,7 +333,7 @@ def _propagate_prompt_template_format(kwargs, result): source = ranges[0].source return taint_pyobject(result, source.name, source.value, source.origin) except Exception as e: - iast_error(f"propagation::source::langchain iast_propagate_prompt_template_format. {e}") + iast_error("propagation::source::langchain iast_propagate_prompt_template_format", e) return result @@ -364,5 +364,5 @@ def _propagante_agentoutput_parse(args, kwargs, result): values = result.return_values values["output"] = taint_pyobject(values["output"], source.name, source.value, source.origin) except Exception as e: - iast_error(f"propagation::source::langchain taint_parser_output. {e}") + iast_error("propagation::source::langchain taint_parser_output", e) return result diff --git a/ddtrace/appsec/_iast/_logs.py b/ddtrace/appsec/_iast/_logs.py index 4f746c84e3c..5c07099d940 100644 --- a/ddtrace/appsec/_iast/_logs.py +++ b/ddtrace/appsec/_iast/_logs.py @@ -1,5 +1,6 @@ +from typing import Union + from ddtrace.appsec._iast._metrics import _set_iast_error_metric -from ddtrace.appsec._iast._utils import _is_iast_debug_enabled from ddtrace.internal.logger import get_logger from ddtrace.settings.asm import config as asm_config @@ -40,19 +41,9 @@ def iast_instrumentation_ast_patching_errorr_log(msg): iast_error(msg, default_prefix="iast::instrumentation::ast_patching::") -def iast_propagation_error_log(msg): - iast_error(msg, default_prefix="iast::propagation::error::") - +def iast_propagation_error_log(msg, exc: Union[BaseException, tuple, None] = None): + iast_error(msg, default_prefix="iast::propagation::error::", exc=exc) -def iast_error(msg, default_prefix="iast::"): - if _is_iast_debug_enabled(): - # Import inspect locally to avoid gevent compatibility issues. - # Top-level imports of inspect can interfere with gevent's monkey patching - # and cause sporadic worker timeouts in Gunicorn applications. - # See ddtrace/internal/iast/product.py for detailed explanation. 
- import inspect - stack = inspect.stack() - frame_info = "\n".join("%s %s" % (frame_info.filename, frame_info.lineno) for frame_info in stack[:7]) - log.debug("%s%s:\n%s", default_prefix, msg, frame_info) - _set_iast_error_metric(f"{default_prefix}{msg}") +def iast_error(msg, default_prefix="iast::", exc: Union[BaseException, tuple, None] = None): + _set_iast_error_metric(f"{default_prefix}{msg}", exc=exc) diff --git a/ddtrace/appsec/_iast/_metrics.py b/ddtrace/appsec/_iast/_metrics.py index f93d8c29bd6..a27a0355c95 100644 --- a/ddtrace/appsec/_iast/_metrics.py +++ b/ddtrace/appsec/_iast/_metrics.py @@ -1,7 +1,6 @@ -import sys -import traceback from typing import Dict from typing import Text +from typing import Union from ddtrace.appsec._constants import IAST from ddtrace.appsec._constants import TELEMETRY_INFORMATION_VERBOSITY @@ -13,7 +12,6 @@ from ddtrace.appsec._iast._utils import _is_iast_debug_enabled from ddtrace.internal import telemetry from ddtrace.internal.logger import get_logger -from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.settings.asm import config as asm_config @@ -46,27 +44,13 @@ def wrapper(f): @metric_verbosity(TELEMETRY_MANDATORY_VERBOSITY) @deduplication -def _set_iast_error_metric(msg: Text) -> None: +def _set_iast_error_metric(msg: Text, exc: Union[BaseException, tuple, None] = None) -> None: """This was originally implemented to analyze which services were triggering this issue, and we used that insight to refactor how IAST creates and destroys context. However, after that refactor, this information no longer provides value and only adds noise. So now, those telemetry metrics are only emitted if IAST is in debug mode """ - try: - if _is_iast_debug_enabled(): - exception_type, exception_instance, _traceback_list = sys.exc_info() - res = [] - # first 10 frames are this function, the exception in aspects and the error line - res.extend(traceback.format_stack(limit=20)) - - # get the frame with the error and the error message - result = traceback.format_exception(exception_type, exception_instance, _traceback_list) - res.extend(result[1:]) - - stack_trace = "".join(res) - - telemetry.telemetry_writer.add_log(TELEMETRY_LOG_LEVEL.ERROR, msg, stack_trace=stack_trace) - except Exception: - log.warning("iast::metrics::error::_set_iast_error_metric", exc_info=True) + if _is_iast_debug_enabled(): + telemetry.telemetry_writer.add_error_log(msg, exc=exc) @metric_verbosity(TELEMETRY_MANDATORY_VERBOSITY) diff --git a/ddtrace/appsec/_iast/_taint_tracking/_taint_objects_base.py b/ddtrace/appsec/_iast/_taint_tracking/_taint_objects_base.py index a8c3cdf8fbc..40ed038ba87 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/_taint_objects_base.py +++ b/ddtrace/appsec/_iast/_taint_tracking/_taint_objects_base.py @@ -72,7 +72,7 @@ def get_tainted_ranges(pyobject: Any) -> Tuple: try: return get_ranges(pyobject) except ValueError as e: - iast_propagation_error_log(f"get_tainted_ranges error (pyobject type {type(pyobject)}): {e}") + iast_propagation_error_log(f"get_tainted_ranges error (pyobject type {type(pyobject)})", exc=e) return tuple() @@ -83,5 +83,5 @@ def is_pyobject_tainted(pyobject: Any) -> bool: try: return is_in_taint_map(pyobject) except ValueError as e: - iast_propagation_error_log(f"Checking tainted object error: {e}") + iast_propagation_error_log("Checking tainted object error", exc=e) return False diff --git a/ddtrace/appsec/_iast/_taint_tracking/aspects.py 
b/ddtrace/appsec/_iast/_taint_tracking/aspects.py index d2dabb493b7..368054359c7 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/aspects.py +++ b/ddtrace/appsec/_iast/_taint_tracking/aspects.py @@ -134,7 +134,7 @@ def stringio_aspect(orig_function: Optional[Callable], flag_added_args: int, *ar try: copy_ranges_from_strings(args[0], result) except Exception as e: - iast_propagation_error_log(f"stringio_aspect. {e}") + iast_propagation_error_log("stringio_aspect", e) return result @@ -152,7 +152,7 @@ def bytesio_aspect(orig_function: Optional[Callable], flag_added_args: int, *arg try: copy_ranges_from_strings(args[0], result) except Exception as e: - iast_propagation_error_log(f"bytesio_aspect. {e}") + iast_propagation_error_log("bytesio_aspect", e) return result @@ -170,7 +170,7 @@ def bytes_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: try: copy_ranges_from_strings(args[0], result) except Exception as e: - iast_propagation_error_log(f"bytes_aspect. {e}") + iast_propagation_error_log("bytes_aspect", e) return result @@ -188,7 +188,7 @@ def bytearray_aspect(orig_function: Optional[Callable], flag_added_args: int, *a try: copy_ranges_from_strings(args[0], result) except Exception as e: - iast_propagation_error_log(f"bytearray_aspect. {e}") + iast_propagation_error_log("bytearray_aspect", e) return result @@ -228,7 +228,7 @@ def bytearray_extend_aspect(orig_function: Optional[Callable], flag_added_args: try: return _extend_aspect(op1, op2) except Exception as e: - iast_propagation_error_log(f"extend_aspect. {e}") + iast_propagation_error_log("extend_aspect", e) return op1.extend(op2) @@ -264,7 +264,7 @@ def ljust_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: taint_pyobject_with_ranges(result, ranges_new) return result except Exception as e: - iast_propagation_error_log(f"ljust_aspect. {e}") + iast_propagation_error_log("ljust_aspect", e) return result @@ -313,7 +313,7 @@ def zfill_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: ) taint_pyobject_with_ranges(result, tuple(ranges_new)) except Exception as e: - iast_propagation_error_log(f"zfill_aspect. {e}") + iast_propagation_error_log("zfill_aspect", e) return result @@ -343,7 +343,7 @@ def format_aspect(orig_function: Optional[Callable], flag_added_args: int, *args params = tuple(args) + tuple(kwargs.values()) return _format_aspect(candidate_text, params, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"format_aspect. {e}") + iast_propagation_error_log("format_aspect", e) return candidate_text.format(*args, **kwargs) @@ -397,7 +397,7 @@ def format_map_aspect(orig_function: Optional[Callable], flag_added_args: int, * return result return aspect_result except Exception as e: - iast_propagation_error_log(f"format_map_aspect. {e}") + iast_propagation_error_log("format_map_aspect", e) return result @@ -428,7 +428,7 @@ def repr_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: copy_and_shift_ranges_from_strings(args[0], result, offset, len(check_offset)) except Exception as e: - iast_propagation_error_log(f"repr_aspect. {e}") + iast_propagation_error_log("repr_aspect", e) return result @@ -468,7 +468,7 @@ def format_value_aspect( else: return new_new_text except Exception as e: - iast_propagation_error_log(f"format_value_aspect. 
{e}") + iast_propagation_error_log("format_value_aspect", e) return new_new_text return format(new_text) @@ -545,7 +545,7 @@ def decode_aspect(orig_function: Optional[Callable], flag_added_args: int, *args inc_dec = codecs.getincrementaldecoder(codec)(**kwargs) return incremental_translation(self, inc_dec, inc_dec.decode, "") except Exception as e: - iast_propagation_error_log(f"decode_aspect. {e}") + iast_propagation_error_log("decode_aspect", e) return result @@ -566,7 +566,7 @@ def encode_aspect(orig_function: Optional[Callable], flag_added_args: int, *args inc_enc = codecs.getincrementalencoder(codec)(**kwargs) return incremental_translation(self, inc_enc, inc_enc.encode, b"") except Exception as e: - iast_propagation_error_log(f"encode_aspect. {e}") + iast_propagation_error_log("encode_aspect", e) return result @@ -585,7 +585,7 @@ def upper_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: try: return common_replace("upper", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"upper_aspect. {e}") + iast_propagation_error_log("upper_aspect", e) return candidate_text.upper(*args, **kwargs) @@ -603,7 +603,7 @@ def lower_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: try: return common_replace("lower", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"lower_aspect. {e}") + iast_propagation_error_log("lower_aspect", e) return candidate_text.lower(*args, **kwargs) @@ -823,7 +823,7 @@ def replace_aspect(orig_function: Optional[Callable], flag_added_args: int, *arg return aspect_result except Exception as e: - iast_propagation_error_log(f"replace_aspect. {e}") + iast_propagation_error_log("replace_aspect", e) return orig_result @@ -840,7 +840,7 @@ def swapcase_aspect(orig_function: Optional[Callable], flag_added_args: int, *ar try: return common_replace("swapcase", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"swapcase_aspect. {e}") + iast_propagation_error_log("swapcase_aspect", e) return candidate_text.swapcase(*args, **kwargs) @@ -857,7 +857,7 @@ def title_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: try: return common_replace("title", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"title_aspect. {e}") + iast_propagation_error_log("title_aspect", e) return candidate_text.title(*args, **kwargs) @@ -875,7 +875,7 @@ def capitalize_aspect(orig_function: Optional[Callable], flag_added_args: int, * try: return common_replace("capitalize", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"capitalize_aspect. {e}") + iast_propagation_error_log("capitalize_aspect", e) return candidate_text.capitalize(*args, **kwargs) @@ -906,7 +906,7 @@ def casefold_aspect(orig_function: Optional[Callable], flag_added_args: int, *ar try: return common_replace("casefold", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"casefold_aspect. {e}") + iast_propagation_error_log("casefold_aspect", e) return candidate_text.casefold(*args, **kwargs) # type: ignore[union-attr] @@ -923,7 +923,7 @@ def translate_aspect(orig_function: Optional[Callable], flag_added_args: int, *a try: return common_replace("translate", candidate_text, *args, **kwargs) except Exception as e: - iast_propagation_error_log(f"translate_aspect. 
{e}") + iast_propagation_error_log("translate_aspect", e) return candidate_text.translate(*args, **kwargs) @@ -964,7 +964,7 @@ def re_findall_aspect( if ranges: result = copy_ranges_to_iterable_with_strings(result, ranges) except Exception as e: - iast_propagation_error_log(f"re_findall_aspect. {e}") + iast_propagation_error_log("re_findall_aspect", e) return result @@ -1002,7 +1002,7 @@ def re_finditer_aspect(orig_function: Optional[Callable], flag_added_args: int, for elem in result_backup: taint_pyobject_with_ranges(elem, ranges) except Exception as e: - iast_propagation_error_log(f"IAST propagation error. re_finditer_aspect. {e}") + iast_propagation_error_log("IAST propagation error. re_finditer_aspect", e) return result @@ -1175,7 +1175,7 @@ def re_groups_aspect(orig_function: Optional[Callable], flag_added_args: int, *a try: return copy_ranges_to_iterable_with_strings(result, get_ranges(self)) except Exception as e: - iast_propagation_error_log(f"re_groups_aspect. {e}") + iast_propagation_error_log("re_groups_aspect", e) return result @@ -1200,7 +1200,7 @@ def re_group_aspect(orig_function: Optional[Callable], flag_added_args: int, *ar else: result = copy_ranges_to_string(result, get_ranges(self)) except Exception as e: - iast_propagation_error_log(f"re_group_aspect. {e}") + iast_propagation_error_log("re_group_aspect", e) return result @@ -1231,7 +1231,7 @@ def re_expand_aspect(orig_function: Optional[Callable], flag_added_args: int, *a elif is_pyobject_tainted(args[0]): result = copy_ranges_to_string(result, get_ranges(args[0])) except Exception as e: - iast_propagation_error_log(f"re_expand_aspect. {e}") + iast_propagation_error_log("re_expand_aspect", e) return result @@ -1241,7 +1241,7 @@ def ospathjoin_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathjoin(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"ospathjoin_aspect. {e}") + iast_propagation_error_log("ospathjoin_aspect", e) return os.path.join(*args, **kwargs) @@ -1251,7 +1251,7 @@ def ospathbasename_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathbasename(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"_aspect_ospathbasename. {e}") + iast_propagation_error_log("_aspect_ospathbasename", e) return os.path.basename(*args, **kwargs) @@ -1261,7 +1261,7 @@ def ospathdirname_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathdirname(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"_aspect_ospathdirname. {e}") + iast_propagation_error_log("_aspect_ospathdirname", e) return os.path.dirname(*args, **kwargs) @@ -1271,7 +1271,7 @@ def ospathnormcase_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathnormcase(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"ospathnormcase_aspect. {e}") + iast_propagation_error_log("ospathnormcase_aspect", e) return os.path.normcase(*args, **kwargs) @@ -1281,7 +1281,7 @@ def ospathsplit_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathsplit(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"ospathnormcase_aspect. {e}") + iast_propagation_error_log("ospathnormcase_aspect", e) return os.path.split(*args, **kwargs) @@ -1291,7 +1291,7 @@ def ospathsplitdrive_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathsplitdrive(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"_aspect_ospathsplitdrive. 
{e}") + iast_propagation_error_log("_aspect_ospathsplitdrive", e) return os.path.splitdrive(*args, **kwargs) @@ -1301,7 +1301,7 @@ def ospathsplitext_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathsplitext(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"_aspect_ospathsplitext. {e}") + iast_propagation_error_log("_aspect_ospathsplitext", e) return os.path.splitext(*args, **kwargs) @@ -1311,7 +1311,7 @@ def ospathsplitroot_aspect(*args: Any, **kwargs: Any) -> Any: try: return _aspect_ospathsplitroot(*args, **kwargs) except Exception as e: - iast_propagation_error_log(f"_aspect_ospathsplitroot. {e}") + iast_propagation_error_log("_aspect_ospathsplitroot", e) return os.path.splitroot(*args, **kwargs) # type: ignore[attr-defined] @@ -1334,7 +1334,7 @@ def lstrip_aspect(orig_function: Optional[Callable], flag_added_args: int, *args _strip_lstrip_aspect(candidate_text, result) return result except Exception as e: - iast_propagation_error_log(f"lstrip_aspect. {e}") + iast_propagation_error_log("lstrip_aspect", e) return result @@ -1377,7 +1377,7 @@ def rstrip_aspect(orig_function: Optional[Callable], flag_added_args: int, *args taint_pyobject_with_ranges(result, tuple(ranges_new)) return result except Exception as e: - iast_propagation_error_log(f"rstrip_aspect. {e}") + iast_propagation_error_log("rstrip_aspect", e) return result @@ -1399,7 +1399,7 @@ def strip_aspect(orig_function: Optional[Callable], flag_added_args: int, *args: _strip_lstrip_aspect(candidate_text, result) return result except Exception as e: - iast_propagation_error_log(f"strip_aspect. {e}") + iast_propagation_error_log("strip_aspect", e) return result @@ -1441,6 +1441,6 @@ def modulo_aspect(*args: Any, **kwargs: Any) -> Any: try: return _modulo_aspect(args[0], args[1], result) except Exception as e: - iast_propagation_error_log(f"modulo_aspect. {e}") + iast_propagation_error_log("modulo_aspect", e) return result diff --git a/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.cpp b/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.cpp index 09a71d61e6a..397b43d7b88 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.cpp +++ b/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.cpp @@ -59,6 +59,15 @@ TaintEngineContext::TaintEngineContext() { } +// Lifecycle guard. Prevent access during interpreter/module teardown. 
+std::atomic<bool> TaintEngineContext::shutting_down{ false };
+
+void
+TaintEngineContext::set_shutting_down(bool v)
+{
+    shutting_down.store(v, std::memory_order_release);
+}
+
 std::optional
 TaintEngineContext::start_request_context()
 {
@@ -171,6 +180,9 @@ TaintEngineContext::get_tainted_object_map(PyObject* obj)
 TaintedObjectMapTypePtr
 TaintEngineContext::get_tainted_object_map_from_pyobject(PyObject* tainted_object)
 {
+    if (shutting_down.load(std::memory_order_acquire)) {
+        return nullptr;
+    }
     for (const auto& context_map : request_context_slots) {
         if (!context_map) {
             continue;
diff --git a/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.h b/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.h
index 6c2eab01deb..5a083ec0e23 100644
--- a/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.h
+++ b/ddtrace/appsec/_iast/_taint_tracking/context/taint_engine_context.h
@@ -32,6 +32,7 @@
 #include
 #include
+#include <atomic>
 #include
 #include
 #include
@@ -49,9 +50,15 @@ class TaintEngineContext
     // Parse and clamp capacity from environment
     static size_t assign_request_context_slots_size();

+    // Global lifecycle flag to avoid use-after-destruction during interpreter/module teardown.
+    static std::atomic<bool> shutting_down;
+
  public:
    TaintEngineContext();

+    // Lifecycle control: mark the context as shutting down to prevent further access.
+    static void set_shutting_down(bool v);
+
    // Fast-path: get the taint map for a known context_id (slot index).
    // Returns nullptr if the slot is empty or out of lifecycle.
    TaintedObjectMapTypePtr get_tainted_object_map_by_ctx_id(size_t ctx_id);
diff --git a/ddtrace/appsec/_iast/_taint_tracking/native.cpp b/ddtrace/appsec/_iast/_taint_tracking/native.cpp
index 295f188c499..61a10e2ca8e 100644
--- a/ddtrace/appsec/_iast/_taint_tracking/native.cpp
+++ b/ddtrace/appsec/_iast/_taint_tracking/native.cpp
@@ -72,6 +72,25 @@ PYBIND11_MODULE(_native, m)
     // Create a atexit callback to cleanup the Initializer before the interpreter finishes
     auto atexit_register = safe_import("atexit", "register");
     atexit_register(py::cpp_function([]() {
+        // During interpreter shutdown (esp. with gevent), heavy cleanup can
+        // trigger refcounting or Python API calls without a valid runtime.
+        // If gevent monkey-patching is active, skip setting the shutdown flag
+        // because it interferes with greenlet scheduling at exit.
+        bool gevent_active = false;
+        try {
+            auto is_patched = safe_import("gevent.monkey", "is_module_patched");
+            gevent_active =
+              asbool(is_patched("threading")) || asbool(is_patched("socket")) || asbool(is_patched("ssl"));
+        } catch (const py::error_already_set&) {
+            PyErr_Clear();
+        }
+
+        if (!gevent_active) {
+            py::gil_scoped_acquire gil; // safe to touch Python-adjacent state
+            TaintEngineContext::set_shutting_down(true);
+        }
+
        initializer.reset();
        if (taint_engine_context) {
            taint_engine_context->clear_all_request_context_slots();
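The atexit hook above only flips the shutdown flag when gevent has not monkey-patched core modules, since tearing down under gevent can break greenlet scheduling. A Python-level sketch of the same check the C++ lambda performs through `safe_import` (`gevent.monkey.is_module_patched` is the real gevent API):

    def gevent_is_active() -> bool:
        try:
            from gevent import monkey
        except ImportError:
            return False
        # Mirrors the native check: if any of these is patched, gevent owns teardown.
        return any(monkey.is_module_patched(m) for m in ("threading", "socket", "ssl"))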
diff --git a/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.cpp b/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.cpp
index 52438f1dceb..3031cfa4012 100644
--- a/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.cpp
+++ b/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.cpp
@@ -458,6 +458,7 @@ pyexport_taintrange(py::module& m)
       .value("SQL_INJECTION", VulnerabilityType::SQL_INJECTION)
       .value("SSRF", VulnerabilityType::SSRF)
       .value("STACKTRACE_LEAK", VulnerabilityType::STACKTRACE_LEAK)
+      .value("UNTRUSTED_SERIALIZATION", VulnerabilityType::UNTRUSTED_SERIALIZATION)
       .value("WEAK_CIPHER", VulnerabilityType::WEAK_CIPHER)
       .value("WEAK_HASH", VulnerabilityType::WEAK_HASH)
       .value("WEAK_RANDOMNESS", VulnerabilityType::WEAK_RANDOMNESS)
diff --git a/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.h b/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.h
index ce08b6fd201..9add6c43974 100644
--- a/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.h
+++ b/ddtrace/appsec/_iast/_taint_tracking/taint_tracking/taint_range.h
@@ -36,21 +36,22 @@
 using TaintedObjectMapType = std::unordered_map;
diff --git a/ddtrace/appsec/_iast/constants.py b/ddtrace/appsec/_iast/constants.py
index d1d71d158fa..f7f56601fb0 100644
--- a/ddtrace/appsec/_iast/constants.py
+++ b/ddtrace/appsec/_iast/constants.py
@@ -14,6 +14,7 @@
 VULN_UNVALIDATED_REDIRECT = "UNVALIDATED_REDIRECT"
 VULN_CODE_INJECTION = "CODE_INJECTION"
 VULN_XSS = "XSS"
+VULN_UNTRUSTED_SERIALIZATION = "UNTRUSTED_SERIALIZATION"
 VULN_SSRF = "SSRF"
 VULN_STACKTRACE_LEAK = "STACKTRACE_LEAK"
@@ -71,8 +72,6 @@
         "rmdir",
         "listdir",
     },
-    "pickle": {"load"},
-    "_pickle": {"load"},
     "posix": {
         "mkdir",
         "remove",
@@ -89,6 +88,41 @@
     "tarfile": {"open"},
     "zipfile": {"ZipFile"},
 }
+
+DEFAULT_COMMAND_INJECTION_FUNCTIONS = {
+    "subprocess": {"Popen", "Popen.wait", "run"},
+    "os": {
+        "system",
+        "spawnl",
+        "spawnlp",
+        "spawnv",
+        "spawnvp",
+    },
+    "posix": {
+        "system",
+    },
+}
+
+DEFAULT_SSRF_FUNCTIONS = {
+    "requests.api": {"get", "post", "put"},
+    "urllib3": {
+        "request",
+    },
+    "urllib3._request_methods": {
+        "request",
+    },
+    "http.client": {
+        "request",
+    },
+    "urllib.request": {
+        "urlopen",
+    },
+    "webbrowser": {
+        "open",
+    },
+}
+
 DBAPI_SQLITE = "sqlite"
 DBAPI_PSYCOPG = "psycopg"
 DBAPI_MYSQL = "mysql"
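The new `DEFAULT_COMMAND_INJECTION_FUNCTIONS` and `DEFAULT_SSRF_FUNCTIONS` tables map a module name to the set of function names treated as sinks. A simplified sketch of how a wrapped call site consults such a table (the `is_ssrf_sink` helper is illustrative, not the library's dispatch code):

    import urllib.request

    DEFAULT_SSRF_FUNCTIONS = {
        "requests.api": {"get", "post", "put"},
        "urllib.request": {"urlopen"},
    }

    def is_ssrf_sink(func) -> bool:
        # The sink fires only when both the module and the function name match.
        module = getattr(func, "__module__", None)
        return func.__name__ in DEFAULT_SSRF_FUNCTIONS.get(module, set())

    assert is_ssrf_sink(urllib.request.urlopen)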
diff --git a/ddtrace/appsec/_iast/main.py b/ddtrace/appsec/_iast/main.py
index 9a3bcea664a..b0ce66a241a 100644
--- a/ddtrace/appsec/_iast/main.py
+++ b/ddtrace/appsec/_iast/main.py
@@ -34,9 +34,9 @@
 from ddtrace.appsec._iast.secure_marks.validators import ssrf_validator
 from ddtrace.appsec._iast.secure_marks.validators import unvalidated_redirect_validator
 from ddtrace.appsec._iast.taint_sinks.code_injection import patch as code_injection_patch
-from ddtrace.appsec._iast.taint_sinks.command_injection import patch as command_injection_patch
 from ddtrace.appsec._iast.taint_sinks.header_injection import patch as header_injection_patch
 from ddtrace.appsec._iast.taint_sinks.insecure_cookie import patch as insecure_cookie_patch
+from ddtrace.appsec._iast.taint_sinks.untrusted_serialization import patch as untrusted_serialization_patch
 from ddtrace.appsec._iast.taint_sinks.unvalidated_redirect import patch as unvalidated_redirect_patch
 from ddtrace.appsec._iast.taint_sinks.weak_cipher import patch as weak_cipher_patch
 from ddtrace.appsec._iast.taint_sinks.weak_hash import patch as weak_hash_patch
@@ -66,9 +66,9 @@ def patch_iast():
     # sink points
     if asm_config._iast_sink_points_enabled:
         code_injection_patch()
-        command_injection_patch()
         header_injection_patch()
         insecure_cookie_patch()
+        untrusted_serialization_patch()
         unvalidated_redirect_patch()
         weak_cipher_patch()
         weak_hash_patch()
diff --git a/ddtrace/appsec/_iast/taint_sinks/_base.py b/ddtrace/appsec/_iast/taint_sinks/_base.py
index d1f9a199abd..c82186e63fd 100644
--- a/ddtrace/appsec/_iast/taint_sinks/_base.py
+++ b/ddtrace/appsec/_iast/taint_sinks/_base.py
@@ -132,12 +132,19 @@ def _compute_file_line(cls) -> Tuple[Optional[str], Optional[int], Optional[str]
     @staticmethod
     def _rel_path(file_name: str) -> str:
-        if file_name.startswith(PURELIB_PATH):
-            return os.path.relpath(file_name, start=PURELIB_PATH)
-        if file_name.startswith(STDLIB_PATH):
-            return os.path.relpath(file_name, start=STDLIB_PATH)
-        if file_name.startswith(CWD):
-            return os.path.relpath(file_name, start=CWD)
+        # Normalize separators to forward slashes for consistency
+        file_name_norm = file_name.replace("\\", "/")
+        if file_name_norm.startswith(PURELIB_PATH):
+            return os.path.relpath(file_name_norm, start=PURELIB_PATH)
+
+        if file_name_norm.startswith(STDLIB_PATH):
+            return os.path.relpath(file_name_norm, start=STDLIB_PATH)
+        if file_name_norm.startswith(CWD):
+            return os.path.relpath(file_name_norm, start=CWD)
+        # If the path contains site-packages anywhere, return the path from
+        # '/site-packages/' onwards
+        if (idx := file_name_norm.find("/site-packages/")) != -1:
+            return file_name_norm[idx:]
         return ""
@@ -155,7 +162,7 @@ def _create_evidence_and_report(
     ):
         if isinstance(evidence_value, IAST.TEXT_TYPES):
             if isinstance(evidence_value, (bytes, bytearray)):
-                evidence_value = evidence_value.decode("utf-8")
+                evidence_value = evidence_value.decode("utf-8", "ignore")
             evidence = Evidence(value=evidence_value, dialect=dialect)
         else:
             log.debug("Unexpected evidence_value type: %s", type(evidence_value))
diff --git a/ddtrace/appsec/_iast/taint_sinks/ast_taint.py b/ddtrace/appsec/_iast/taint_sinks/ast_taint.py
index dc4f09cfbbf..4eef9df4edb 100644
--- a/ddtrace/appsec/_iast/taint_sinks/ast_taint.py
+++ b/ddtrace/appsec/_iast/taint_sinks/ast_taint.py
@@ -5,9 +5,13 @@
 from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled
 from ddtrace.appsec._iast._metrics import _set_metric_iast_executed_sink
 from ddtrace.appsec._iast._span_metrics import increment_iast_span_metric
+from ddtrace.appsec._iast.constants import DEFAULT_COMMAND_INJECTION_FUNCTIONS
 from ddtrace.appsec._iast.constants import DEFAULT_PATH_TRAVERSAL_FUNCTIONS
+from ddtrace.appsec._iast.constants import DEFAULT_SSRF_FUNCTIONS
 from ddtrace.appsec._iast.constants import DEFAULT_WEAK_RANDOMNESS_FUNCTIONS
+from ddtrace.appsec._iast.taint_sinks.command_injection import _iast_report_cmdi
 from ddtrace.appsec._iast.taint_sinks.path_traversal import check_and_report_path_traversal
+from ddtrace.appsec._iast.taint_sinks.ssrf import _iast_report_ssrf
 from ddtrace.appsec._iast.taint_sinks.weak_randomness import WeakRandomness
@@ -44,7 +54,22 @@ def ast_function(
         # Report Telemetry Metrics
         _set_metric_iast_executed_sink(WeakRandomness.vulnerability_type)
-    elif hasattr(func, "__module__") and DEFAULT_PATH_TRAVERSAL_FUNCTIONS.get(func.__module__):
-        if func_name in DEFAULT_PATH_TRAVERSAL_FUNCTIONS[func.__module__]:
-            check_and_report_path_traversal(*args, **kwargs)
+    elif (
+        hasattr(func, "__module__")
+        and DEFAULT_PATH_TRAVERSAL_FUNCTIONS.get(func.__module__)
+        and func_name in DEFAULT_PATH_TRAVERSAL_FUNCTIONS[func.__module__]
+    ):
+        check_and_report_path_traversal(*args, **kwargs)
+    elif (
+        hasattr(func, "__module__")
+        and DEFAULT_COMMAND_INJECTION_FUNCTIONS.get(func.__module__)
+        and func_name in DEFAULT_COMMAND_INJECTION_FUNCTIONS[func.__module__]
+    ):
+        _iast_report_cmdi(func_name, *args, **kwargs)
+    elif (
+        hasattr(func, "__module__")
+        and DEFAULT_SSRF_FUNCTIONS.get(func.__module__)
+        and func_name in DEFAULT_SSRF_FUNCTIONS[func.__module__]
+    ):
+        _iast_report_ssrf(func_name, func.__module__, *args, **kwargs)

     return func(*args, **kwargs)
diff --git a/ddtrace/appsec/_iast/taint_sinks/code_injection.py b/ddtrace/appsec/_iast/taint_sinks/code_injection.py
index cb8e05908d0..d6d6a2acc54 100644
--- a/ddtrace/appsec/_iast/taint_sinks/code_injection.py
+++ b/ddtrace/appsec/_iast/taint_sinks/code_injection.py
@@ -120,5 +120,5 @@ def _iast_report_code_injection(code_string: Text):
         # Report Telemetry Metrics
         _set_metric_iast_executed_sink(CodeInjection.vulnerability_type)
     except Exception as e:
-        iast_error(f"propagation::sink_point::Error in _iast_report_code_injection. 
{e}") + iast_error("propagation::sink_point::Error in _iast_report_code_injection", e) return reported diff --git a/ddtrace/appsec/_iast/taint_sinks/command_injection.py b/ddtrace/appsec/_iast/taint_sinks/command_injection.py index 6bf99aaec98..83dc6641565 100644 --- a/ddtrace/appsec/_iast/taint_sinks/command_injection.py +++ b/ddtrace/appsec/_iast/taint_sinks/command_injection.py @@ -1,6 +1,3 @@ -from typing import List -from typing import Union - from ddtrace.appsec._constants import IAST from ddtrace.appsec._constants import IAST_SPAN_TAGS from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled @@ -9,74 +6,54 @@ from ddtrace.appsec._iast._span_metrics import increment_iast_span_metric from ddtrace.appsec._iast._taint_tracking import VulnerabilityType from ddtrace.appsec._iast.constants import VULN_CMDI -import ddtrace.contrib.internal.subprocess.patch as subprocess_patch -from ddtrace.internal.logger import get_logger -from ddtrace.internal.module import ModuleWatchdog -from ddtrace.settings.asm import config as asm_config from .._logs import iast_error -from .._logs import iast_propagation_sink_point_debug_log from ._base import VulnerabilityBase -log = get_logger(__name__) - - -def get_version() -> str: - return "" - - -_IAST_CMDI = "iast_cmdi" -_IS_PATCHED = False - +class CommandInjection(VulnerabilityBase): + vulnerability_type = VULN_CMDI + secure_mark = VulnerabilityType.COMMAND_INJECTION -def patch(): - global _IS_PATCHED - if _IS_PATCHED and not asm_config._iast_is_testing: - return - if not asm_config._iast_enabled: - return +IS_REPORTED_INTRUMENTED_SINK_METRIC = False - _IS_PATCHED = True - @ModuleWatchdog.after_module_imported("subprocess") - def _(module): - subprocess_patch.patch() - subprocess_patch.add_str_callback(_IAST_CMDI, _iast_report_cmdi) - subprocess_patch.add_lst_callback(_IAST_CMDI, _iast_report_cmdi) +def _iast_report_cmdi(func_name, *args, **kwargs) -> None: + global IS_REPORTED_INTRUMENTED_SINK_METRIC + if not IS_REPORTED_INTRUMENTED_SINK_METRIC: _set_metric_iast_instrumented_sink(VULN_CMDI) + IS_REPORTED_INTRUMENTED_SINK_METRIC = True - -def unpatch() -> None: - subprocess_patch.del_str_callback(_IAST_CMDI) - subprocess_patch.del_lst_callback(_IAST_CMDI) - - -class CommandInjection(VulnerabilityBase): - vulnerability_type = VULN_CMDI - secure_mark = VulnerabilityType.COMMAND_INJECTION - - -def _iast_report_cmdi(shell_args: Union[str, List[str]]) -> None: report_cmdi = "" - + if len(args) == 0: + shell_args = kwargs.get("args", []) + elif isinstance(args[0], (list, tuple)): + shell_args = args[0] + else: + shell_args = args try: if is_iast_request_enabled(): if CommandInjection.has_quota(): - iast_propagation_sink_point_debug_log("Check command injection sink point") from .._taint_tracking.aspects import join_aspect - + from .._taint_tracking.aspects import str_aspect + + if "spawn" in func_name: + shell_args = list(shell_args[1:]) + if isinstance(shell_args[1], (list, tuple)): + shell_args[1] = join_aspect( + " ".join, 1, " ", [str_aspect(str, 1, arg) for arg in shell_args[1]] + ) if isinstance(shell_args, (list, tuple)): for arg in shell_args: if CommandInjection.is_tainted_pyobject(arg): - report_cmdi = join_aspect(" ".join, 1, " ", shell_args) + str_shell_args = [str_aspect(str, 1, arg) for arg in shell_args] + report_cmdi = join_aspect(" ".join, 1, " ", str_shell_args) break elif CommandInjection.is_tainted_pyobject(shell_args): report_cmdi = shell_args if report_cmdi and isinstance(report_cmdi, IAST.TEXT_TYPES): - 
iast_propagation_sink_point_debug_log("Reporting command injection") CommandInjection.report(evidence_value=report_cmdi) # Reports Span Metrics @@ -84,4 +61,4 @@ def _iast_report_cmdi(shell_args: Union[str, List[str]]) -> None: # Report Telemetry Metrics _set_metric_iast_executed_sink(CommandInjection.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_report_cmdi. {e}") + iast_error("propagation::sink_point::Error in _iast_report_cmdi", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/header_injection.py b/ddtrace/appsec/_iast/taint_sinks/header_injection.py index ecba7f1d8a0..7d4ab9acc26 100644 --- a/ddtrace/appsec/_iast/taint_sinks/header_injection.py +++ b/ddtrace/appsec/_iast/taint_sinks/header_injection.py @@ -192,7 +192,7 @@ def _iast_django_response(wrapped, instance, args, kwargs): elif hasattr(instance, "_store"): instance._store = HeaderInjectionDict(instance._store) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_django_response. {e}") + iast_error("propagation::sink_point::Error in _iast_django_response", e) class HeaderInjectionDict(dict): @@ -264,7 +264,7 @@ def _iast_report_header_injection(headers_args, check_header_injection=True, che # Report Telemetry Metrics _set_metric_iast_executed_sink(HeaderInjection.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_report_header_injection. {e}") + iast_error("propagation::sink_point::Error in _iast_report_header_injection", e) def _check_type_headers_and_report_header_injection( diff --git a/ddtrace/appsec/_iast/taint_sinks/insecure_cookie.py b/ddtrace/appsec/_iast/taint_sinks/insecure_cookie.py index 7aae9b430f3..9dd0e5f5022 100644 --- a/ddtrace/appsec/_iast/taint_sinks/insecure_cookie.py +++ b/ddtrace/appsec/_iast/taint_sinks/insecure_cookie.py @@ -141,5 +141,5 @@ def _iast_response_cookies(wrapped, instance, args, kwargs): cookie_key, kwargs.get("secure") is not True, kwargs.get("httponly") is not True, report_samesite ) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_response_cookies. 
{e}") + iast_error("propagation::sink_point::Error in _iast_response_cookies", e) return wrapped(*args, **kwargs) diff --git a/ddtrace/appsec/_iast/taint_sinks/path_traversal.py b/ddtrace/appsec/_iast/taint_sinks/path_traversal.py index 69669e6ed60..084523ec901 100644 --- a/ddtrace/appsec/_iast/taint_sinks/path_traversal.py +++ b/ddtrace/appsec/_iast/taint_sinks/path_traversal.py @@ -21,17 +21,18 @@ class PathTraversal(VulnerabilityBase): secure_mark = VulnerabilityType.PATH_TRAVERSAL -IS_REPORTED_INTRUMENTED_SINK = False +IS_REPORTED_INTRUMENTED_SINK_METRIC = False def check_and_report_path_traversal(*args: Any, **kwargs: Any) -> None: - global IS_REPORTED_INTRUMENTED_SINK - if not IS_REPORTED_INTRUMENTED_SINK: + global IS_REPORTED_INTRUMENTED_SINK_METRIC + if not IS_REPORTED_INTRUMENTED_SINK_METRIC: _set_metric_iast_instrumented_sink(VULN_PATH_TRAVERSAL) - IS_REPORTED_INTRUMENTED_SINK = True + IS_REPORTED_INTRUMENTED_SINK_METRIC = True + try: if is_iast_request_enabled(): - filename_arg = args[0] if args else kwargs.get("file", None) + filename_arg = args[0] if len(args) > 0 else kwargs.get("file", None) if ( isinstance(filename_arg, IAST.TEXT_TYPES) and PathTraversal.has_quota() @@ -44,4 +45,4 @@ def check_and_report_path_traversal(*args: Any, **kwargs: Any) -> None: # Report Telemetry Metrics _set_metric_iast_executed_sink(PathTraversal.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in check_and_report_path_traversal. {e}") + iast_error("propagation::sink_point::Error in check_and_report_path_traversal", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/sql_injection.py b/ddtrace/appsec/_iast/taint_sinks/sql_injection.py index 35156ec0d52..762d580bb59 100644 --- a/ddtrace/appsec/_iast/taint_sinks/sql_injection.py +++ b/ddtrace/appsec/_iast/taint_sinks/sql_injection.py @@ -51,7 +51,7 @@ def _on_report_sqli(*args, **kwargs) -> bool: # Report Telemetry Metrics _set_metric_iast_executed_sink(SqlInjection.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in check_and_report_sqli. 
{e}") + iast_error("propagation::sink_point::Error in check_and_report_sqli", e) return reported diff --git a/ddtrace/appsec/_iast/taint_sinks/ssrf.py b/ddtrace/appsec/_iast/taint_sinks/ssrf.py index f9a252b9114..46a4a891406 100644 --- a/ddtrace/appsec/_iast/taint_sinks/ssrf.py +++ b/ddtrace/appsec/_iast/taint_sinks/ssrf.py @@ -1,11 +1,10 @@ -from typing import Callable - from ddtrace.appsec._constants import IAST from ddtrace.appsec._constants import IAST_SPAN_TAGS from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled from ddtrace.appsec._iast._logs import iast_error from ddtrace.appsec._iast._logs import iast_propagation_sink_point_debug_log from ddtrace.appsec._iast._metrics import _set_metric_iast_executed_sink +from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink from ddtrace.appsec._iast._span_metrics import increment_iast_span_metric from ddtrace.appsec._iast._taint_tracking import VulnerabilityType from ddtrace.appsec._iast._taint_tracking import get_ranges @@ -13,7 +12,6 @@ from ddtrace.appsec._iast.taint_sinks._base import VulnerabilityBase from ddtrace.internal.utils import ArgumentError from ddtrace.internal.utils import get_argument_value -from ddtrace.internal.utils.importlib import func_name class SSRF(VulnerabilityBase): @@ -22,16 +20,19 @@ class SSRF(VulnerabilityBase): _FUNC_TO_URL_ARGUMENT = { - "http.client.request": (1, "url"), - "requests.sessions.request": (1, "url"), - "urllib.request.urlopen": (0, "url"), - "urllib3._request_methods.request": (1, "url"), - "urllib3.request.request": (1, "url"), - "webbrowser.open": (0, "url"), + "requests.api": (0, "url"), + "urllib.request": (0, "url"), + "urllib3": (0, "url"), + "http.client": (1, "url"), + "urllib3._request_methods": (1, "url"), + "webbrowser": (0, "url"), } -def _iast_report_ssrf(func: Callable, *args, **kwargs): +IS_REPORTED_INTRUMENTED_SINK_METRIC = False + + +def _iast_report_ssrf(func_name: str, module_name, *args, **kwargs): """ Check and report potential SSRF (Server-Side Request Forgery) vulnerabilities in function calls. @@ -40,10 +41,16 @@ def _iast_report_ssrf(func: Callable, *args, **kwargs): URL fragments (parts after #) are handled specially - if all tainted parts are in the fragment, no vulnerability is reported. 
""" - func_key = func_name(func) - arg_pos, kwarg_name = _FUNC_TO_URL_ARGUMENT.get(func_key, (None, None)) + global IS_REPORTED_INTRUMENTED_SINK_METRIC + if not IS_REPORTED_INTRUMENTED_SINK_METRIC: + _set_metric_iast_instrumented_sink(VULN_SSRF) + IS_REPORTED_INTRUMENTED_SINK_METRIC = True + + arg_pos, kwarg_name = _FUNC_TO_URL_ARGUMENT.get(module_name, (None, None)) if arg_pos is None: - iast_propagation_sink_point_debug_log("%s not found in list of functions supported for SSRF", func_key) + iast_propagation_sink_point_debug_log( + f"{module_name}.{func_name} not found in list of functions supported for SSRF" + ) return try: @@ -51,7 +58,7 @@ def _iast_report_ssrf(func: Callable, *args, **kwargs): report_ssrf = get_argument_value(list(args), kwargs, arg_pos, kw) except ArgumentError: iast_propagation_sink_point_debug_log( - "Failed to get URL argument from _FUNC_TO_URL_ARGUMENT dict for function %s", func_key + f"Failed to get URL argument from _FUNC_TO_URL_ARGUMENT dict for function {module_name}.{func_name}" ) return if report_ssrf and isinstance(report_ssrf, IAST.TEXT_TYPES): @@ -73,4 +80,4 @@ def _iast_report_ssrf(func: Callable, *args, **kwargs): # Report Telemetry Metrics increment_iast_span_metric(IAST_SPAN_TAGS.TELEMETRY_EXECUTED_SINK, SSRF.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_report_ssrf. {e}") + iast_error("propagation::sink_point::Error in _iast_report_ssrf", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/stacktrace_leak.py b/ddtrace/appsec/_iast/taint_sinks/stacktrace_leak.py index 97210b5af18..d2885c368dc 100644 --- a/ddtrace/appsec/_iast/taint_sinks/stacktrace_leak.py +++ b/ddtrace/appsec/_iast/taint_sinks/stacktrace_leak.py @@ -126,4 +126,4 @@ def iast_check_stacktrace_leak(content: str) -> None: else: set_report_stacktrace_later(evidence) except Exception as e: - iast_error(f"propagation::sink_point::Error in iast_check_stacktrace_leak. 
{e}") + iast_error("propagation::sink_point::Error in iast_check_stacktrace_leak", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/untrusted_serialization.py b/ddtrace/appsec/_iast/taint_sinks/untrusted_serialization.py new file mode 100644 index 00000000000..b878663ecdf --- /dev/null +++ b/ddtrace/appsec/_iast/taint_sinks/untrusted_serialization.py @@ -0,0 +1,117 @@ +from typing import Text + +from ddtrace.appsec._constants import IAST +from ddtrace.appsec._constants import IAST_SPAN_TAGS +from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled +from ddtrace.appsec._iast._logs import iast_error +from ddtrace.appsec._iast._metrics import _set_metric_iast_executed_sink +from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink +from ddtrace.appsec._iast._patch_modules import WrapFunctonsForIAST +from ddtrace.appsec._iast._span_metrics import increment_iast_span_metric +from ddtrace.appsec._iast._taint_tracking import VulnerabilityType +from ddtrace.appsec._iast.constants import VULN_UNTRUSTED_SERIALIZATION +from ddtrace.appsec._iast.taint_sinks._base import VulnerabilityBase +from ddtrace.internal.logger import get_logger +from ddtrace.settings.asm import config as asm_config + + +log = get_logger(__name__) + + +class UntrustedSerialization(VulnerabilityBase): + vulnerability_type = VULN_UNTRUSTED_SERIALIZATION + secure_mark = VulnerabilityType.UNTRUSTED_SERIALIZATION + + +def get_version() -> Text: + return "" + + +_IS_PATCHED = False + +_MODULES = { + ("pickle", "load"), # maps to pickle._load/_pickle.load + ("pickle", "loads"), # maps to pickle.loads/_pickle.loads + ("pickle", "_load"), + ("pickle", "_loads"), + ("pickle", "_Unpickler.load"), + ("_pickle", "load"), + ("_pickle", "loads"), + ("_pickle", "Unpickler.load"), + ("dill", "load"), + ("dill", "loads"), + ("yaml", "load"), + ("yaml", "unsafe_load"), + ("yaml", "load_all"), + ("yaml", "unsafe_load_all"), + ("yaml", "full_load"), + ("yaml", "full_load_all"), +} + + +def patch(): + global _IS_PATCHED + if _IS_PATCHED and not asm_config._iast_is_testing: + return + + if not asm_config._iast_enabled: + return + + _IS_PATCHED = True + + iast_funcs = WrapFunctonsForIAST() + for module, function in _MODULES: + iast_funcs.wrap_function( + module, + function, + _wrap_serializers, + ) + + iast_funcs.patch() + + _set_metric_iast_instrumented_sink(VULN_UNTRUSTED_SERIALIZATION) + + +def _wrap_serializers(wrapped, instance, args, kwargs): + # YAML safe loader handling. If caller uses yaml.load with SafeLoader + # (either as second positional arg or via Loader kwarg), do not report. + if not _is_yaml_safe_load(args, kwargs): + _iast_report_untrusted_serializastion(kwargs.get("data", args[0] if len(args) > 0 else None)) + return wrapped(*args, **kwargs) + + +def _is_yaml_safe_load(args, kwargs): + """Return True when a yaml "safe" loader is explicitly provided. + + Detects yaml.load(..., SafeLoader) or yaml.load(..., Loader=SafeLoader) patterns. + The function imports yaml lazily to avoid hard dependency at import time. 
+ """ + try: + import yaml + + loader_kw = kwargs.get("Loader") or kwargs.get("loader") + loader_pos = args[1] if len(args) > 1 else None + loader = loader_kw or loader_pos + return loader is not None and (loader is getattr(yaml, "SafeLoader", object())) + except Exception: + # If yaml is not importable or anything fails, do not treat as safe + return False + + +def _iast_report_untrusted_serializastion(code_string: Text): + try: + if is_iast_request_enabled(): + if ( + isinstance(code_string, IAST.TEXT_TYPES) + and UntrustedSerialization.has_quota() + and UntrustedSerialization.is_tainted_pyobject(code_string) + ): + UntrustedSerialization.report(evidence_value=code_string) + # Reports Span Metrics + increment_iast_span_metric( + IAST_SPAN_TAGS.TELEMETRY_EXECUTED_SINK, UntrustedSerialization.vulnerability_type + ) + # Report Telemetry Metrics + _set_metric_iast_executed_sink(UntrustedSerialization.vulnerability_type) + except Exception as e: + iast_error("propagation::sink_point::Error in _iast_report_untrusted_serializastion", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/unvalidated_redirect.py b/ddtrace/appsec/_iast/taint_sinks/unvalidated_redirect.py index 19093704898..551e3123ab0 100644 --- a/ddtrace/appsec/_iast/taint_sinks/unvalidated_redirect.py +++ b/ddtrace/appsec/_iast/taint_sinks/unvalidated_redirect.py @@ -103,4 +103,4 @@ def _iast_report_unvalidated_redirect(headers): # Report Telemetry Metrics _set_metric_iast_executed_sink(UnvalidatedRedirect.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_report_unvalidated_redirect. {e}") + iast_error("propagation::sink_point::Error in _iast_report_unvalidated_redirect", e) diff --git a/ddtrace/appsec/_iast/taint_sinks/xss.py b/ddtrace/appsec/_iast/taint_sinks/xss.py index 1281a664d11..29e486ccd0c 100644 --- a/ddtrace/appsec/_iast/taint_sinks/xss.py +++ b/ddtrace/appsec/_iast/taint_sinks/xss.py @@ -3,7 +3,7 @@ from ddtrace.appsec._constants import IAST from ddtrace.appsec._constants import IAST_SPAN_TAGS from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled -from ddtrace.appsec._iast._logs import iast_error +from ddtrace.appsec._iast._logs import iast_propagation_sink_point_debug_log from ddtrace.appsec._iast._metrics import _set_metric_iast_executed_sink from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink from ddtrace.appsec._iast._patch_modules import WrapFunctonsForIAST @@ -106,4 +106,4 @@ def _iast_report_xss(code_string: Text): # Report Telemetry Metrics _set_metric_iast_executed_sink(XSS.vulnerability_type) except Exception as e: - iast_error(f"propagation::sink_point::Error in _iast_report_xss. {e}") + iast_propagation_sink_point_debug_log(f"propagation::sink_point::Error in _iast_report_xss. 
{e}") diff --git a/ddtrace/appsec/_metrics.py b/ddtrace/appsec/_metrics.py index 44c61ec22f5..0157be01aaf 100644 --- a/ddtrace/appsec/_metrics.py +++ b/ddtrace/appsec/_metrics.py @@ -31,7 +31,7 @@ class WARNING_TAGS(metaclass=_constants.Constant_Class): @deduplication -def _set_waf_error_log(msg: str, version: str, error_level: bool = True) -> None: +def _set_waf_error_log(msg: str, version: str, action: str, error_level: bool = True) -> None: """used for waf configuration errors""" try: log_tags = { @@ -47,6 +47,7 @@ def _set_waf_error_log(msg: str, version: str, error_level: bool = True) -> None tags = ( ("waf_version", ddwaf_version), ("event_rules_version", version or UNKNOWN_VERSION), + ("action", action), ) telemetry.telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.APPSEC, "waf.config_errors", 1, tags=tags) except Exception: @@ -80,7 +81,9 @@ def _set_waf_init_metric(info: DDWaf_info, success: bool): TELEMETRY_NAMESPACE.APPSEC, "waf.init", 1, tags=tags + (("success", bool_str[success]),) ) if not success: - telemetry.telemetry_writer.add_count_metric(TELEMETRY_NAMESPACE.APPSEC, "waf.config_errors", 1, tags=tags) + telemetry.telemetry_writer.add_count_metric( + TELEMETRY_NAMESPACE.APPSEC, "waf.config_errors", 1, tags=tags + (("action", "init"),) + ) except Exception: extra = {"product": "appsec", "exec_limit": 6, "more_info": ":waf:init"} logger.warning(WARNING_TAGS.TELEMETRY_METRICS, extra=extra, exc_info=True) diff --git a/ddtrace/appsec/_utils.py b/ddtrace/appsec/_utils.py index 0423c8126fa..4f27e29759c 100644 --- a/ddtrace/appsec/_utils.py +++ b/ddtrace/appsec/_utils.py @@ -14,7 +14,6 @@ from ddtrace.appsec._constants import API_SECURITY from ddtrace.appsec._constants import APPSEC -from ddtrace.appsec._constants import IAST from ddtrace.contrib.internal.trace_utils_base import _get_header_value_case_insensitive from ddtrace.internal._unpatched import unpatched_json_loads from ddtrace.internal.logger import get_logger @@ -344,18 +343,6 @@ def get_triggers(span) -> Any: return None -def get_security(span) -> Any: - if asm_config._use_metastruct_for_iast: - return span.get_struct_tag(IAST.STRUCT) - json_payload = span.get_tag(IAST.JSON) - if json_payload: - try: - return json.loads(json_payload) - except Exception: - log.debug("Failed to parse security", exc_info=True) - return None - - def add_context_log(logger: logging.Logger, msg: str, offset: int = 0) -> str: filename, line_number, function_name, _stack_info = logger.findCaller(False, 3 + offset) return f"{msg}[{filename}, line {line_number}, in {function_name}]" diff --git a/ddtrace/contrib/integration_registry/registry.yaml b/ddtrace/contrib/integration_registry/registry.yaml index 2abb67f43a4..d61219c9496 100644 --- a/ddtrace/contrib/integration_registry/registry.yaml +++ b/ddtrace/contrib/integration_registry/registry.yaml @@ -17,7 +17,7 @@ integrations: tested_versions_by_dependency: aiohttp: min: 3.7.4.post0 - max: 3.12.11 + max: 3.12.15 - integration_name: aiohttp_jinja2 is_external_package: true @@ -393,6 +393,16 @@ integrations: min: 20.12.1 max: 24.11.1 +- integration_name: google_adk + is_external_package: true + is_tested: true + dependency_names: + - google-adk + tested_versions_by_dependency: + google-adk: + min: 1.0.0 + max: 1.14.1 + - integration_name: google_genai is_external_package: true is_tested: true @@ -567,7 +577,7 @@ integrations: tested_versions_by_dependency: mariadb: min: 1.0.11 - max: 1.1.12 + max: 1.1.13 - integration_name: mcp is_external_package: true diff --git 
a/ddtrace/contrib/internal/aiobotocore/patch.py b/ddtrace/contrib/internal/aiobotocore/patch.py index 1f629d05076..b4576f695a6 100644 --- a/ddtrace/contrib/internal/aiobotocore/patch.py +++ b/ddtrace/contrib/internal/aiobotocore/patch.py @@ -160,6 +160,10 @@ async def _wrapped_api_call(original_func, instance, args, kwargs): "aws.region": region_name, "region": region_name, } + + if region_name: + meta["aws.partition"] = aws.get_aws_partition(region_name) + span.set_tags(meta) result = await original_func(*args, **kwargs) diff --git a/ddtrace/contrib/internal/anthropic/utils.py b/ddtrace/contrib/internal/anthropic/utils.py deleted file mode 100644 index 0721f33529d..00000000000 --- a/ddtrace/contrib/internal/anthropic/utils.py +++ /dev/null @@ -1,4 +0,0 @@ -from ddtrace.internal.logger import get_logger - - -log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/azure_functions/patch.py b/ddtrace/contrib/internal/azure_functions/patch.py index c51b5a3b59d..5be8fd8917e 100644 --- a/ddtrace/contrib/internal/azure_functions/patch.py +++ b/ddtrace/contrib/internal/azure_functions/patch.py @@ -1,4 +1,3 @@ -import os from typing import Dict import azure.functions as azure_functions @@ -10,9 +9,10 @@ from ddtrace.ext import SpanKind from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool +from ddtrace.propagation.http import HTTPPropagator +from ddtrace.settings._config import _get_config from .utils import create_context -from .utils import message_list_has_single_context from .utils import wrap_function_with_tracing @@ -20,7 +20,7 @@ "azure_functions", dict( _default_service=schematize_service_name("azure_functions"), - distributed_tracing=asbool(os.getenv("DD_AZURE_FUNCTIONS_DISTRIBUTED_TRACING", default=True)), + distributed_tracing=asbool(_get_config("DD_AZURE_FUNCTIONS_DISTRIBUTED_TRACING", default=True)), ), ) @@ -97,35 +97,58 @@ def _wrap_service_bus_trigger(pin, func, function_name, trigger_arg_name, trigge def context_factory(kwargs): resource_name = f"{trigger_type} {function_name}" - msg = kwargs.get(trigger_arg_name) - - # Reparent trace if single message or list of messages all with same context - if isinstance(msg, azure_functions.ServiceBusMessage): - application_properties = msg.application_properties - elif ( - isinstance(msg, list) - and msg - and isinstance(msg[0], azure_functions.ServiceBusMessage) - and message_list_has_single_context(msg) - ): - application_properties = msg[0].application_properties - else: - application_properties = None - - return create_context("azure.functions.patched_service_bus", pin, resource_name, headers=application_properties) + return create_context("azure.functions.patched_service_bus", pin, resource_name) def pre_dispatch(ctx, kwargs): - msg = kwargs.get(trigger_arg_name) + entity_name = trigger_details.get("topicName") or trigger_details.get("queueName") + cardinality = trigger_details.get("cardinality") + msg_arg_value = kwargs.get(trigger_arg_name) - if isinstance(msg, azure_functions.ServiceBusMessage): - message_id = msg.message_id + if ( + cardinality == azure_functions.Cardinality.MANY + and isinstance(msg_arg_value, list) + and isinstance(msg_arg_value[0], azure_functions.ServiceBusMessage) + ): + batch_count = str(len(msg_arg_value)) + fully_qualified_namespace = ( + getattr(msg_arg_value[0], "metadata", {}).get("Client", {}).get("FullyQualifiedNamespace") + ) + message_id = None + + if config.azure_functions.distributed_tracing: + for message in msg_arg_value: + 
parent_context = HTTPPropagator.extract(message.application_properties) + if parent_context.trace_id is not None and parent_context.span_id is not None: + ctx.span.link_span(parent_context) + elif isinstance(msg_arg_value, azure_functions.ServiceBusMessage): + batch_count = None + fully_qualified_namespace = ( + getattr(msg_arg_value, "metadata", {}).get("Client", {}).get("FullyQualifiedNamespace") + ) + message_id = msg_arg_value.message_id + + if config.azure_functions.distributed_tracing: + parent_context = HTTPPropagator.extract(msg_arg_value.application_properties) + if parent_context.trace_id is not None and parent_context.span_id is not None: + ctx.span.link_span(parent_context) else: + batch_count = None + fully_qualified_namespace = None message_id = None - entity_name = trigger_details.get("topicName") or trigger_details.get("queueName") return ( "azure.functions.service_bus_trigger_modifier", - (ctx, config.azure_functions, function_name, trigger_type, SpanKind.CONSUMER, entity_name, message_id), + ( + ctx, + config.azure_functions, + function_name, + trigger_type, + SpanKind.CONSUMER, + entity_name, + fully_qualified_namespace, + message_id, + batch_count, + ), ) return wrap_function_with_tracing(func, context_factory, pre_dispatch=pre_dispatch) diff --git a/ddtrace/contrib/internal/azure_functions/utils.py b/ddtrace/contrib/internal/azure_functions/utils.py index 4ef21fa40b7..ea1f74b33bd 100644 --- a/ddtrace/contrib/internal/azure_functions/utils.py +++ b/ddtrace/contrib/internal/azure_functions/utils.py @@ -1,15 +1,11 @@ import functools import inspect -from typing import List - -import azure.functions as azure_functions from ddtrace import config from ddtrace.contrib.internal.trace_utils import int_service from ddtrace.ext import SpanTypes from ddtrace.internal import core from ddtrace.internal.schema import schematize_cloud_faas_operation -from ddtrace.propagation.http import HTTPPropagator def create_context(context_name, pin, resource=None, headers=None): @@ -63,13 +59,3 @@ def wrapper(*args, **kwargs): core.dispatch(*post_dispatch(ctx, res)) return wrapper - - -def message_list_has_single_context(msg_list: List[azure_functions.ServiceBusMessage]): - first_context = HTTPPropagator.extract(msg_list[0].application_properties) - for message in msg_list[1:]: - context = HTTPPropagator.extract(message.application_properties) - if first_context != context: - return False - - return True diff --git a/ddtrace/contrib/internal/azure_servicebus/__init__.py b/ddtrace/contrib/internal/azure_servicebus/__init__.py index 851bd0cb984..ee86e32b84d 100644 --- a/ddtrace/contrib/internal/azure_servicebus/__init__.py +++ b/ddtrace/contrib/internal/azure_servicebus/__init__.py @@ -29,5 +29,16 @@ This option can also be set with the ``DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING`` environment variable. + Default: ``True`` + +.. py:data:: ddtrace.config.azure_servicebus['batch_links'] + + Adds ``create`` spans for Service Bus messages added to a batch, and span links on the send span for messages sent in a batch. + + This option can also be set with the ``DD_TRACE_AZURE_SERVICEBUS_BATCH_LINKS_ENABLED`` + environment variable. + + Distributed tracing must also be enabled for span links to be added.
+ Default: ``True`` """ diff --git a/ddtrace/contrib/internal/azure_servicebus/patch.py b/ddtrace/contrib/internal/azure_servicebus/patch.py index b3367ebbcfd..b0007371d0b 100644 --- a/ddtrace/contrib/internal/azure_servicebus/patch.py +++ b/ddtrace/contrib/internal/azure_servicebus/patch.py @@ -1,4 +1,3 @@ -import os from typing import Dict import azure.servicebus as azure_servicebus @@ -8,20 +7,21 @@ from ddtrace import config from ddtrace._trace.pin import Pin from ddtrace.contrib.internal.trace_utils import unwrap as _u -from ddtrace.internal import core +from ddtrace.ext import azure_servicebus as azure_servicebusx from ddtrace.internal.schema import schematize_service_name -from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool +from ddtrace.settings._config import _get_config from .utils import create_context -from .utils import handle_service_bus_message_arg +from .utils import dispatch_message_modifier config._add( "azure_servicebus", dict( _default_service=schematize_service_name("azure_servicebus"), - distributed_tracing=asbool(os.getenv("DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING", default=True)), + distributed_tracing=asbool(_get_config("DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING", default=True)), + batch_links=asbool(_get_config("DD_TRACE_AZURE_SERVICEBUS_BATCH_LINKS_ENABLED", default=True)), ), ) @@ -49,31 +49,74 @@ def _patch(azure_servicebus_module): azure_servicebus_module._datadog_patch = True if azure_servicebus_module.__name__ == "azure.servicebus.aio": - Pin().onto(azure_servicebus_aio.ServiceBusSender) + Pin().onto(azure_servicebus_module.ServiceBusSender) + _w("azure.servicebus.aio", "ServiceBusSender.create_message_batch", _patched_create_message_batch_async) _w("azure.servicebus.aio", "ServiceBusSender.send_messages", _patched_send_messages_async) _w("azure.servicebus.aio", "ServiceBusSender.schedule_messages", _patched_schedule_messages_async) else: Pin().onto(azure_servicebus_module.ServiceBusSender) + Pin().onto(azure_servicebus_module.ServiceBusMessageBatch) + _w("azure.servicebus", "ServiceBusMessageBatch.add_message", _patched_add_message) + _w("azure.servicebus", "ServiceBusSender.create_message_batch", _patched_create_message_batch) _w("azure.servicebus", "ServiceBusSender.send_messages", _patched_send_messages) _w("azure.servicebus", "ServiceBusSender.schedule_messages", _patched_schedule_messages) +def _patched_create_message_batch(wrapped, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled() or not config.azure_servicebus.batch_links: + return wrapped(*args, **kwargs) + + batch = wrapped(*args, **kwargs) + + batch._dd_entity_name = instance.entity_name + batch._dd_fully_qualified_namespace = instance.fully_qualified_namespace + + return batch + + +async def _patched_create_message_batch_async(wrapped, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled() or not config.azure_servicebus.batch_links: + return await wrapped(*args, **kwargs) + + batch = await wrapped(*args, **kwargs) + + batch._dd_entity_name = instance.entity_name + batch._dd_fully_qualified_namespace = instance.fully_qualified_namespace + + return batch + + +def _patched_add_message(wrapped, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled() or not config.azure_servicebus.batch_links: + return wrapped(*args, **kwargs) + + resource_name = instance._dd_entity_name + fully_qualified_namespace = instance._dd_fully_qualified_namespace + 
operation_name = f"{azure_servicebusx.CLOUD}.{azure_servicebusx.SERVICE}.{azure_servicebusx.CREATE}" + + with create_context("azure.servicebus.patched_producer_batch", pin, operation_name, resource_name) as ctx, ctx.span: + dispatch_message_modifier( + ctx, args, kwargs, azure_servicebusx.CREATE, resource_name, fully_qualified_namespace, "message" + ) + return wrapped(*args, **kwargs) + + def _patched_send_messages(wrapped, instance, args, kwargs): pin = Pin.get_from(instance) if not pin or not pin.enabled(): return wrapped(*args, **kwargs) resource_name = instance.entity_name + fully_qualified_namespace = instance.fully_qualified_namespace + operation_name = f"{azure_servicebusx.CLOUD}.{azure_servicebusx.SERVICE}.{azure_servicebusx.SEND}" - with create_context("azure.servicebus.patched_producer", pin, resource_name) as ctx, ctx.span: - if config.azure_servicebus.distributed_tracing: - message_arg_value = get_argument_value(args, kwargs, 0, "message", True) - handle_service_bus_message_arg(ctx.span, message_arg_value) - core.dispatch( - "azure.servicebus.send_message_modifier", - (ctx, config.azure_servicebus, resource_name, instance.fully_qualified_namespace), + with create_context("azure.servicebus.patched_producer_send", pin, operation_name, resource_name) as ctx, ctx.span: + dispatch_message_modifier( + ctx, args, kwargs, azure_servicebusx.SEND, resource_name, fully_qualified_namespace, "message" ) - return wrapped(*args, **kwargs) @@ -83,16 +126,13 @@ async def _patched_send_messages_async(wrapped, instance, args, kwargs): return await wrapped(*args, **kwargs) resource_name = instance.entity_name + fully_qualified_namespace = instance.fully_qualified_namespace + operation_name = f"{azure_servicebusx.CLOUD}.{azure_servicebusx.SERVICE}.{azure_servicebusx.SEND}" - with create_context("azure.servicebus.patched_producer", pin, resource_name) as ctx, ctx.span: - if config.azure_servicebus.distributed_tracing: - message_arg_value = get_argument_value(args, kwargs, 0, "message", True) - handle_service_bus_message_arg(ctx.span, message_arg_value) - core.dispatch( - "azure.servicebus.send_message_modifier", - (ctx, config.azure_servicebus, resource_name, instance.fully_qualified_namespace), + with create_context("azure.servicebus.patched_producer_send", pin, operation_name, resource_name) as ctx, ctx.span: + dispatch_message_modifier( + ctx, args, kwargs, azure_servicebusx.SEND, resource_name, fully_qualified_namespace, "message" ) - return await wrapped(*args, **kwargs) @@ -102,16 +142,15 @@ def _patched_schedule_messages(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) resource_name = instance.entity_name - - with create_context("azure.servicebus.patched_producer", pin, resource_name) as ctx, ctx.span: - if config.azure_servicebus.distributed_tracing: - message_arg_value = get_argument_value(args, kwargs, 0, "messages", True) - handle_service_bus_message_arg(ctx.span, message_arg_value) - core.dispatch( - "azure.servicebus.send_message_modifier", - (ctx, config.azure_servicebus, resource_name, instance.fully_qualified_namespace), + fully_qualified_namespace = instance.fully_qualified_namespace + operation_name = f"{azure_servicebusx.CLOUD}.{azure_servicebusx.SERVICE}.{azure_servicebusx.SEND}" + + with create_context( + "azure.servicebus.patched_producer_schedule", pin, operation_name, resource_name + ) as ctx, ctx.span: + dispatch_message_modifier( + ctx, args, kwargs, azure_servicebusx.SEND, resource_name, fully_qualified_namespace, "messages" ) - return wrapped(*args, 
**kwargs) @@ -121,16 +160,15 @@ async def _patched_schedule_messages_async(wrapped, instance, args, kwargs): return await wrapped(*args, **kwargs) resource_name = instance.entity_name - - with create_context("azure.servicebus.patched_producer", pin, resource_name) as ctx, ctx.span: - if config.azure_servicebus.distributed_tracing: - message_arg_value = get_argument_value(args, kwargs, 0, "messages", True) - handle_service_bus_message_arg(ctx.span, message_arg_value) - core.dispatch( - "azure.servicebus.send_message_modifier", - (ctx, config.azure_servicebus, resource_name, instance.fully_qualified_namespace), + fully_qualified_namespace = instance.fully_qualified_namespace + operation_name = f"{azure_servicebusx.CLOUD}.{azure_servicebusx.SERVICE}.{azure_servicebusx.SEND}" + + with create_context( + "azure.servicebus.patched_producer_schedule", pin, operation_name, resource_name + ) as ctx, ctx.span: + dispatch_message_modifier( + ctx, args, kwargs, azure_servicebusx.SEND, resource_name, fully_qualified_namespace, "messages" ) - return await wrapped(*args, **kwargs) @@ -144,5 +182,9 @@ def _unpatch(azure_servicebus_module): return azure_servicebus_module._datadog_patch = False + _u(azure_servicebus_module.ServiceBusSender, "create_message_batch") _u(azure_servicebus_module.ServiceBusSender, "send_messages") _u(azure_servicebus_module.ServiceBusSender, "schedule_messages") + + if azure_servicebus_module.__name__ == "azure.servicebus": + _u(azure_servicebus_module.ServiceBusMessageBatch, "add_message") diff --git a/ddtrace/contrib/internal/azure_servicebus/utils.py b/ddtrace/contrib/internal/azure_servicebus/utils.py index 49a4d135064..270a94143be 100644 --- a/ddtrace/contrib/internal/azure_servicebus/utils.py +++ b/ddtrace/contrib/internal/azure_servicebus/utils.py @@ -1,20 +1,18 @@ +from typing import Union +from uuid import UUID + import azure.servicebus as azure_servicebus import azure.servicebus.amqp as azure_servicebus_amqp from ddtrace import config from ddtrace.contrib.trace_utils import ext_service from ddtrace.ext import SpanTypes -from ddtrace.ext import azure_servicebus as azure_servicebusx from ddtrace.internal import core -from ddtrace.internal.schema import schematize_cloud_messaging_operation -from ddtrace.internal.schema.span_attribute_schema import SpanDirection +from ddtrace.internal.utils import get_argument_value from ddtrace.propagation.http import HTTPPropagator -def create_context(context_name, pin, resource=None): - operation_name = schematize_cloud_messaging_operation( - azure_servicebusx.PRODUCE, cloud_provider="azure", cloud_service="servicebus", direction=SpanDirection.OUTBOUND - ) +def create_context(context_name, pin, operation_name, resource=None): return core.context_with_data( context_name, span_name=operation_name, @@ -25,9 +23,8 @@ def create_context(context_name, pin, resource=None): ) -def handle_service_bus_message_arg(span, message_arg_value): +def handle_service_bus_message_context(span, message_arg_value): if isinstance(message_arg_value, (azure_servicebus.ServiceBusMessage, azure_servicebus_amqp.AmqpAnnotatedMessage)): - message_arg_value.application_properties inject_context(span, message_arg_value) elif ( isinstance(message_arg_value, list) @@ -38,14 +35,20 @@ def handle_service_bus_message_arg(span, message_arg_value): ): for message in message_arg_value: inject_context(span, message) + elif isinstance(message_arg_value, azure_servicebus.ServiceBusMessageBatch): + for message in message_arg_value._messages: + parent_context = 
HTTPPropagator.extract(message._message.application_properties) + if parent_context.trace_id is not None and parent_context.span_id is not None: + span.link_span(parent_context) def inject_context(span, message): """ - message.application_properties is of type Dict[str | bytes, PrimitiveTypes] | Dict[str | bytes, Any] | None + ServiceBusMessage.application_properties is of type Dict[str | bytes, PrimitiveTypes] | None + AmqpAnnotatedMessage.application_properties is of type Dict[str | bytes, Any] | None while HTTPPropagator.inject expects type of Dict[str, str]. - Inject the context into an empty dictionary and merge it with message.application_properties + Inject the context into an empty dictionary and merge it with application_properties to preserve the original type. """ inject_carrier = {} @@ -56,3 +59,51 @@ def inject_context(span, message): message.application_properties = {} message.application_properties.update(inject_carrier) + + +def handle_service_bus_message_attributes(message_arg_value): + if isinstance(message_arg_value, azure_servicebus.ServiceBusMessage): + batch_count = None + message_id = message_arg_value.message_id + elif isinstance(message_arg_value, azure_servicebus_amqp.AmqpAnnotatedMessage): + batch_count = None + message_id_raw: Union[str, bytes, UUID, None] = getattr(message_arg_value.properties, "message_id", None) + + # stringify bytes/UUID, strip whitespace in strings, and map empty strings to None + if message_id_raw: + message_id = str(message_id_raw).strip() or None + else: + message_id = None + elif isinstance(message_arg_value, azure_servicebus.ServiceBusMessageBatch): + batch_count = str(len(message_arg_value._messages)) + message_id = None + elif isinstance(message_arg_value, list): + batch_count = str(len(message_arg_value)) + message_id = None + else: + message_id = None + batch_count = None + return message_id, batch_count + + +def dispatch_message_modifier( + ctx, args, kwargs, message_operation, resource_name, fully_qualified_namespace, message_arg +): + message_arg_value = get_argument_value(args, kwargs, 0, message_arg, True) + message_id, batch_count = handle_service_bus_message_attributes(message_arg_value) + + if config.azure_servicebus.distributed_tracing: + handle_service_bus_message_context(ctx.span, message_arg_value) + + core.dispatch( + "azure.servicebus.message_modifier", + ( + ctx, + config.azure_servicebus, + message_operation, + resource_name, + fully_qualified_namespace, + message_id, + batch_count, + ), + ) diff --git a/ddtrace/contrib/internal/boto/patch.py b/ddtrace/contrib/internal/boto/patch.py index 766a53da0c5..59a7153edf6 100644 --- a/ddtrace/contrib/internal/boto/patch.py +++ b/ddtrace/contrib/internal/boto/patch.py @@ -124,6 +124,7 @@ def patched_query_request(original_func, instance, args, kwargs): if region_name: meta[aws.REGION] = region_name meta[aws.AWSREGION] = region_name + meta[aws.PARTITION] = aws.get_aws_partition(region_name) if in_aws_lambda(): # Derive the peer hostname now that we have both service and region. 
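The new ``aws.partition`` tag added in the hunk above is derived from the request's region. A minimal sketch of the region-to-partition mapping that ``ddtrace.ext.aws.get_aws_partition`` presumably performs (assumed behavior for illustration; the helper's actual implementation is not part of this patch):

    # Hedged sketch: assumed region -> AWS partition mapping (not the
    # actual ddtrace.ext.aws implementation, which this patch does not show).
    def get_aws_partition(region_name: str) -> str:
        if region_name.startswith("cn-"):
            return "aws-cn"  # China partition
        if region_name.startswith("us-gov-"):
            return "aws-us-gov"  # GovCloud partition
        if region_name.startswith("us-isob-"):
            return "aws-iso-b"  # isolated partition
        if region_name.startswith("us-iso-"):
            return "aws-iso"  # isolated partition
        return "aws"  # default commercial partition

    assert get_aws_partition("eu-west-1") == "aws"
    assert get_aws_partition("cn-north-1") == "aws-cn"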
@@ -192,6 +193,7 @@ def patched_auth_request(original_func, instance, args, kwargs): if region_name: meta[aws.REGION] = region_name meta[aws.AWSREGION] = region_name + meta[aws.PARTITION] = aws.get_aws_partition(region_name) if in_aws_lambda(): # Derive the peer hostname diff --git a/ddtrace/contrib/internal/botocore/services/bedrock_agents.py b/ddtrace/contrib/internal/botocore/services/bedrock_agents.py index e727dd6dd06..e2291a0ed8c 100644 --- a/ddtrace/contrib/internal/botocore/services/bedrock_agents.py +++ b/ddtrace/contrib/internal/botocore/services/bedrock_agents.py @@ -38,8 +38,8 @@ def __iter__(self): response = _process_streamed_response_chunks(chunks) try: self._dd_integration.translate_bedrock_traces(traces, self._dd_span) - except Exception as e: - log.error("Error translating Bedrock traces: %s", e, exc_info=True) + except Exception: + log.error("Error translating Bedrock traces", exc_info=True) self._dd_integration.llmobs_set_tags(self._dd_span, self._args, self._kwargs, response, operation="agent") self._dd_span.finish() diff --git a/ddtrace/contrib/internal/django/__init__.py b/ddtrace/contrib/internal/django/__init__.py index 6810499bca2..195fa6242a5 100644 --- a/ddtrace/contrib/internal/django/__init__.py +++ b/ddtrace/contrib/internal/django/__init__.py @@ -85,6 +85,29 @@ Default: ``False`` +.. envvar:: DD_DJANGO_TRACING_MINIMAL + + Enables minimal tracing mode for performance-sensitive applications. When enabled, this disables + Django ORM, cache, and template instrumentation while keeping middleware instrumentation enabled. + This can significantly reduce overhead by removing Django-specific spans while preserving visibility + into the underlying database drivers, cache clients, and other integrations. + + This is equivalent to setting: + - ``DD_DJANGO_INSTRUMENT_TEMPLATES=false`` + - ``DD_DJANGO_INSTRUMENT_DATABASES=false`` + - ``DD_DJANGO_INSTRUMENT_CACHES=false`` + + For example, with ``DD_DJANGO_INSTRUMENT_DATABASES=false``, Django ORM query spans are disabled + but database driver spans (e.g., psycopg, MySQLdb) will still be created, providing visibility + into the actual database queries without the Django ORM overhead. + + Consider using this option if your application is performance-sensitive and the additional + Django-layer spans are not required for your observability needs. + + Default: ``False`` + + *New in version v3.15.0.* + .. py:data:: ddtrace.config.django['instrument_middleware'] Whether or not to instrument middleware. 
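To make the new Django flag concrete, here is a hedged sketch of opting in from application code (it assumes the variable is set before ddtrace reads its configuration; exporting it in the process environment before startup is equivalent):

    # Sketch: enable minimal Django tracing before ddtrace loads its config.
    # Equivalent to setting the three DD_DJANGO_INSTRUMENT_* variables to false.
    import os

    os.environ["DD_DJANGO_TRACING_MINIMAL"] = "true"

    import ddtrace.auto  # noqa: E402,F401

As the patch below shows, each DD_DJANGO_INSTRUMENT_* variable is still read with os.getenv and only defaults to ``not DJANGO_TRACING_MINIMAL``, so an explicitly set per-feature variable takes precedence over the minimal mode.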
diff --git a/ddtrace/contrib/internal/django/database.py b/ddtrace/contrib/internal/django/database.py index 119fe8ef7b3..ab7b9045de7 100644 --- a/ddtrace/contrib/internal/django/database.py +++ b/ddtrace/contrib/internal/django/database.py @@ -12,6 +12,7 @@ import ddtrace from ddtrace import config +from ddtrace._trace.pin import Pin from ddtrace.contrib import dbapi from ddtrace.contrib.internal.trace_utils import _convert_to_string from ddtrace.ext import db @@ -24,7 +25,6 @@ from ddtrace.internal.wrapping import wrap from ddtrace.propagation._database_monitoring import _DBM_Propagator from ddtrace.settings.integration import IntegrationConfig -from ddtrace.trace import Pin log = get_logger(__name__) diff --git a/ddtrace/contrib/internal/django/patch.py b/ddtrace/contrib/internal/django/patch.py index 65548858433..8f5aec1eb16 100644 --- a/ddtrace/contrib/internal/django/patch.py +++ b/ddtrace/contrib/internal/django/patch.py @@ -39,6 +39,9 @@ log = get_logger(__name__) +# TODO[4.0]: Change this to True by default +DJANGO_TRACING_MINIMAL = asbool(_get_config("DD_DJANGO_TRACING_MINIMAL", default=False)) + config._add( "django", dict( @@ -49,10 +52,11 @@ trace_fetch_methods=asbool(os.getenv("DD_DJANGO_TRACE_FETCH_METHODS", default=False)), distributed_tracing_enabled=True, instrument_middleware=asbool(os.getenv("DD_DJANGO_INSTRUMENT_MIDDLEWARE", default=True)), - instrument_templates=asbool(os.getenv("DD_DJANGO_INSTRUMENT_TEMPLATES", default=True)), - instrument_databases=asbool(os.getenv("DD_DJANGO_INSTRUMENT_DATABASES", default=True)), + instrument_templates=asbool(os.getenv("DD_DJANGO_INSTRUMENT_TEMPLATES", default=not DJANGO_TRACING_MINIMAL)), + instrument_databases=asbool(os.getenv("DD_DJANGO_INSTRUMENT_DATABASES", default=not DJANGO_TRACING_MINIMAL)), + # TODO[4.0]: remove this option and make it the default behavior when databases are instrumented always_create_database_spans=asbool(os.getenv("DD_DJANGO_ALWAYS_CREATE_DATABASE_SPANS", default=True)), - instrument_caches=asbool(os.getenv("DD_DJANGO_INSTRUMENT_CACHES", default=True)), + instrument_caches=asbool(os.getenv("DD_DJANGO_INSTRUMENT_CACHES", default=not DJANGO_TRACING_MINIMAL)), trace_query_string=None, # Default to global config include_user_name=asm_config._django_include_user_name, include_user_email=asm_config._django_include_user_email, diff --git a/ddtrace/contrib/internal/flask/patch.py b/ddtrace/contrib/internal/flask/patch.py index c5a3d9651e2..f3a4264b23a 100644 --- a/ddtrace/contrib/internal/flask/patch.py +++ b/ddtrace/contrib/internal/flask/patch.py @@ -455,7 +455,12 @@ def _wrap(rule, endpoint=None, view_func=None, provide_automatic_options=None, * # should we do something special with these views? Change the name/resource? Add tags? core.dispatch("service_entrypoint.patch", (unwrap(view_func),)) wrapped_view = wrap_view(instance, view_func, name=endpoint, resource=rule) - for method in kwargs.get("methods", []): + methods = kwargs.get("methods", ["GET"]) + try: + methods = iter(methods) + except Exception: # nosec + methods = ["GET"] + for method in methods: endpoint_collection.add_endpoint(method, rule, operation_name="flask.request") return wrapped( rule, diff --git a/ddtrace/contrib/internal/google_adk/__init__.py b/ddtrace/contrib/internal/google_adk/__init__.py new file mode 100644 index 00000000000..d7e1663b13f --- /dev/null +++ b/ddtrace/contrib/internal/google_adk/__init__.py @@ -0,0 +1,29 @@ +""" +The Google ADK integration instruments the Google ADK Python SDK to create spans for Agent requests. 
+ +All traces submitted from the Google ADK integration are tagged by: + +- ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_. +- model used in the request. +- provider used in the request. + + +Enabling +~~~~~~~~ + +The Google ADK integration is enabled automatically when you use +:ref:`ddtrace-run` or :ref:`import ddtrace.auto`. + +Global Configuration +~~~~~~~~~~~~~~~~~~~~ + +.. py:data:: ddtrace.config.google_adk["service"] + + The service name reported by default for Google ADK requests. + + Alternatively, you can set this option with the ``DD_SERVICE`` or ``DD_GOOGLE_ADK_SERVICE`` environment + variables. + + Default: ``DD_SERVICE`` + +""" diff --git a/ddtrace/contrib/internal/google_adk/patch.py b/ddtrace/contrib/internal/google_adk/patch.py new file mode 100644 index 00000000000..9c1b803d128 --- /dev/null +++ b/ddtrace/contrib/internal/google_adk/patch.py @@ -0,0 +1,244 @@ +import sys +from typing import Any +from typing import Dict +from typing import Union + +import google.adk as adk + +from ddtrace import config +from ddtrace._trace.pin import Pin +from ddtrace.contrib.internal.trace_utils import check_module_path +from ddtrace.contrib.trace_utils import unwrap +from ddtrace.contrib.trace_utils import with_traced_module +from ddtrace.contrib.trace_utils import wrap +from ddtrace.internal.logger import get_logger +from ddtrace.internal.utils import get_argument_value +from ddtrace.llmobs._integrations import GoogleAdkIntegration +from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name + + +logger = get_logger(__name__) + +config._add("google_adk", {}) + + +def _supported_versions() -> Dict[str, str]: + return {"google.adk": ">=1.0.0"} + + +def get_version() -> str: + return getattr(adk, "__version__", "") + + +@with_traced_module +def _traced_agent_run_async(adk, pin, wrapped, instance, args, kwargs): + """Trace the main execution of an agent (async generator).""" + integration: GoogleAdkIntegration = adk._datadog_integration + provider_name, model_name = extract_provider_and_model_name(instance=instance.agent.model, model_name_attr="model") + + span = integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, wrapped.__name__), + provider=provider_name, + model=model_name, + kind="agent", + submit_to_llmobs=True, + **kwargs, + ) + + try: + agen = wrapped(*args, **kwargs) + except Exception: + span.set_exc_info(*sys.exc_info()) + span.finish() + raise + + async def _generator(): + response_events = [] + try: + async for event in agen: + response_events.append(event) + yield event + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + kwargs["instance"] = instance.agent + integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=response_events, operation="agent") + span.finish() + del kwargs["instance"] + + return _generator() + + +@with_traced_module +async def _traced_functions_call_tool_async(adk, pin, wrapped, instance, args, kwargs): + integration: GoogleAdkIntegration = adk._datadog_integration + agent = extract_agent_from_tool_context(args, kwargs) + if agent is None: + logger.warning("Unable to trace google adk tool call, could not extract agent from tool context.") + return await wrapped(*args, **kwargs) + + provider_name, model_name = extract_provider_and_model_name(instance=agent.model, model_name_attr="model") + instance = instance or args[0] + + with integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, wrapped.__name__), + provider=provider_name,
model=model_name, + kind="tool", + submit_to_llmobs=True, + ) as span: + result = None + try: + result = await wrapped(*args, **kwargs) + return result + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + integration.llmobs_set_tags( + span, + args=args, + kwargs=kwargs, + response=result, + operation="tool", + ) + + +@with_traced_module +async def _traced_functions_call_tool_live(adk, pin, wrapped, instance, args, kwargs): + integration: GoogleAdkIntegration = adk._datadog_integration + agent = extract_agent_from_tool_context(args, kwargs) + if agent is None: + logger.warning("Unable to trace google adk live tool call, could not extract agent from tool context.") + agen = wrapped(*args, **kwargs) + async for item in agen: + yield item + return + + provider_name, model_name = extract_provider_and_model_name(instance=agent.model, model_name_attr="model") + + with integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, wrapped.__name__), + provider=provider_name, + model=model_name, + kind="tool", + submit_to_llmobs=True, + ) as span: + result = None + try: + agen = wrapped(*args, **kwargs) + async for item in agen: + yield item + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + integration.llmobs_set_tags( + span, + args=args, + kwargs=kwargs, + response=result, + operation="tool", + ) + + +@with_traced_module +def _traced_code_executor_execute_code(adk, pin, wrapped, instance, args, kwargs): + """Trace the execution of code by the agent (sync).""" + integration: GoogleAdkIntegration = adk._datadog_integration + invocation_context = get_argument_value(args, kwargs, 0, "invocation_context") + agent = getattr(getattr(invocation_context, "agent", None), "model", {}) + provider_name, model_name = extract_provider_and_model_name(instance=agent, model_name_attr="model") + + # Signature: execute_code(self, invocation_context, code_execution_input) + with integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, wrapped.__name__), + provider=provider_name, + model=model_name, + kind="code_execute", + submit_to_llmobs=True, + ) as span: + result = None + try: + result = wrapped(*args, **kwargs) + return result + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + integration.llmobs_set_tags( + span, + args=args, + kwargs=kwargs, + response=result, + operation="code_execute", + ) + + +def extract_agent_from_tool_context(args: Any, kwargs: Any) -> Union[Any, None]: + tool_context = get_argument_value(args, kwargs, 2, "tool_context") + agent = None + if hasattr(tool_context, "_invocation_context") and hasattr(tool_context._invocation_context, "agent"): + agent = tool_context._invocation_context.agent + return agent + + +CODE_EXECUTOR_CLASSES = [ + "BuiltInCodeExecutor", # makes an external LLM tool call to use the LLM's built-in code executor + "VertexAiCodeExecutor", + "UnsafeLocalCodeExecutor", + "ContainerCodeExecutor", # additional package dependency +] + + +def patch(): + """Patch the `google.adk` library for tracing.""" + + if getattr(adk, "_datadog_patch", False): + return + + setattr(adk, "_datadog_patch", True) + Pin().onto(adk) + integration: GoogleAdkIntegration = GoogleAdkIntegration(integration_config=config.google_adk) + setattr(adk, "_datadog_integration", integration) + + # Agent entrypoints (async generators) + wrap("google.adk", "runners.Runner.run_async", _traced_agent_run_async(adk)) + wrap("google.adk", "runners.Runner.run_live", _traced_agent_run_async(adk)) + + # Tool execution (central
dispatch) + wrap("google.adk", "flows.llm_flows.functions.__call_tool_async", _traced_functions_call_tool_async(adk)) + wrap("google.adk", "flows.llm_flows.functions.__call_tool_live", _traced_functions_call_tool_live(adk)) + + # Code executors + for code_executor in CODE_EXECUTOR_CLASSES: + if check_module_path(adk, f"code_executors.{code_executor}.execute_code"): + wrap( + "google.adk", + f"code_executors.{code_executor}.execute_code", + _traced_code_executor_execute_code(adk), + ) + + +def unpatch(): + """Unpatch the `google.adk` library.""" + if not hasattr(adk, "_datadog_patch") or not getattr(adk, "_datadog_patch"): + return + setattr(adk, "_datadog_patch", False) + + unwrap(adk.runners.Runner, "run_async") + unwrap(adk.runners.Runner, "run_live") + + unwrap(adk.flows.llm_flows.functions, "__call_tool_async") + unwrap(adk.flows.llm_flows.functions, "__call_tool_live") + + # Code executors + for code_executor in CODE_EXECUTOR_CLASSES: + if check_module_path(adk, f"code_executors.{code_executor}.execute_code"): + unwrap(getattr(adk.code_executors, code_executor), "execute_code") + + delattr(adk, "_datadog_integration") diff --git a/ddtrace/contrib/internal/httplib/patch.py b/ddtrace/contrib/internal/httplib/patch.py index 919f1847b53..33b0216c87a 100644 --- a/ddtrace/contrib/internal/httplib/patch.py +++ b/ddtrace/contrib/internal/httplib/patch.py @@ -87,7 +87,7 @@ def _call_asm_wrap(func, instance, *args, **kwargs): def _wrap_request(func, instance, args, kwargs): # Use any attached tracer if available, otherwise use the global tracer - if asm_config._iast_enabled or (asm_config._asm_enabled and asm_config._ep_enabled): + if asm_config._asm_enabled and asm_config._ep_enabled: func_to_call = functools.partial(_call_asm_wrap, func, instance) else: func_to_call = func diff --git a/ddtrace/contrib/internal/pytest/_plugin_v2.py b/ddtrace/contrib/internal/pytest/_plugin_v2.py index 0d21b554fbc..f9d3696096f 100644 --- a/ddtrace/contrib/internal/pytest/_plugin_v2.py +++ b/ddtrace/contrib/internal/pytest/_plugin_v2.py @@ -1,6 +1,5 @@ import os from pathlib import Path -import re import typing as t from _pytest.runner import runtestprotocol @@ -101,11 +100,12 @@ log = get_logger(__name__) -_NODEID_REGEX = re.compile("^((?P.*)/(?P[^/]*?))::(?P.*?)$") OUTCOME_QUARANTINED = "quarantined" DISABLED_BY_TEST_MANAGEMENT_REASON = "Flaky test is disabled by Datadog" INCOMPATIBLE_PLUGINS = ("flaky", "rerunfailures") +skipped_suites = set() + skip_pytest_runtest_protocol = False # Module-level variable to store the current test's coverage collector @@ -151,13 +151,27 @@ def _handle_itr_should_skip(item, test_id) -> bool: if hasattr(item.config, "workeroutput"): if "itr_skipped_count" not in item.config.workeroutput: item.config.workeroutput["itr_skipped_count"] = 0 - item.config.workeroutput["itr_skipped_count"] += 1 + if not is_suite_skipping_mode: + item.config.workeroutput["itr_skipped_count"] += 1 return True return False +def _handle_itr_xdist_skipped_suite(item, suite_id) -> bool: + if suite_id in skipped_suites: + log.debug("Suite is already skipped") + return False + + if hasattr(item.config, "workeroutput"): + if "itr_skipped_count" not in item.config.workeroutput: + item.config.workeroutput["itr_skipped_count"] = 0 + item.config.workeroutput["itr_skipped_count"] += 1 + skipped_suites.add(suite_id) + return True + + def _handle_test_management(item, test_id): """Add a user property to identify quarantined tests, and mark them for skipping if quarantine is enabled in skipping mode. 
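The xdist suite-skip accounting above reduces to a once-per-suite counter guarded by the module-level ``skipped_suites`` set; a stripped-down sketch of the pattern (names other than ``itr_skipped_count`` are illustrative, not the plugin's API):

    # Sketch: count each ITR-skipped suite once per xdist worker.
    skipped_suites = set()

    def count_suite_skip(workeroutput: dict, suite_id: str) -> bool:
        if suite_id in skipped_suites:
            return False  # already counted for this worker
        workeroutput.setdefault("itr_skipped_count", 0)
        workeroutput["itr_skipped_count"] += 1
        skipped_suites.add(suite_id)
        return True

    out = {}
    assert count_suite_skip(out, "tests/suite_a") is True
    assert count_suite_skip(out, "tests/suite_a") is False
    assert out["itr_skipped_count"] == 1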
@@ -605,6 +619,7 @@ def _pytest_runtest_protocol_post_yield(item, nextitem, coverage_collector): if next_test_id is None or next_test_id.parent_id != suite_id: if InternalTestSuite.is_itr_skippable(suite_id) and not InternalTestSuite.was_itr_forced_run(suite_id): InternalTestSuite.mark_itr_skipped(suite_id) + _handle_itr_xdist_skipped_suite(item, suite_id) else: _handle_coverage_dependencies(suite_id) InternalTestSuite.finish(suite_id) diff --git a/ddtrace/contrib/internal/requests/patch.py b/ddtrace/contrib/internal/requests/patch.py index dcfc2049438..8db2b85189d 100644 --- a/ddtrace/contrib/internal/requests/patch.py +++ b/ddtrace/contrib/internal/requests/patch.py @@ -54,12 +54,6 @@ def patch(): _w("requests", "Session.request", _wrap_request) Pin(_config=config.requests).onto(requests.Session) - if asm_config._iast_enabled: - from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink - from ddtrace.appsec._iast.constants import VULN_SSRF - - _set_metric_iast_instrumented_sink(VULN_SSRF) - def unpatch(): """Disable traced sessions""" diff --git a/ddtrace/contrib/internal/subprocess/patch.py b/ddtrace/contrib/internal/subprocess/patch.py index 8d954220f9a..29e5ba5b3b1 100644 --- a/ddtrace/contrib/internal/subprocess/patch.py +++ b/ddtrace/contrib/internal/subprocess/patch.py @@ -85,15 +85,14 @@ def del_lst_callback(name: str): def should_trace_subprocess(): - return not asm_config._bypass_instrumentation_for_waf and (asm_config._asm_enabled or asm_config._iast_enabled) + return not asm_config._bypass_instrumentation_for_waf and asm_config._asm_enabled def patch() -> List[str]: """Patch subprocess and os functions to enable security monitoring. This function instruments various subprocess and os functions to provide - security monitoring capabilities for AAP (Application Attack Protection) - and IAST (Interactive Application Security Testing). + security monitoring capabilities for AAP (Application Attack Protection). Note: Patching always occurs because AAP can be enabled dynamically via remote config. @@ -470,7 +469,7 @@ def _traced_ossystem(module, pin, wrapped, instance, args, kwargs): """Traced wrapper for os.system function. Note: - Only instruments when AAP or IAST is enabled and WAF bypass is not active. + Only instruments when AAP is enabled and WAF bypass is not active. Creates spans with shell command details, exit codes, and component tags. """ if should_trace_subprocess(): @@ -500,11 +499,11 @@ def _traced_fork(module, pin, wrapped, instance, args, kwargs): """Traced wrapper for os.fork function. Note: - Only instruments when AAP or IAST is enabled. + Only instruments when AAP is enabled. Creates spans with fork operation details. """ - if not (asm_config._asm_enabled or asm_config._iast_enabled): + if not asm_config._asm_enabled: return wrapped(*args, **kwargs) with pin.tracer.trace(COMMANDS.SPAN_NAME, resource="fork", span_type=SpanTypes.SYSTEM) as span: @@ -518,10 +517,10 @@ def _traced_osspawn(module, pin, wrapped, instance, args, kwargs): """Traced wrapper for os._spawnvef function (used by all os.spawn* variants). Note: - Only instruments when AAP or IAST is enabled. + Only instruments when AAP is enabled. Creates spans with spawn operation details and exit codes for P_WAIT mode. 
""" - if not (asm_config._asm_enabled or asm_config._iast_enabled): + if not asm_config._asm_enabled: return wrapped(*args, **kwargs) try: @@ -552,7 +551,7 @@ def _traced_subprocess_init(module, pin, wrapped, instance, args, kwargs): """Traced wrapper for subprocess.Popen.__init__ method. Note: - Only instruments when AAP or IAST is enabled and WAF bypass is not active. + Only instruments when AAP is enabled and WAF bypass is not active. Stores command details in context for later use by _traced_subprocess_wait. Creates a span that will be completed by the wait() method. """ @@ -594,7 +593,7 @@ def _traced_subprocess_wait(module, pin, wrapped, instance, args, kwargs): """Traced wrapper for subprocess.Popen.wait method. Note: - Only instruments when AAP or IAST is enabled and WAF bypass is not active. + Only instruments when AAP is enabled and WAF bypass is not active. Retrieves command details stored by _traced_subprocess_init and completes the span with execution results and exit code. """ diff --git a/ddtrace/contrib/internal/trace_utils.py b/ddtrace/contrib/internal/trace_utils.py index b0b66c2160c..55213a08bc0 100644 --- a/ddtrace/contrib/internal/trace_utils.py +++ b/ddtrace/contrib/internal/trace_utils.py @@ -646,3 +646,35 @@ def _convert_to_string(attr): else: return ensure_text(attr) return attr + + +def check_module_path(module, attr_path): + """ + Helper function to safely check if a nested attribute path exists on a module. + + Args: + module: The root module object + attr_path: Dot-separated path to the attribute (e.g., "flows.llm_flows.functions") + + Returns: + bool: True if the full path exists, False otherwise + + Example: + check_module_path(adk, "flows.llm_flows.functions.__call_tool_async") + check_module_path(adk, "agents.llm_agent.LlmAgent") + """ + if not module: + return False + + try: + current = module + for attr in attr_path.split("."): + if not hasattr(current, attr): + return False + current = getattr(current, attr) + + return True + except (ImportError, AttributeError): + # Some modules may raise ImportError when accessing attributes that require + # additional dependencies (e.g., ContainerCodeExecutor requiring Docker for google-adk and an extra pkg install) + return False diff --git a/ddtrace/contrib/internal/urllib/patch.py b/ddtrace/contrib/internal/urllib/patch.py index 725f723832f..a3a7a0d31f2 100644 --- a/ddtrace/contrib/internal/urllib/patch.py +++ b/ddtrace/contrib/internal/urllib/patch.py @@ -27,12 +27,6 @@ def patch(): _w("urllib.request", "urlopen", _wrap_open) - if asm_config._iast_enabled: - from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink - from ddtrace.appsec._iast.constants import VULN_SSRF - - _set_metric_iast_instrumented_sink(VULN_SSRF) - def unpatch(): """unpatch any previously patched modules""" diff --git a/ddtrace/contrib/internal/urllib3/patch.py b/ddtrace/contrib/internal/urllib3/patch.py index bb8748dd4e6..d4d2f9060e0 100644 --- a/ddtrace/contrib/internal/urllib3/patch.py +++ b/ddtrace/contrib/internal/urllib3/patch.py @@ -67,12 +67,6 @@ def patch(): _w("urllib3.request", "RequestMethods.request", _wrap_request) Pin().onto(urllib3.connectionpool.HTTPConnectionPool) - if asm_config._iast_enabled: - from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink - from ddtrace.appsec._iast.constants import VULN_SSRF - - _set_metric_iast_instrumented_sink(VULN_SSRF) - def unpatch(): """Disable trace for all urllib3 requests""" diff --git a/ddtrace/contrib/internal/webbrowser/patch.py 
b/ddtrace/contrib/internal/webbrowser/patch.py index c8a35239d92..973e8934127 100644 --- a/ddtrace/contrib/internal/webbrowser/patch.py +++ b/ddtrace/contrib/internal/webbrowser/patch.py @@ -27,12 +27,6 @@ def patch(): _w("webbrowser", "open", _wrap_open) - if asm_config._iast_enabled: - from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink - from ddtrace.appsec._iast.constants import VULN_SSRF - - _set_metric_iast_instrumented_sink(VULN_SSRF) - def unpatch(): """unpatch any previously patched modules""" diff --git a/ddtrace/debugging/_debugger.py b/ddtrace/debugging/_debugger.py index 40b13ce7163..3ca47be32ad 100644 --- a/ddtrace/debugging/_debugger.py +++ b/ddtrace/debugging/_debugger.py @@ -43,7 +43,7 @@ from ddtrace.debugging._signal.collector import SignalCollector from ddtrace.debugging._signal.model import Signal from ddtrace.debugging._signal.model import SignalState -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader from ddtrace.debugging._uploader import UploaderProduct from ddtrace.internal import core from ddtrace.internal.logger import get_logger @@ -193,7 +193,7 @@ class Debugger(Service): _probe_meter = _probe_metrics.get_meter("probe") __rc_adapter__ = ProbeRCAdapter - __uploader__ = LogsIntakeUploaderV1 + __uploader__ = SignalUploader __watchdog__ = DebuggerModuleWatchdog __logger__ = ProbeStatusLogger @@ -366,7 +366,7 @@ def _probe_injection_hook(self, module: ModuleType) -> None: f"Cannot install probe {probe.probe_id}: " f"no functions at line {line} within source file {module_origin} found" ) - log.error(message) + log.error(message, extra={"send_to_telemetry": False}) self._probe_registry.set_error(probe, "NoFunctionsAtLine", message) continue for function in (cast(FullyNamedContextWrappedFunction, _) for _ in functions): @@ -487,7 +487,7 @@ def _probe_wrapping_hook(self, module: ModuleType) -> None: "found (note: if the function exists, it might be decorated with an unsupported decorator)" ) self._probe_registry.set_error(probe, "NoFunctionInModule", message) - log.error(message) + log.error(message, extra={"send_to_telemetry": False}) continue if DebuggerWrappingContext.is_wrapped(function): diff --git a/ddtrace/debugging/_encoding.py b/ddtrace/debugging/_encoding.py index 701809a8593..3d4ecbc0f8c 100644 --- a/ddtrace/debugging/_encoding.py +++ b/ddtrace/debugging/_encoding.py @@ -13,6 +13,7 @@ from typing import Iterator from typing import List from typing import Optional +from typing import Tuple from typing import Union from ddtrace.debugging._config import di_config @@ -74,7 +75,7 @@ def put(self, item: Any) -> int: """Enqueue the given item and returns its encoded size.""" @abc.abstractmethod - def flush(self) -> Optional[Union[bytes, bytearray]]: + def flush(self) -> Optional[Tuple[Union[bytes, bytearray], int]]: """Flush the buffer and return the encoded data.""" @@ -341,7 +342,7 @@ def put_encoded(self, item: Snapshot, encoded: bytes) -> int: self._on_full(item, encoded) raise - def flush(self) -> Optional[Union[bytes, bytearray]]: + def flush(self) -> Optional[Tuple[Union[bytes, bytearray], int]]: with self._lock: if self.count == 0: # Reclaim memory @@ -349,9 +350,10 @@ def flush(self) -> Optional[Union[bytes, bytearray]]: return None encoded = self._buffer.flush() + count = self.count self.count = 0 self._full = False - return encoded + return encoded, count def is_full(self) -> bool: with self._lock: diff --git a/ddtrace/debugging/_exception/replay.py 
b/ddtrace/debugging/_exception/replay.py index 1b3a70bbfa3..f9af26a78e4 100644 --- a/ddtrace/debugging/_exception/replay.py +++ b/ddtrace/debugging/_exception/replay.py @@ -12,7 +12,7 @@ from ddtrace.debugging._session import Session from ddtrace.debugging._signal.snapshot import DEFAULT_CAPTURE_LIMITS from ddtrace.debugging._signal.snapshot import Snapshot -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader from ddtrace.debugging._uploader import UploaderProduct from ddtrace.internal import core from ddtrace.internal.logger import get_logger @@ -242,7 +242,7 @@ def get_snapshot_count(span: Span) -> int: class SpanExceptionHandler: - __uploader__ = LogsIntakeUploaderV1 + __uploader__ = SignalUploader _instance: t.Optional["SpanExceptionHandler"] = None diff --git a/ddtrace/debugging/_expressions.py b/ddtrace/debugging/_expressions.py index 3b3d1ef6b56..1efa4238aa0 100644 --- a/ddtrace/debugging/_expressions.py +++ b/ddtrace/debugging/_expressions.py @@ -39,6 +39,7 @@ from typing import Tuple from typing import Union +from bytecode import BinaryOp from bytecode import Bytecode from bytecode import Compare from bytecode import Instr @@ -99,6 +100,9 @@ def get_local(_locals: Mapping[str, Any], name: str) -> Any: class DDCompiler: + def __init__(self): + self._lambda_level = 0 + @classmethod def __getmember__(cls, o, a): return object.__getattribute__(o, a) @@ -128,7 +132,12 @@ def _make_function(self, ast: DDASTType, args: Tuple[str, ...], name: str) -> Fu return FunctionType(abstract_code.to_code(), {}, name, (), None) def _make_lambda(self, ast: DDASTType) -> Callable[[Any, Any], Any]: - return self._make_function(ast, ("_dd_it", "_dd_key", "_dd_value", "_locals"), "") + self._lambda_level += 1 + try: + return self._make_function(ast, ("_dd_it", "_dd_key", "_dd_value", "_locals"), "") + finally: + assert self._lambda_level > 0 # nosec + self._lambda_level -= 1 def _compile_direct_predicate(self, ast: DDASTType) -> Optional[List[Instr]]: # direct_predicate => {"": } @@ -248,6 +257,9 @@ def _compile_direct_operation(self, ast: DDASTType) -> Optional[List[Instr]]: return None if arg in {"@it", "@key", "@value"}: + if self._lambda_level <= 0: + msg = f"Invalid use of {arg} outside of lambda" + raise ValueError(msg) return [Instr("LOAD_FAST", f"_dd_{arg[1:]}")] return self._call_function( @@ -290,7 +302,12 @@ def _compile_arg_operation(self, ast: DDASTType) -> Optional[List[Instr]]: raise ValueError("Invalid argument: %r" % a) if cb is None: raise ValueError("Invalid argument: %r" % b) - return cv + ca + cb + [Instr("BUILD_SLICE", 2), Instr("BINARY_SUBSCR")] + + if PY >= (3, 14): + subscr_instruction = Instr("BINARY_OP", BinaryOp.SUBSCR) + else: + subscr_instruction = Instr("BINARY_SUBSCR") + return cv + ca + cb + [Instr("BUILD_SLICE", 2), subscr_instruction] if _type == "filter": a, b = args diff --git a/ddtrace/debugging/_function/discovery.py b/ddtrace/debugging/_function/discovery.py index f811c702d26..7fc219f6ef1 100644 --- a/ddtrace/debugging/_function/discovery.py +++ b/ddtrace/debugging/_function/discovery.py @@ -1,6 +1,5 @@ from collections import defaultdict from collections import deque -from pathlib import Path from types import CodeType from types import FunctionType from types import ModuleType @@ -24,6 +23,7 @@ from ddtrace.internal.utils.inspection import collect_code_objects from ddtrace.internal.utils.inspection import functions_for_code from ddtrace.internal.utils.inspection import linenos +from 
ddtrace.internal.utils.inspection import resolved_code_origin from ddtrace.internal.utils.inspection import undecorated @@ -209,7 +209,7 @@ def _collect_functions(module: ModuleType) -> Dict[str, _FunctionCodePair]: for name in (k, local_name) if isinstance(k, str) and k != local_name else (local_name,): fullname = ".".join((c.__fullname__, name)) if c.__fullname__ else name - if fullname not in functions or Path(code.co_filename).resolve() == path: + if fullname not in functions or resolved_code_origin(code) == path: # Give precedence to code objects from the module and # try to retrieve any potentially decorated function so # that we don't end up returning the decorator function @@ -285,7 +285,7 @@ def __init__(self, module: ModuleType) -> None: if ( function not in seen_functions - and Path(cast(FunctionType, function).__code__.co_filename).resolve() == module_path + and resolved_code_origin(cast(FunctionType, function).__code__) == module_path ): # We only map line numbers for functions that actually belong to # the module. @@ -342,7 +342,7 @@ def _resolve_pair(self, pair: _FunctionCodePair, fullname: str) -> FullyNamedFun code = pair.code assert code is not None # nosec - f = undecorated(cast(FunctionType, target), cast(str, part), Path(code.co_filename).resolve()) + f = undecorated(cast(FunctionType, target), cast(str, part), resolved_code_origin(code)) if not (isinstance(f, FunctionType) and f.__code__ is code): raise e diff --git a/ddtrace/debugging/_origin/span.py b/ddtrace/debugging/_origin/span.py index 819dc7db401..dae336dd6c7 100644 --- a/ddtrace/debugging/_origin/span.py +++ b/ddtrace/debugging/_origin/span.py @@ -20,7 +20,7 @@ from ddtrace.debugging._session import Session from ddtrace.debugging._signal.collector import SignalCollector from ddtrace.debugging._signal.snapshot import Snapshot -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader from ddtrace.debugging._uploader import UploaderProduct from ddtrace.ext import EXIT_SPAN_TYPES from ddtrace.internal import core @@ -102,7 +102,7 @@ def from_frame(cls, frame: FrameType) -> "ExitSpanProbe": ExitSpanProbe, cls.build( name=code.co_qualname if sys.version_info >= (3, 11) else code.co_name, # type: ignore[attr-defined] - filename=str(Path(code.co_filename).resolve()), + filename=str(Path(code.co_filename)), line=frame.f_lineno, ), ) @@ -197,7 +197,7 @@ def __exit__(self, exc_type, exc_value, traceback): @dataclass class SpanCodeOriginProcessorEntry: - __uploader__ = LogsIntakeUploaderV1 + __uploader__ = SignalUploader _instance: t.Optional["SpanCodeOriginProcessorEntry"] = None _handler: t.Optional[t.Callable] = None @@ -232,7 +232,7 @@ def disable(cls): @dataclass class SpanCodeOriginProcessorExit(SpanProcessor): - __uploader__ = LogsIntakeUploaderV1 + __uploader__ = SignalUploader _instance: t.Optional["SpanCodeOriginProcessorExit"] = None diff --git a/ddtrace/debugging/_probe/registry.py b/ddtrace/debugging/_probe/registry.py index 83a56ad40f9..e1a578794b4 100644 --- a/ddtrace/debugging/_probe/registry.py +++ b/ddtrace/debugging/_probe/registry.py @@ -203,7 +203,7 @@ def unregister(self, *probes: Probe) -> List[Probe]: def get_pending(self, location: str) -> List[Probe]: """Get the currently pending probes by location.""" - return self._pending[location] + return self._pending[location].copy() def __contains__(self, probe: object) -> bool: """Check if a probe is in the registry.""" diff --git a/ddtrace/debugging/_signal/collector.py 
b/ddtrace/debugging/_signal/collector.py index 1017231e863..0d6125cdc70 100644 --- a/ddtrace/debugging/_signal/collector.py +++ b/ddtrace/debugging/_signal/collector.py @@ -37,7 +37,10 @@ def __init__(self, tracks: Dict[SignalTrack, BufferedEncoder]) -> None: def _enqueue(self, log_signal: LogSignal) -> None: try: log.debug( - "[%s][P: %s] SignalCollector enqueu signal on track %s", os.getpid(), os.getppid(), log_signal.__track__ + "[%s][P: %s] SignalCollector enqueue signal on track %s", + os.getpid(), + os.getppid(), + log_signal.__track__, ) self._tracks[log_signal.__track__].put(log_signal) except BufferFull: diff --git a/ddtrace/debugging/_signal/utils.py b/ddtrace/debugging/_signal/utils.py index c0b650ce06a..3a338566c61 100644 --- a/ddtrace/debugging/_signal/utils.py +++ b/ddtrace/debugging/_signal/utils.py @@ -359,4 +359,11 @@ def capture_value( elif len(fields) > maxfields: data["notCapturedReason"] = "fieldCount" + if _isinstance(value, BaseException): + # DEV: Celery doesn't like that we store references to these objects so we + # delete them as soon as we're done with them. + for attr in ("args", "__cause__", "__context__", "__suppress_context__"): + if attr in fields: + del fields[attr] + return data diff --git a/ddtrace/debugging/_uploader.py b/ddtrace/debugging/_uploader.py index a33a806aaec..bb21c570e16 100644 --- a/ddtrace/debugging/_uploader.py +++ b/ddtrace/debugging/_uploader.py @@ -13,14 +13,17 @@ from ddtrace.debugging._signal.collector import SignalCollector from ddtrace.debugging._signal.model import SignalTrack from ddtrace.internal import agent +from ddtrace.internal import logger from ddtrace.internal.logger import get_logger -from ddtrace.internal.periodic import ForksafeAwakeablePeriodicService from ddtrace.internal.utils.http import connector from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter -from ddtrace.internal.utils.time import HourGlass log = get_logger(__name__) +UNSUPPORTED_AGENT = "unsupported_agent" +logger.set_tag_rate_limit(UNSUPPORTED_AGENT, logger.HOUR) + + meter = metrics.get_meter("uploader") @@ -34,18 +37,26 @@ class UploaderProduct(str, Enum): @dataclass class UploaderTrack: + track: SignalTrack endpoint: str queue: SignalQueue + enabled: bool = True + + +class SignalUploaderError(Exception): + """Signal uploader error.""" + + pass -class LogsIntakeUploaderV1(ForksafeAwakeablePeriodicService): - """Logs intake uploader. +class SignalUploader(agent.AgentCheckPeriodicService): + """Signal uploader. - This class implements an interface with the debugger logs intake for both + This class implements an interface with the debugger signal intake for both the debugger and the events platform. 
""" - _instance: Optional["LogsIntakeUploaderV1"] = None + _instance: Optional["SignalUploader"] = None _products: Set[UploaderProduct] = set() _agent_endpoints: Set[str] = set() @@ -57,9 +68,25 @@ class LogsIntakeUploaderV1(ForksafeAwakeablePeriodicService): def __init__(self, interval: Optional[float] = None) -> None: super().__init__(interval if interval is not None else di_config.upload_interval_seconds) - self._agent_endpoints_cache: HourGlass = HourGlass(duration=60.0) + self._endpoint_suffix = endpoint_suffix = ( + f"?ddtags={quote(di_config.tags)}" if di_config._tags_in_qs and di_config.tags else "" + ) - self.set_track_endpoints() + self._tracks = { + SignalTrack.LOGS: UploaderTrack( + track=SignalTrack.LOGS, + endpoint=f"/debugger/v1/input{endpoint_suffix}", + queue=self.__queue__( + encoder=LogSignalJsonEncoder(di_config.service_name), on_full=self._on_buffer_full + ), + ), + SignalTrack.SNAPSHOT: UploaderTrack( + track=SignalTrack.SNAPSHOT, + endpoint=f"/debugger/v2/input{endpoint_suffix}", # start optimistically + queue=self.__queue__(encoder=SnapshotJsonEncoder(di_config.service_name), on_full=self._on_buffer_full), + ), + } + self._collector = self.__collector__({t: ut.queue for t, ut in self._tracks.items()}) self._headers = { "Content-type": "application/json; charset=utf-8", "Accept": "text/plain", @@ -74,7 +101,7 @@ def __init__(self, interval: Optional[float] = None) -> None: )(self._write) log.debug( - "Logs intake uploader initialized (url: %s, endpoints: %s, interval: %f)", + "Signal uploader initialized (url: %s, endpoints: %s, interval: %f)", di_config._intake_url, {t: ut.endpoint for t, ut in self._tracks.items()}, self.interval, @@ -82,42 +109,43 @@ def __init__(self, interval: Optional[float] = None) -> None: self._flush_full = False - def set_track_endpoints(self) -> None: - if self._agent_endpoints_cache.trickling(): - return - - try: - agent_info = agent.info() - self._agent_endpoints = set(agent_info.get("endpoints", [])) if agent_info is not None else set() - except Exception: - pass # nosec B110 - finally: - self._agent_endpoints_cache.turn() - - snapshot_track = "/debugger/v1/input" - if "/debugger/v2/input" in self._agent_endpoints: - snapshot_track = "/debugger/v2/input" - elif "/debugger/v1/diagnostics" in self._agent_endpoints: - snapshot_track = "/debugger/v1/diagnostics" - - endpoint_suffix = f"?ddtags={quote(di_config.tags)}" if di_config._tags_in_qs and di_config.tags else "" - - self._tracks = { - SignalTrack.LOGS: UploaderTrack( - endpoint=f"/debugger/v1/input{endpoint_suffix}", - queue=self.__queue__( - encoder=LogSignalJsonEncoder(di_config.service_name), on_full=self._on_buffer_full - ), - ), - SignalTrack.SNAPSHOT: UploaderTrack( - endpoint=f"{snapshot_track}{endpoint_suffix}", - queue=self.__queue__(encoder=SnapshotJsonEncoder(di_config.service_name), on_full=self._on_buffer_full), - ), - } - self._collector = self.__collector__({t: ut.queue for t, ut in self._tracks.items()}) + def info_check(self, agent_info: Optional[dict]) -> bool: + if agent_info is None: + # Agent is unreachable + return False + + if "endpoints" not in agent_info: + # Agent not supported + log.debug("Unsupported Datadog agent detected. 
Please upgrade to 7.49.0.") + return False + + endpoints = set(agent_info.get("endpoints", [])) + snapshot_track = self._tracks[SignalTrack.SNAPSHOT] + snapshot_track.enabled = True + + if "/debugger/v2/input" in endpoints: + log.debug("Detected /debugger/v2/input endpoint") + snapshot_track.endpoint = f"/debugger/v2/input{self._endpoint_suffix}" + elif "/debugger/v1/diagnostics" in endpoints: + log.debug("Detected /debugger/v1/diagnostics endpoint fallback") + snapshot_track.endpoint = f"/debugger/v1/diagnostics{self._endpoint_suffix}" + else: + snapshot_track.enabled = False + log.warning( + UNSUPPORTED_AGENT, + extra={ + "product": "debugger", + "more_info": ( + "Unsupported Datadog agent detected. Snapshots from Dynamic Instrumentation/" + "Exception Replay/Code Origin for Spans will not be uploaded. " + "Please upgrade to version 7.49.0 or later" + ), + }, + ) + + return True def _write(self, payload: bytes, endpoint: str) -> None: - self.set_track_endpoints() try: with self._connect() as conn: conn.request("POST", endpoint, payload, headers=self._headers) @@ -125,9 +153,14 @@ def _write(self, payload: bytes, endpoint: str) -> None: if not (200 <= resp.status < 300): log.error("Failed to upload payload to endpoint %s: [%d] %r", endpoint, resp.status, resp.read()) meter.increment("upload.error", tags={"status": str(resp.status)}) + if 400 <= resp.status < 500: + msg = "Failed to upload payload" + raise SignalUploaderError(msg) else: meter.increment("upload.success") meter.distribution("upload.size", len(payload)) + except SignalUploaderError: + raise except Exception: log.error("Failed to write payload to endpoint %s", endpoint, exc_info=True) meter.increment("error") @@ -147,29 +180,44 @@ def reset(self) -> None: self._collector._tracks = {t: ut.queue for t, ut in self._tracks.items()} def _flush_track(self, track: UploaderTrack) -> None: - queue = track.queue - payload = queue.flush() - if payload is not None: + if (data := track.queue.flush()) is not None and track.enabled: + payload, count = data try: self._write_with_backoff(payload, track.endpoint) - meter.distribution("batch.cardinality", queue.count) + meter.distribution("batch.cardinality", count) + except SignalUploaderError: + if track.track is SignalTrack.SNAPSHOT and not track.endpoint.startswith("/debugger/v1/diagnostics"): + # Downgrade to diagnostics endpoint and retry once + track.endpoint = f"/debugger/v1/diagnostics{self._endpoint_suffix}" + log.debug("Downgrading snapshot endpoint to %s and trying again", track.endpoint) + self._write_with_backoff(payload, track.endpoint) + meter.distribution("batch.cardinality", count) + else: + raise # Propagate error to transition to agent check state except Exception: log.debug("Cannot upload logs payload", exc_info=True) - def periodic(self) -> None: - """Upload the buffer content to the logs intake.""" + def online(self) -> None: + """Upload the buffer content to the agent.""" if self._flush_full: # We received the signal to flush a full buffer self._flush_full = False - for track in self._tracks.values(): - if track.queue.is_full(): - self._flush_track(track) + for uploader_track in self._tracks.values(): + if uploader_track.queue.is_full(): + self._flush_track(uploader_track) for track in self._tracks.values(): if track.queue.count: self._flush_track(track) - on_shutdown = periodic + if not self._tracks[SignalTrack.SNAPSHOT].enabled: + # If the snapshot track is not enabled, we raise an exception to + # transition back to the agent check state in case we detect an + # agent 
that can handle snapshots safely. + msg = "Snapshot track not enabled" + raise ValueError(msg) + + on_shutdown = online @classmethod def get_collector(cls) -> SignalCollector: diff --git a/ddtrace/debugging/uploader.plantuml b/ddtrace/debugging/uploader.plantuml new file mode 100644 index 00000000000..29eb450d910 --- /dev/null +++ b/ddtrace/debugging/uploader.plantuml @@ -0,0 +1,34 @@ +@startuml +title Signal Uploader State Machine + +state "Agent Check" as AgentCheck + +[*] -u-> AgentCheck + +AgentCheck : check available\nendpoints + +state Online { + state "v2/input" as InputV2 + state "v1/diagnostics" as DiagnosticsV1 + state "No Snapshots" as NoSnapshots + + InputV2 : use the v2/input\nendpoint + DiagnosticsV1 : fallback to v1/diagnostics\nendpoint + NoSnapshots : agent does not support\nredacted snapshots +} + +AgentCheck --> AgentCheck : no info +AgentCheck --> InputV2 : v2/input +AgentCheck --> DiagnosticsV1 : v1/diagnostics +AgentCheck --> NoSnapshots : unsupported + +InputV2 --> AgentCheck : error +DiagnosticsV1 --> AgentCheck : error +NoSnapshots --> AgentCheck : error + +NoSnapshots --> AgentCheck : periodically + +InputV2 --> InputV2 : periodically +DiagnosticsV1 --> DiagnosticsV1 : periodically + +@enduml diff --git a/ddtrace/ext/aws.py b/ddtrace/ext/aws.py index acc0a6425e9..2b63b78bc00 100644 --- a/ddtrace/ext/aws.py +++ b/ddtrace/ext/aws.py @@ -82,7 +82,19 @@ def _add_api_param_span_tags(span, endpoint_name, params): span.set_tag_str("statemachinearn", state_machine_arn) +def get_aws_partition(region_name): + # type: (str) -> str + """Determine AWS partition from region name.""" + if region_name.startswith("cn-"): + return "aws-cn" + elif region_name.startswith("us-gov-"): + return "aws-us-gov" + + return "aws" + + AWSREGION = "aws.region" REGION = "region" +PARTITION = "aws.partition" AGENT = "aws.agent" OPERATION = "aws.operation" diff --git a/ddtrace/ext/azure_servicebus.py b/ddtrace/ext/azure_servicebus.py index 39971abf479..4a00ec545e3 100644 --- a/ddtrace/ext/azure_servicebus.py +++ b/ddtrace/ext/azure_servicebus.py @@ -1,2 +1,6 @@ +CLOUD = "azure" SERVICE = "servicebus" -PRODUCE = "azure.servicebus.send" + +SEND = "send" +CREATE = "create" +RECEIVE = "receive" diff --git a/ddtrace/ext/http.py b/ddtrace/ext/http.py index 0a47e57a6c7..4c47d918add 100644 --- a/ddtrace/ext/http.py +++ b/ddtrace/ext/http.py @@ -18,6 +18,7 @@ CLIENT_IP = "http.client_ip" ROUTE = "http.route" REFERRER_HOSTNAME = "http.referrer_hostname" +ENDPOINT = "http.endpoint" # HTTP headers REFERER_HEADER = "referer" diff --git a/ddtrace/ext/net.py b/ddtrace/ext/net.py index eda2f072fe2..f7414de70ac 100644 --- a/ddtrace/ext/net.py +++ b/ddtrace/ext/net.py @@ -6,6 +6,7 @@ TARGET_HOST = "out.host" TARGET_PORT = "network.destination.port" TARGET_IP = "network.destination.ip" +TARGET_NAME = "network.destination.name" SERVER_ADDRESS = "server.address" diff --git a/ddtrace/internal/_encoding.pyx b/ddtrace/internal/_encoding.pyx index e9255b5b85e..6b11aaf94d1 100644 --- a/ddtrace/internal/_encoding.pyx +++ b/ddtrace/internal/_encoding.pyx @@ -148,7 +148,6 @@ cdef inline int pack_text(msgpack_packer *pk, object text) except? 
-1: if PyBytesLike_Check(text): L = len(text) if L > MAX_SPAN_META_VALUE_LEN: - PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(text).tp_name) text = truncate_string(text) L = len(text) ret = msgpack_pack_raw(pk, L) diff --git a/ddtrace/internal/agent.py b/ddtrace/internal/agent.py index 7a024ca8f0e..c420fedb611 100644 --- a/ddtrace/internal/agent.py +++ b/ddtrace/internal/agent.py @@ -1,6 +1,9 @@ +import abc import json +import typing as t from ddtrace.internal.logger import get_logger +from ddtrace.internal.periodic import ForksafeAwakeablePeriodicService from ddtrace.settings._agent import config from .utils.http import get_connection @@ -29,3 +32,38 @@ def info(url=None): return None return json.loads(data) + + +class AgentCheckPeriodicService(ForksafeAwakeablePeriodicService, metaclass=abc.ABCMeta): + def __init__(self, interval: float = 0.0): + super().__init__(interval=interval) + + self._state = self._agent_check + + @abc.abstractmethod + def info_check(self, agent_info: t.Optional[dict]) -> bool: + ... + + def _agent_check(self) -> None: + try: + agent_info = info() + except Exception: + agent_info = None + + if self.info_check(agent_info): + self._state = self._online + self._online() + + def _online(self) -> None: + try: + self.online() + except Exception: + self._state = self._agent_check + log.debug("Error during online operation, reverting to agent check", exc_info=True) + + @abc.abstractmethod + def online(self) -> None: + ... + + def periodic(self) -> None: + return self._state() diff --git a/ddtrace/internal/assembly.py b/ddtrace/internal/assembly.py index c1740192540..0d099ed9af2 100644 --- a/ddtrace/internal/assembly.py +++ b/ddtrace/internal/assembly.py @@ -46,7 +46,7 @@ def transform_instruction(opcode: str, arg: t.Any) -> t.Tuple[str, t.Any]: opcode = "LOAD_ATTR" arg = (True, arg) elif opcode.upper() == "LOAD_ATTR" and not isinstance(arg, tuple): - arg = (False, arg) + arg = (sys.version_info >= (3, 14), arg) return opcode, arg @@ -157,6 +157,11 @@ def parse_try_end(self, line: str) -> t.Optional[bc.TryEnd]: def parse_opcode(self, text: str) -> str: opcode = text.upper() + + # `dis` doesn't include `LOAD_METHOD` in 3.14.0rc1 + if sys.version_info >= (3, 14) and opcode == "LOAD_METHOD": + return opcode + if opcode not in dis.opmap: raise ValueError("unknown opcode %s" % opcode) diff --git a/ddtrace/internal/bytecode_injection/__init__.py b/ddtrace/internal/bytecode_injection/__init__.py index b31e52e3140..a151fbea654 100644 --- a/ddtrace/internal/bytecode_injection/__init__.py +++ b/ddtrace/internal/bytecode_injection/__init__.py @@ -30,8 +30,8 @@ class InvalidLine(Exception): # the stack to the state prior to the call. 
INJECTION_ASSEMBLY = Assembly() -if PY >= (3, 14): - raise NotImplementedError("Python >= 3.14 is not supported yet") +if PY >= (3, 15): + raise NotImplementedError("Python >= 3.15 is not supported yet") elif PY >= (3, 13): INJECTION_ASSEMBLY.parse( r""" diff --git a/ddtrace/internal/ci_visibility/api/_suite.py b/ddtrace/internal/ci_visibility/api/_suite.py index d454488dc3f..7a3e199bd5b 100644 --- a/ddtrace/internal/ci_visibility/api/_suite.py +++ b/ddtrace/internal/ci_visibility/api/_suite.py @@ -69,6 +69,9 @@ def finish_itr_skipped(self) -> None: ) return + # Only count for suite-level skipping mode, not test-level + if self._session_settings.itr_test_skipping_level == ITR_SKIPPING_LEVEL.SUITE: + self.count_itr_skipped() self.mark_itr_skipped() self.finish() diff --git a/ddtrace/internal/ci_visibility/api/_test.py b/ddtrace/internal/ci_visibility/api/_test.py index 3ef346370cc..6ab5dd86c49 100644 --- a/ddtrace/internal/ci_visibility/api/_test.py +++ b/ddtrace/internal/ci_visibility/api/_test.py @@ -260,7 +260,8 @@ def count_itr_skipped(self) -> None: def finish_itr_skipped(self) -> None: log.debug("Finishing Test Visibility test %s with ITR skipped", self) - self.count_itr_skipped() + if self._session_settings.itr_test_skipping_level == ITR_SKIPPING_LEVEL.TEST: + self.count_itr_skipped() self.mark_itr_skipped() self.finish_test(TestStatus.SKIP) diff --git a/ddtrace/internal/constants.py b/ddtrace/internal/constants.py index c392961e31b..efdcb44f8e3 100644 --- a/ddtrace/internal/constants.py +++ b/ddtrace/internal/constants.py @@ -78,13 +78,12 @@ EXTERNAL_ENV_HEADER_NAME = "Datadog-External-Env" EXTERNAL_ENV_ENVIRONMENT_VARIABLE = "DD_EXTERNAL_ENV" +MESSAGING_BATCH_COUNT = "messaging.batch_count" MESSAGING_DESTINATION_NAME = "messaging.destination.name" MESSAGING_MESSAGE_ID = "messaging.message_id" MESSAGING_OPERATION = "messaging.operation" MESSAGING_SYSTEM = "messaging.system" -NETWORK_DESTINATION_NAME = "network.destination.name" - FLASK_ENDPOINT = "flask.endpoint" FLASK_VIEW_ARGS = "flask.view_args" FLASK_URL_RULE = "flask.url_rule" diff --git a/ddtrace/internal/core/crashtracking.py b/ddtrace/internal/core/crashtracking.py index 7804d9fc739..4d33e926acb 100644 --- a/ddtrace/internal/core/crashtracking.py +++ b/ddtrace/internal/core/crashtracking.py @@ -26,6 +26,10 @@ from ddtrace.internal.native._native import crashtracker_init from ddtrace.internal.native._native import crashtracker_on_fork from ddtrace.internal.native._native import crashtracker_status + from ddtrace.internal.native._native import crashtracker_register_native_runtime_callback + from ddtrace.internal.native._native import crashtracker_is_runtime_callback_registered + from ddtrace.internal.native._native import crashtracker_get_registered_runtime_type + from ddtrace.internal.native._native import CallbackResult except ImportError: is_available = False @@ -152,6 +156,16 @@ def start(additional_tags: Optional[Dict[str, str]] = None) -> bool: crashtracker_init(config, receiver_config, metadata) + if ( + crashtracker_config.stacktrace_resolver is not None and + crashtracker_config.stacktrace_resolver != "none" + ): + result = crashtracker_register_native_runtime_callback() + # Shouldn't block on this, but log an error if it fails + if result != CallbackResult.Ok: + print(f"Failed to register runtime callback: {result}", file=sys.stderr) + return False + def crashtracker_fork_handler(): # We recreate the args here mainly to pass updated runtime_id after # fork @@ -169,3 +183,57 @@ def crashtracker_fork_handler(): 
print(f"Failed to start crashtracker: {e}", file=sys.stderr) return False return True + + +def register_runtime_callback() -> bool: + """ + Register the native runtime callback for stack collection during crashes. + + This should be called after crashtracker initialization to enable Python + runtime stack trace collection in crash reports. The callback provides + frame-by-frame Python stack traces with proper context information. + + Returns: + bool: True if callback was registered successfully, False otherwise + """ + if not is_available: + return False + + try: + result = crashtracker_register_native_runtime_callback() + return result == CallbackResult.Ok + except Exception as e: + print(f"Failed to register runtime callback: {e}", file=sys.stderr) + return False + + +def is_runtime_callback_registered() -> bool: + """ + Check if a runtime callback is currently registered. + + Returns: + bool: True if a callback is registered, False otherwise + """ + if not is_available: + return False + + try: + return crashtracker_is_runtime_callback_registered() + except Exception: + return False + + +def get_registered_runtime_type() -> Optional[str]: + """ + Get the runtime type of the currently registered callback. + + Returns: + Optional[str]: The runtime type ("python") if registered, None otherwise + """ + if not is_available: + return None + + try: + return crashtracker_get_registered_runtime_type() + except Exception: + return None diff --git a/ddtrace/internal/core/event_hub.py b/ddtrace/internal/core/event_hub.py index 7d4ecd3cba1..8860f2d6793 100644 --- a/ddtrace/internal/core/event_hub.py +++ b/ddtrace/internal/core/event_hub.py @@ -3,7 +3,6 @@ from typing import Any from typing import Callable from typing import Dict -from typing import List from typing import Optional from typing import Tuple @@ -11,7 +10,6 @@ _listeners: Dict[str, Dict[Any, Callable[..., Any]]] = {} -_all_listeners: List[Callable[[str, Tuple[Any, ...]], None]] = [] class ResultType(enum.Enum): @@ -61,45 +59,26 @@ def on(event_id: str, callback: Callable[..., Any], name: Any = None) -> None: _listeners[event_id][name] = callback -def on_all(callback: Callable[..., Any]) -> None: - """Register a listener for all events emitted""" - global _all_listeners - if callback not in _all_listeners: - _all_listeners.insert(0, callback) - - def reset(event_id: Optional[str] = None, callback: Optional[Callable[..., Any]] = None) -> None: """Remove all registered listeners. If an event_id is provided, only clear those event listeners. If a callback is provided, then only the listeners for that callback are removed. """ global _listeners - global _all_listeners if callback: - if not event_id: - _all_listeners = [cb for cb in _all_listeners if cb != callback] - elif event_id in _listeners: + if event_id in _listeners: _listeners[event_id] = {name: cb for name, cb in _listeners[event_id].items() if cb != callback} else: if not event_id: _listeners.clear() - _all_listeners.clear() elif event_id in _listeners: del _listeners[event_id] def dispatch(event_id: str, args: Tuple[Any, ...] = ()) -> None: """Call all hooks for the provided event_id with the provided args""" - global _all_listeners global _listeners - for hook in _all_listeners: - try: - hook(event_id, args) - except Exception: - if config._raise: - raise - if event_id not in _listeners: return @@ -116,14 +95,6 @@ def dispatch_with_results(event_id: str, args: Tuple[Any, ...] 
= ()) -> EventRes returning the results and exceptions from the called hooks """ global _listeners - global _all_listeners - - for hook in _all_listeners: - try: - hook(event_id, args) - except Exception: - if config._raise: - raise if event_id not in _listeners: return _MissingEventDict diff --git a/ddtrace/internal/coverage/code.py b/ddtrace/internal/coverage/code.py index 5227e08fa42..e56022fa491 100644 --- a/ddtrace/internal/coverage/code.py +++ b/ddtrace/internal/coverage/code.py @@ -20,6 +20,7 @@ from ddtrace.internal.packages import purelib_path from ddtrace.internal.packages import stdlib_path from ddtrace.internal.test_visibility.coverage_lines import CoverageLines +from ddtrace.internal.utils.inspection import resolved_code_origin log = get_logger(__name__) @@ -317,7 +318,7 @@ def transform(self, code: CodeType, _module: ModuleType) -> CodeType: if _module is None: return code - code_path = Path(code.co_filename).resolve() + code_path = resolved_code_origin(code) if not any(code_path.is_relative_to(include_path) for include_path in self._include_paths): # Not a code object we want to instrument diff --git a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt index 917889648f0..7760cfe5c3a 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt @@ -41,7 +41,7 @@ endif() # Add echion set(ECHION_COMMIT - "158a50f5bba99bce486b007d4c5ee586836610db" # https://github.com/P403n1x87/echion/commit/158a50f5bba99bce486b007d4c5ee586836610db + "576ff5353fc4ed91c283a383b1eb1b32110b42cc" # https://github.com/P403n1x87/echion/commit/576ff5353fc4ed91c283a383b1eb1b32110b42cc CACHE STRING "Commit hash of echion to use") FetchContent_Declare( echion diff --git a/ddtrace/internal/datadog/profiling/stack_v2/__init__.pyi b/ddtrace/internal/datadog/profiling/stack_v2/__init__.pyi new file mode 100644 index 00000000000..87bd4598fb4 --- /dev/null +++ b/ddtrace/internal/datadog/profiling/stack_v2/__init__.pyi @@ -0,0 +1,5 @@ +def register_thread(id: int, native_id: int, name: str) -> None: ... # noqa: A002 +def unregister_thread(name: str) -> None: ... + +is_available: bool +failure_msg: str diff --git a/ddtrace/internal/module.py b/ddtrace/internal/module.py index 7ad11601017..5baedfdf42b 100644 --- a/ddtrace/internal/module.py +++ b/ddtrace/internal/module.py @@ -111,21 +111,34 @@ def unregister_post_run_module_hook(hook: ModuleHookType) -> None: def origin(module: ModuleType) -> t.Optional[Path]: """Get the origin source file of the module.""" try: - # DEV: Use object.__getattribute__ to avoid potential side-effects. - orig = Path(object.__getattribute__(module, "__file__")).resolve() - except (AttributeError, TypeError): - # Module is probably only partially initialised, so we look at its - # spec instead + # Do not access __dd_origin__ directly to avoid force-loading lazy + # modules. + return object.__getattribute__(module, "__dd_origin__") + except AttributeError: try: # DEV: Use object.__getattribute__ to avoid potential side-effects. - orig = Path(object.__getattribute__(module, "__spec__").origin).resolve() - except (AttributeError, ValueError, TypeError): - orig = None + orig = Path(object.__getattribute__(module, "__file__")).resolve() + except (AttributeError, TypeError): + # Module is probably only partially initialised, so we look at its + # spec instead + try: + # DEV: Use object.__getattribute__ to avoid potential side-effects. 
+                orig = Path(object.__getattribute__(module, "__spec__").origin).resolve()
+            except (AttributeError, ValueError, TypeError):
+                orig = None

-    if orig is not None and orig.is_file():
-        return orig.with_suffix(".py") if orig.suffix == ".pyc" else orig
+    if orig is not None and orig.suffix == ".pyc":
+        orig = orig.with_suffix(".py")

-    return None
+    if orig is not None:
+        # If we failed to find a valid origin we don't cache the value and
+        # try again the next time.
+        try:
+            module.__dd_origin__ = orig  # type: ignore[attr-defined]
+        except AttributeError:
+            pass
+
+    return orig


 def _resolve(path: Path) -> t.Optional[Path]:
diff --git a/ddtrace/internal/native/__init__.py b/ddtrace/internal/native/__init__.py
index 73c967ce031..bcc2325497a 100644
--- a/ddtrace/internal/native/__init__.py
+++ b/ddtrace/internal/native/__init__.py
@@ -15,6 +15,7 @@ from ._native import SerializationError  # noqa: F401
 from ._native import TraceExporter  # noqa: F401
 from ._native import TraceExporterBuilder  # noqa: F401
+from ._native import logger  # noqa: F401
 from ._native import store_metadata  # noqa: F401
diff --git a/ddtrace/internal/native/_native.pyi b/ddtrace/internal/native/_native.pyi
index c388b3efbf5..705132c2527 100644
--- a/ddtrace/internal/native/_native.pyi
+++ b/ddtrace/internal/native/_native.pyi
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional
+from typing import Dict, List, Literal, Optional

 class DDSketch:
     def __init__(self): ...
@@ -90,6 +90,12 @@ class CrashtrackerStatus:
     Initialized: "CrashtrackerStatus"
     FailedToInitialize: "CrashtrackerStatus"

+class CallbackResult:
+    Ok: "CallbackResult"
+    AlreadyRegistered: "CallbackResult"
+    NullCallback: "CallbackResult"
+    UnknownError: "CallbackResult"
+
 def crashtracker_init(
     config: CrashtrackerConfiguration, receiver_config: CrashtrackerReceiverConfig, metadata: CrashtrackerMetadata
 ) -> None: ...
@@ -98,6 +104,9 @@ def crashtracker_on_fork(
 ) -> None: ...
 def crashtracker_status() -> CrashtrackerStatus: ...
 def crashtracker_receiver() -> None: ...
+def crashtracker_register_native_runtime_callback() -> CallbackResult: ...
+def crashtracker_is_runtime_callback_registered() -> bool: ...
+def crashtracker_get_registered_runtime_type() -> Optional[str]: ...

 class PyTracerMetadata:
     """
@@ -232,6 +241,12 @@ class TraceExporterBuilder:
         :param version: The version string of the application.
         """
         ...
+    def set_service(self, service: str) -> TraceExporterBuilder:
+        """
+        Set the service name of the TraceExporter.
+        :param service: The service name of the application.
+        """
+        ...
     def set_git_commit_sha(self, git_commit_sha: str) -> TraceExporterBuilder:
         """
         Set the git commit sha of the TraceExporter.
@@ -317,6 +332,11 @@ class TraceExporterBuilder:
         :param runtime_id: The runtime id to use for telemetry.
         """
         ...
+    def enable_health_metrics(self) -> TraceExporterBuilder:
+        """
+        Enable health metrics in the TraceExporter.
+        """
+        ...
     def build(self) -> TraceExporter:
         """
         Build and return a TraceExporter instance with the configured settings.
@@ -346,6 +366,57 @@ class BuilderError(Exception):
     ...

+class logger:
+    """
+    Native logging module for configuring and managing log output.
+    """
+
+    @staticmethod
+    def configure(
+        output: Literal["stdout", "stderr", "file"] = "stdout",
+        path: Optional[str] = None,
+        max_files: Optional[int] = None,
+        max_size_bytes: Optional[int] = None,
+    ) -> None:
+        """
+        Configure the logger with the specified output destination.
+
+        :param output: Output destination ("stdout", "stderr", or "file")
+        :param path: File path (required if output is "file")
+        :param max_files: Maximum number of log files to keep (for file output)
+        :param max_size_bytes: Maximum size of each log file in bytes (for file output)
+        :raises ValueError: If configuration is invalid
+        """
+        ...
+    @staticmethod
+    def disable(output: str) -> None:
+        """
+        Disable logging output by type.
+
+        :param output: Output type to disable ("file", "stdout", or "stderr")
+        :raises ValueError: If output type is invalid
+        """
+        ...
+    @staticmethod
+    def set_log_level(level: str) -> None:
+        """
+        Set the log level for the logger.
+
+        :param level: Log level ("trace", "debug", "info", "warn", or "error")
+        :raises ValueError: If log level is invalid
+        """
+        ...
+    @staticmethod
+    def log(level: str, message: str) -> None:
+        """
+        Log a message at the given level.
+
+        :param level: Log level ("trace", "debug", "info", "warn", or "error")
+        :param message: Message to be displayed in the log.
+        :raises ValueError: If log level is invalid
+        """
+        ...
+
 class DeserializationError(Exception):
     """
     Raised when there is an error deserializing trace payload.
diff --git a/ddtrace/internal/packages.py b/ddtrace/internal/packages.py
index 90efda331e0..d6fe6f16fae 100644
--- a/ddtrace/internal/packages.py
+++ b/ddtrace/internal/packages.py
@@ -99,6 +99,21 @@ def _effective_root(rel_path: Path, parent: Path) -> str:
     return base if root.is_dir() and (root / "__init__.py").exists() else "/".join(rel_path.parts[:2])


+# DEV: Since we can't lock on sys.path, these operations can be racy.
+_SYS_PATH_HASH: t.Optional[int] = None
+_RESOLVED_SYS_PATH: t.List[Path] = []
+
+
+def resolve_sys_path() -> t.List[Path]:
+    global _SYS_PATH_HASH, _RESOLVED_SYS_PATH
+
+    if (h := hash(tuple(sys.path))) != _SYS_PATH_HASH:
+        _SYS_PATH_HASH = h
+        _RESOLVED_SYS_PATH = [Path(_).resolve() for _ in sys.path]
+
+    return _RESOLVED_SYS_PATH
+
+
 def _root_module(path: Path) -> str:
     # Try the most likely prefixes first
     for parent_path in (purelib_path, platlib_path):
@@ -112,7 +127,7 @@ def _root_module(path: Path) -> str:
     # Try to resolve the root module using sys.path. We keep the shortest
     # relative path as the one more likely to give us the root module.
min_relative_path = max_parent_path = None - for parent_path in (Path(_).resolve() for _ in sys.path): + for parent_path in resolve_sys_path(): try: relative = path.relative_to(parent_path) if min_relative_path is None or len(relative.parents) < len(min_relative_path.parents): @@ -240,7 +255,9 @@ def module_to_package(module: ModuleType) -> t.Optional[Distribution]: @cached(maxsize=256) def is_stdlib(path: Path) -> bool: - rpath = path.resolve() + rpath = path + if not rpath.is_absolute() or rpath.is_symlink(): + rpath = rpath.resolve() return (rpath.is_relative_to(stdlib_path) or rpath.is_relative_to(platstdlib_path)) and not ( rpath.is_relative_to(purelib_path) or rpath.is_relative_to(platlib_path) diff --git a/ddtrace/internal/processor/stats.py b/ddtrace/internal/processor/stats.py index 45e4cd533f7..efd8492769b 100644 --- a/ddtrace/internal/processor/stats.py +++ b/ddtrace/internal/processor/stats.py @@ -48,6 +48,8 @@ def _is_measured(span: Span) -> bool: str, # type int, # http status code bool, # synthetics request + str, # http method + str, # http endpoint ] @@ -73,8 +75,10 @@ def _span_aggr_key(span: Span) -> SpanAggrKey: resource = span.resource or "" _type = span.span_type or "" status_code = span.get_tag("http.status_code") or 0 + method = span.get_tag("http.method") or "" + endpoint = span.get_tag("http.endpoint") or span.get_tag("http.route") or "" synthetics = span.context.dd_origin == "synthetics" - return span.name, service, resource, _type, int(status_code), synthetics + return (span.name, service, resource, _type, int(status_code), synthetics, method, endpoint) class SpanStatsProcessorV06(PeriodicService, SpanProcessor): @@ -157,12 +161,14 @@ def _serialize_buckets(self) -> List[Dict]: serialized_bucket_keys.append(bucket_time_ns) for aggr_key, stat_aggr in bucket.items(): - name, service, resource, _type, http_status, synthetics = aggr_key + name, service, resource, _type, http_status, synthetics, http_method, http_endpoint = aggr_key serialized_bucket = { "Name": compat.ensure_text(name), "Resource": compat.ensure_text(resource), "Synthetics": synthetics, "HTTPStatusCode": http_status, + "HTTPMethod": http_method, + "HTTPEndpoint": http_endpoint, "Hits": stat_aggr.hits, "TopLevelHits": stat_aggr.top_level_hits, "Duration": stat_aggr.duration, diff --git a/ddtrace/internal/products.py b/ddtrace/internal/products.py index e2d55983ef0..629e746c46d 100644 --- a/ddtrace/internal/products.py +++ b/ddtrace/internal/products.py @@ -12,6 +12,7 @@ from ddtrace.internal.telemetry import report_configuration from ddtrace.internal.telemetry import telemetry_writer from ddtrace.internal.uwsgi import check_uwsgi +from ddtrace.internal.uwsgi import uWSGIConfigDeprecationWarning from ddtrace.internal.uwsgi import uWSGIConfigError from ddtrace.internal.uwsgi import uWSGIMasterProcess from ddtrace.settings._core import DDConfig @@ -224,6 +225,11 @@ def _() -> None: except uWSGIConfigError: log.error("uWSGI configuration error", exc_info=True) + + except uWSGIConfigDeprecationWarning: + log.warning("uWSGI configuration deprecation warning", exc_info=True) + self._do_products() + except Exception: log.exception("Failed to check uWSGI configuration") diff --git a/ddtrace/internal/symbol_db/symbols.py b/ddtrace/internal/symbol_db/symbols.py index 803ce59cd11..9842d57eac9 100644 --- a/ddtrace/internal/symbol_db/symbols.py +++ b/ddtrace/internal/symbol_db/symbols.py @@ -38,6 +38,7 @@ from ddtrace.internal.utils.http import connector from ddtrace.internal.utils.http import multipart from 
ddtrace.internal.utils.inspection import linenos +from ddtrace.internal.utils.inspection import resolved_code_origin from ddtrace.internal.utils.inspection import undecorated from ddtrace.settings._agent import config as agent_config from ddtrace.settings.symbol_db import config as symdb_config @@ -96,7 +97,8 @@ def get_fields(cls: type) -> t.Set[str]: return { code.co_names[b.arg] for a, b in zip(*(islice(t, i, None) for i, t in enumerate(tee(dis.get_instructions(code), 2)))) - if a.opname == "LOAD_FAST" and a.arg == 0 and b.opname == "STORE_ATTR" + # Python 3.14 changed this to LOAD_FAST_BORROW + if a.opname.startswith("LOAD_FAST") and a.arg & 15 == 0 and b.opname == "STORE_ATTR" } except AttributeError: return set() @@ -323,7 +325,7 @@ def _(cls, code: CodeType, data: ScopeData, recursive: bool = True): return None data.seen.add(code_id) - if Path(code.co_filename).resolve() != data.origin: + if (code_origin := resolved_code_origin(code)) != data.origin: # Comes from another module. return None @@ -337,7 +339,7 @@ def _(cls, code: CodeType, data: ScopeData, recursive: bool = True): return Scope( scope_type=ScopeType.CLOSURE, # DEV: Not in the sense of a Python closure. name=code.co_name, - source_file=str(Path(code.co_filename).resolve()), + source_file=str(code_origin), start_line=start_line, end_line=end_line, symbols=Symbol.from_code(code), diff --git a/ddtrace/internal/telemetry/logging.py b/ddtrace/internal/telemetry/logging.py index 23f23212912..15967346981 100644 --- a/ddtrace/internal/telemetry/logging.py +++ b/ddtrace/internal/telemetry/logging.py @@ -14,6 +14,5 @@ def emit(self, record: logging.LogRecord) -> None: - Log all records with a level of ERROR or higher with telemetry """ if record.levelno >= logging.ERROR: - # Capture start up errors - full_file_name = os.path.join(record.pathname, record.filename) - self.telemetry_writer.add_error(1, record.msg, full_file_name, record.lineno) + if getattr(record, "send_to_telemetry", None) in (None, True): + self.telemetry_writer.add_error_log(record.msg, record.exc_info) diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index e36fcfa3320..09ceb9273b8 100644 --- a/ddtrace/internal/telemetry/writer.py +++ b/ddtrace/internal/telemetry/writer.py @@ -17,6 +17,7 @@ from ddtrace.internal.endpoints import endpoint_collection from ddtrace.internal.logger import get_logger +from ddtrace.internal.packages import is_user_code from ddtrace.internal.utils.http import get_connection from ddtrace.settings._agent import config as agent_config from ddtrace.settings._telemetry import config @@ -31,10 +32,8 @@ from ..utils.version import version as tracer_version from . import modules from .constants import TELEMETRY_APM_PRODUCT -from .constants import TELEMETRY_LOG_LEVEL # noqa:F401 +from .constants import TELEMETRY_LOG_LEVEL from .constants import TELEMETRY_NAMESPACE -from .constants import TELEMETRY_TYPE_DISTRIBUTION -from .constants import TELEMETRY_TYPE_GENERATE_METRICS from .constants import TELEMETRY_TYPE_LOGS from .data import get_application from .data import get_host_info @@ -107,7 +106,7 @@ def send_event(self, request: Dict) -> Optional[httplib.HTTPResponse]: resp.status, ) else: - log.debug("Failed to send Instrumentation Telemetry to %s. response: %s", self.url, resp.status) + log.debug("Failed to send Instrumentation Telemetry to %s. Response: %s", self.url, resp.status) except Exception as e: log.debug("Failed to send Instrumentation Telemetry to %s. 
Error: %s", self.url, str(e)) finally: @@ -162,10 +161,6 @@ def __init__(self, is_periodic=True, agentless=None): self._periodic_count = 0 self._is_periodic = is_periodic self._integrations_queue = dict() # type: Dict[str, Dict] - # Currently telemetry only supports reporting a single error. - # If we'd like to report multiple errors in the future - # we could hack it in by xor-ing error codes and concatenating strings - self._error = (0, "") # type: Tuple[int, str] self._namespace = MetricNamespace() self._logs = set() # type: Set[Dict[str, Any]] self._forked = False # type: bool @@ -207,9 +202,8 @@ def __init__(self, is_periodic=True, agentless=None): # This will occur when the agent writer starts. self.enable() # Force app started for unit tests - if config.FORCE_START: - self._app_started() - # Send logged error to telemetry + if config.FORCE_START and (app_started := self._app_started()): + self._events_queue.append(app_started) get_logger("ddtrace").addHandler(DDTelemetryErrorHandler(self)) def enable(self): @@ -264,18 +258,15 @@ def add_event(self, payload, payload_type): Payload types accepted by telemetry/proxy v2: app-started, app-closing, app-integrations-change """ if self.enable(): - event = { - "tracer_time": int(time.time()), - "runtime_id": get_runtime_id(), - "api_version": "v2", - "seq_id": next(self._sequence_payloads), - "debug": self._debug, - "application": get_application(config.SERVICE, config.VERSION, config.ENV), - "host": get_host_info(), - "payload": payload, - "request_type": payload_type, - } - self._events_queue.append(event) + self._events_queue.append({"payload": payload, "request_type": payload_type}) + + def add_events(self, events): + # type: (List[Dict[str, Any]]) -> None + """ + Adds a list of Telemetry events to the TelemetryWriter event buffer + """ + if self.enable(): + self._events_queue.extend(events) def add_integration(self, integration_name, patched, auto_patched=None, error_msg=None, version=""): # type: (str, bool, Optional[bool], Optional[str], Optional[str]) -> None @@ -285,6 +276,9 @@ def add_integration(self, integration_name, patched, auto_patched=None, error_ms :param str integration_name: name of patched module :param bool auto_enabled: True if module is enabled in _monkey.PATCH_MODULES """ + if not self.enable(): + return + # Integrations can be patched before the telemetry writer is enabled. with self._service_lock: if integration_name not in self._integrations_queue: @@ -300,21 +294,11 @@ def add_integration(self, integration_name, patched, auto_patched=None, error_ms self._integrations_queue[integration_name]["compatible"] = error_msg == "" self._integrations_queue[integration_name]["error"] = error_msg - def add_error(self, code, msg, filename, line_number): - # type: (int, str, Optional[str], Optional[int]) -> None - """Add an error to be submitted with an event. - Note that this overwrites any previously set errors. 
- """ - if filename and line_number is not None: - msg = "%s:%s: %s" % (filename, line_number, msg) - self._error = (code, msg) - - def _app_started(self, register_app_shutdown=True): - # type: (bool) -> None + def _app_started(self, register_app_shutdown: bool = True) -> Optional[Dict[str, Any]]: """Sent when TelemetryWriter is enabled or forks""" if self._forked or self.started: # app-started events should only be sent by the main process - return + return None # List of configurations to be collected self.started = True @@ -329,10 +313,6 @@ def _app_started(self, register_app_shutdown=True): payload = { "configuration": self._flush_configuration_queue(), - "error": { - "code": self._error[0], - "message": self._error[1], - }, "products": products, } # type: Dict[str, Union[Dict[str, Any], List[Any]]] # Add time to value telemetry metrics for single step instrumentation @@ -343,40 +323,42 @@ def _app_started(self, register_app_shutdown=True): "install_time": config.INSTALL_TIME, } - # Reset the error after it has been reported. - self._error = (0, "") - self.add_event(payload, "app-started") + return {"payload": payload, "request_type": "app-started"} def _app_heartbeat_event(self): - # type: () -> None + # type: () -> Dict[str, Any] if config.DEPENDENCY_COLLECTION and time.monotonic() - self._extended_time > self._extended_heartbeat_interval: self._extended_time += self._extended_heartbeat_interval - self._app_dependencies_loaded_event() + # Extended heartbeat event must be queued after the dependencies loaded event. + # Otherwise, self._imported_dependencies will not be up to date. payload = { "dependencies": [ {"name": name, "version": version} for name, version in self._imported_dependencies.items() ] } - self.add_event(payload, "app-extended-heartbeat") + request_type = "app-extended-heartbeat" else: - self.add_event({}, "app-heartbeat") + payload = {} + request_type = "app-heartbeat" + return {"payload": payload, "request_type": request_type} def _app_closing_event(self): - # type: () -> None + # type: () -> Optional[Dict[str, Any]] """Adds a Telemetry event which notifies the agent that an application instance has terminated""" if self._forked: # app-closing event should only be sent by the main process - return - payload = {} # type: Dict - self.add_event(payload, "app-closing") + return None + return {"payload": {}, "request_type": "app-closing"} def _app_integrations_changed_event(self, integrations): - # type: (List[Dict]) -> None + # type: (List[Dict]) -> Dict[str, Any] """Adds a Telemetry event which sends a list of configured integrations to the agent""" - payload = { - "integrations": integrations, + return { + "payload": { + "integrations": integrations, + }, + "request_type": "app-integrations-change", } - self.add_event(payload, "app-integrations-change") def _flush_integrations_queue(self): # type: () -> List[Dict] @@ -395,61 +377,61 @@ def _flush_configuration_queue(self): return configurations def _app_client_configuration_changed_event(self, configurations): - # type: (List[Dict]) -> None + # type: (List[Dict]) -> Dict[str, Any] """Adds a Telemetry event which sends list of modified configurations to the agent""" - payload = { - "configuration": configurations, + return { + "payload": { + "configuration": configurations, + }, + "request_type": "app-client-configuration-change", } - self.add_event(payload, "app-client-configuration-change") - def _app_dependencies_loaded_event(self): + def _app_dependencies_loaded_event(self) -> Optional[Dict[str, Any]]: """Adds 
events to report imports done since the last periodic run""" - if not config.DEPENDENCY_COLLECTION or not self._enabled: - return + return None with self._service_lock: newly_imported_deps = modules.get_newly_imported_modules(self._modules_already_imported) if not newly_imported_deps: - return + return None with self._service_lock: - packages = update_imported_dependencies(self._imported_dependencies, newly_imported_deps) - - if packages: - payload = {"dependencies": packages} - self.add_event(payload, "app-dependencies-loaded") + if packages := update_imported_dependencies(self._imported_dependencies, newly_imported_deps): + return {"payload": {"dependencies": packages}, "request_type": "app-dependencies-loaded"} + return None - def _add_endpoints_event(self): + def _flush_app_endpoints(self) -> Optional[Dict[str, Any]]: """Adds a Telemetry event which sends the list of HTTP endpoints found at startup to the agent""" import ddtrace.settings.asm as asm_config_module if not asm_config_module.config._api_security_endpoint_collection or not self._enabled: - return + return None if not endpoint_collection.endpoints: - return + return None with self._service_lock: payload = endpoint_collection.flush(asm_config_module.config._api_security_endpoint_collection_limit) - - self.add_event(payload, "app-endpoints") + return {"payload": payload, "request_type": "app-endpoints"} def _app_product_change(self): - # type: () -> None + # type: () -> Optional[Dict[str, Any]] """Adds a Telemetry event which reports the enablement of an APM product""" if not self._send_product_change_updates: - return + return None - payload = { - "products": { - product: {"version": tracer_version, "enabled": status} - for product, status in self._product_enablement.items() - } - } - self.add_event(payload, "app-product-change") self._send_product_change_updates = False + return { + "payload": { + "products": { + product: {"version": tracer_version, "enabled": status} + for product, status in self._product_enablement.items() + } + }, + "request_type": "app-product-change", + } def product_activated(self, product, enabled): # type: (str, bool) -> None @@ -502,7 +484,7 @@ def add_configurations(self, configuration_list: List[Tuple[str, str, str]]): def add_log(self, level, message, stack_trace="", tags=None): """ - Queues log. This event is meant to send library logs to Datadog’s backend through the Telemetry intake. + Queues log. This event is meant to send library logs to Datadog's backend through the Telemetry intake. This will make support cycles easier and ensure we know about potentially silent issues in libraries. """ if tags is None: @@ -523,42 +505,52 @@ def add_log(self, level, message, stack_trace="", tags=None): # Logs are hashed using the message, level, tags, and stack_trace. This should prevent duplicatation. 
self._logs.add(data) - def add_integration_error_log(self, msg: str, exc: BaseException) -> None: + def add_error_log(self, msg: str, exc: Union[BaseException, tuple, None]) -> None: if config.LOG_COLLECTION_ENABLED: - stack_trace = self._format_stack_trace(exc) + stack_trace = None if exc is None else self._format_stack_trace(exc) + self.add_log( TELEMETRY_LOG_LEVEL.ERROR, msg, stack_trace=stack_trace if stack_trace is not None else "", ) - def _format_stack_trace(self, exc: BaseException) -> Optional[str]: - exc_type, exc_value, exc_traceback = type(exc), exc, exc.__traceback__ - if exc_traceback: - tb = traceback.extract_tb(exc_traceback) - formatted_tb = ["Traceback (most recent call last):"] - for filename, lineno, funcname, srcline in tb: - if self._should_redact(filename): - formatted_tb.append(" ") - formatted_tb.append(" ") - else: - relative_filename = self._format_file_path(filename) - formatted_line = f' File "{relative_filename}", line {lineno}, in {funcname}\n {srcline}' - formatted_tb.append(formatted_line) - if exc_type: - formatted_tb.append(f"{exc_type.__module__}.{exc_type.__name__}: {exc_value}") - return "\n".join(formatted_tb) + def _format_stack_trace(self, exc: Union[BaseException, tuple]) -> Optional[str]: + if isinstance(exc, tuple) and len(exc) == 3: + exc_type, _, exc_traceback = exc + else: + exc_type, _, exc_traceback = type(exc), exc, getattr(exc, "__traceback__", None) - return None + if not exc_traceback: + return None - def _should_redact(self, filename: str) -> bool: - return "ddtrace" not in filename + tb = traceback.extract_tb(exc_traceback) + formatted_tb = ["Traceback (most recent call last):"] + for filename, lineno, funcname, srcline in tb: + if is_user_code(filename): + formatted_tb.append(" ") + formatted_tb.append(" ") + else: + relative_filename = self._format_file_path(filename) + formatted_line = f' File "{relative_filename}", line {lineno}, in {funcname}\n {srcline}' + formatted_tb.append(formatted_line) + if exc_type: + formatted_tb.append(f"{exc_type.__module__}.{exc_type.__name__}: ") + return "\n".join(formatted_tb) def _format_file_path(self, filename: str) -> str: try: - return os.path.relpath(filename, start=self.CWD) + if "site-packages" in filename: + return filename.split("site-packages", 1)[1].lstrip("/") + elif "lib/python" in filename: + return ( + filename.split("lib/python", 1)[1].split("/", 1)[1] + if "/" in filename.split("lib/python", 1)[1] + else "python_stdlib" + ) + return "" except ValueError: - return filename + return "" def add_gauge_metric( self, namespace: TELEMETRY_NAMESPACE, name: str, value: float, tags: Optional[MetricTagType] = None @@ -627,24 +619,27 @@ def _flush_log_metrics(self): self._logs = set() return log_metrics - def _generate_metrics_event(self, namespace_metrics) -> None: + def _generate_metrics_events(self, namespace_metrics): + # type: (Dict[str, Dict[str, List[Dict[str, Any]]]]) -> List[Dict[str, Any]] + metric_payloads = [] for payload_type, namespaces in namespace_metrics.items(): for namespace, metrics in namespaces.items(): if metrics: - payload = { - "namespace": namespace, - "series": metrics, - } - log.debug("%s request payload, namespace %s", payload_type, namespace) - if payload_type == TELEMETRY_TYPE_DISTRIBUTION: - self.add_event(payload, TELEMETRY_TYPE_DISTRIBUTION) - elif payload_type == TELEMETRY_TYPE_GENERATE_METRICS: - self.add_event(payload, TELEMETRY_TYPE_GENERATE_METRICS) + metric_payloads.append( + { + "payload": { + "namespace": namespace, + "series": metrics, + }, + 
"request_type": payload_type, + } + ) + return metric_payloads def _generate_logs_event(self, logs): - # type: (Set[Dict[str, str]]) -> None + # type: (Set[Dict[str, str]]) -> Dict[str, Any] log.debug("%s request payload", TELEMETRY_TYPE_LOGS) - self.add_event({"logs": list(logs)}, TELEMETRY_TYPE_LOGS) + return {"payload": {"logs": list(logs)}, "request_type": TELEMETRY_TYPE_LOGS} def _dispatch(self): # moved core here to avoid circular import @@ -653,47 +648,97 @@ def _dispatch(self): core.dispatch("telemetry.periodic") def periodic(self, force_flush=False, shutting_down=False): - # ensure app_started is called at least once in case traces weren't flushed - self._app_started() - self._app_product_change() - self._dispatch() - - namespace_metrics = self._namespace.flush(float(self.interval)) - if namespace_metrics: - self._generate_metrics_event(namespace_metrics) + """Process and send telemetry events in batches. + + This method handles the periodic collection and sending of telemetry data with two main timing intervals: + 1. Metrics collection interval (10 seconds by default): Collects metrics and logs + 2. Heartbeat interval (60 seconds by default): Sends all collected data to the telemetry endpoint + + The method follows this flow: + 1. Collects metrics and logs that have accumulated since last collection + 2. If not at heartbeat interval and not force_flush: + - Queues the metrics and logs for future sending + - Returns early + 3. At heartbeat interval or force_flush: + - Collects app status (started, product changes) + - Collects integration changes + - Collects configuration changes + - Collects dependency changes + - Collects stored events (ex: metrics and logs) + - Sends everything as a single batch + + Args: + force_flush: If True, bypasses the heartbeat interval check and sends immediately + shutting_down: If True, includes app-closing event in the batch + + Note: + - Metrics are collected every 10 seconds to ensure accurate time-based data + - All data is sent in a single batch every 60 seconds to minimize network overhead + - A heartbeat event is always included to keep RC connections alive + """ + # Collect metrics and logs that have accumulated since last batch + events = [] + if namespace_metrics := self._namespace.flush(float(self.interval)): + if metrics_events := self._generate_metrics_events(namespace_metrics): + events.extend(metrics_events) - logs_metrics = self._flush_log_metrics() - if logs_metrics: - self._generate_logs_event(logs_metrics) + if logs_metrics := self._flush_log_metrics(): + events.append(self._generate_logs_event(logs_metrics)) - # Telemetry metrics and logs should be aggregated into payloads every time periodic is called. - # This ensures metrics and logs are submitted in 10 second time buckets. 
+ # Queue metrics if not at heartbeat interval if self._is_periodic and force_flush is False: if self._periodic_count < self._periodic_threshold: self._periodic_count += 1 + if events: + self.add_events(events) return self._periodic_count = 0 - integrations = self._flush_integrations_queue() - if integrations: - self._app_integrations_changed_event(integrations) + # At heartbeat interval, collect and send all telemetry data + if app_started := self._app_started(): + # app-started should be the first event in the batch + events = [app_started] + events + + if app_product_change := self._app_product_change(): + events.append(app_product_change) - configurations = self._flush_configuration_queue() - if configurations: - self._app_client_configuration_changed_event(configurations) + if integrations := self._flush_integrations_queue(): + events.append(self._app_integrations_changed_event(integrations)) - self._app_dependencies_loaded_event() - self._add_endpoints_event() + if endpoints_payload := self._flush_app_endpoints(): + events.append(endpoints_payload) - if shutting_down: - self._app_closing_event() + if configurations := self._flush_configuration_queue(): + events.append(self._app_client_configuration_changed_event(configurations)) - # Send a heartbeat event to the agent, this is required to keep RC connections alive - self._app_heartbeat_event() + if app_dependencies_loaded := self._app_dependencies_loaded_event(): + events.append(app_dependencies_loaded) - telemetry_events = self._flush_events_queue() - for telemetry_event in telemetry_events: - self._client.send_event(telemetry_event) + if shutting_down and (app_closing := self._app_closing_event()): + events.append(app_closing) + + # Always include a heartbeat to keep RC connections alive + events.append(self._app_heartbeat_event()) + + # Get any queued events and combine with current batch + if queued_events := self._flush_events_queue(): + events.extend(queued_events) + + log.debug("Encoding instrumentation telemetry events: %s", ", ".join([e["request_type"] for e in events])) + # Prepare and send the final batch + batch_event = { + "tracer_time": int(time.time()), + "runtime_id": get_runtime_id(), + "api_version": "v2", + "seq_id": next(self._sequence_payloads), + "debug": self._debug, + "application": get_application(config.SERVICE, config.VERSION, config.ENV), + "host": get_host_info(), + "payload": events, + "request_type": "message-batch", + } + self._dispatch() + self._client.send_event(batch_event) def app_shutdown(self): if self.started: @@ -751,7 +796,9 @@ def _telemetry_excepthook(self, tp, value, root_traceback): lineno = traceback.tb_frame.f_code.co_firstlineno filename = traceback.tb_frame.f_code.co_filename - self.add_error(1, str(value), filename, lineno) + + if "ddtrace/" in filename: + self.add_error_log("Unhandled exception from ddtrace code", (tp, None, root_traceback)) dir_parts = filename.split(os.path.sep) # Check if exception was raised in the `ddtrace.contrib` package @@ -776,8 +823,8 @@ def _telemetry_excepthook(self, tp, value, root_traceback): error_msg = "{}:{} {}".format(filename, lineno, str(value)) self.add_integration(integration_name, True, error_msg=error_msg) - if self._enabled and not self.started: - self._app_started(False) + if self._enabled and not self.started and (app_started := self._app_started()): + self._events_queue.append(app_started) self.app_shutdown() diff --git a/ddtrace/internal/utils/inspection.py b/ddtrace/internal/utils/inspection.py index 603b72f0135..a8a3ca28e82 100644 --- 
a/ddtrace/internal/utils/inspection.py
+++ b/ddtrace/internal/utils/inspection.py
@@ -13,6 +13,7 @@ from typing import cast

 from ddtrace.internal.safety import _isinstance
+from ddtrace.internal.utils.cache import cached


 @singledispatch
@@ -31,6 +32,15 @@ def _(f: FunctionType) -> Set[int]:
     return linenos(f.__code__)


+@cached(maxsize=4 << 10)
+def _filename_to_resolved_path(filename: str) -> Path:
+    return Path(filename).resolve()
+
+
+def resolved_code_origin(code: CodeType) -> Path:
+    return _filename_to_resolved_path(code.co_filename)
+
+
 def undecorated(f: FunctionType, name: str, path: Path) -> FunctionType:
     # Find the original function object from a decorated function. We use the
     # expected function name to guide the search and pick the correct function.
@@ -38,7 +48,7 @@ def undecorated(f: FunctionType, name: str, path: Path) -> FunctionType:
     # to find the function as soon as possible.

     def match(g):
-        return g.__code__.co_name == name and Path(g.__code__.co_filename).resolve() == path
+        return g.__code__.co_name == name and resolved_code_origin(g.__code__) == path

     if _isinstance(f, FunctionType) and match(f):
         return f
diff --git a/ddtrace/internal/uwsgi.py b/ddtrace/internal/uwsgi.py
index d0bdf3aa724..4885f30f29b 100644
--- a/ddtrace/internal/uwsgi.py
+++ b/ddtrace/internal/uwsgi.py
@@ -11,6 +11,15 @@ class uWSGIConfigError(Exception):
     """


+class uWSGIConfigDeprecationWarning(DeprecationWarning):
+    """uWSGI configuration deprecation warning.
+
+    This is raised when the uwsgi configuration is incompatible with the
+    library, and future versions of the library plan to raise an error and
+    stop supporting the configuration.
+    """
+
+
 class uWSGIMasterProcess(Exception):
     """The process is uWSGI master process."""

@@ -33,6 +42,17 @@ def check_uwsgi(worker_callback: Optional[Callable] = None, atexit: Optional[Cal
         msg = "enable-threads option must be set to true, or a positive number of threads must be set"
         raise uWSGIConfigError(msg)

+    if (
+        hasattr(uwsgi, "version_info")
+        and uwsgi.version_info < (2, 0, 30)
+        and (uwsgi.opt.get("lazy-apps") or uwsgi.opt.get("lazy"))
+        and not uwsgi.opt.get("skip-atexit")
+    ):
+        msg = (
+            "skip-atexit option must be set when lazy-apps or lazy is set for "
+            "uwsgi<2.0.30, see https://github.com/unbit/uwsgi/pull/2726. We plan "
+            "to raise an error in the ddtrace 4.x release."
+        )
+        raise uWSGIConfigDeprecationWarning(msg)
+
     # If uwsgi has more than one process, it is running in prefork operational mode: uwsgi is going to fork multiple
     # sub-processes.
     # If lazy-app is enabled, then the app is loaded in each subprocess independently. This is fine.
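The deprecation gate above is meant to warn rather than abort. A minimal sketch of how a caller can treat the two exception types, mirroring the products.py change earlier in this diff; the handler bodies here are illustrative only, not the library's actual startup code:

from ddtrace.internal.uwsgi import check_uwsgi
from ddtrace.internal.uwsgi import uWSGIConfigDeprecationWarning
from ddtrace.internal.uwsgi import uWSGIConfigError

try:
    check_uwsgi()
except uWSGIConfigError:
    # Hard misconfiguration: abort startup.
    raise
except uWSGIConfigDeprecationWarning:
    # Tolerated for now (products.py logs it and continues); the plan is to
    # make this a hard error in ddtrace 4.x.
    pass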
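For context on the inspection change above: Path.resolve() touches the filesystem, and the debugger and symbol-database code resolve the same co_filename repeatedly. A short illustration, using only names introduced in this diff, of how the memoized helper behaves:

from ddtrace.internal.utils.inspection import resolved_code_origin


def first():
    pass


def second():
    pass


# Both code objects carry the same co_filename, so the second call is a
# cache hit instead of another Path(...).resolve() round trip.
assert resolved_code_origin(first.__code__) == resolved_code_origin(second.__code__)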
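The uploader rework at the top of this diff builds on the new AgentCheckPeriodicService in ddtrace/internal/agent.py: periodic() dispatches to the current state, which starts at the agent check and flips to online once info_check() accepts the agent's /info payload, and any exception raised from online() transitions the service back to the agent check, as the PlantUML state machine describes. A minimal sketch of a subclass, with a hypothetical name and body:

from typing import Optional

from ddtrace.internal.agent import AgentCheckPeriodicService


class ExampleUploader(AgentCheckPeriodicService):  # hypothetical subclass for illustration
    def info_check(self, agent_info: Optional[dict]) -> bool:
        # Stay in the agent check state until the agent is reachable and
        # advertises the endpoints we depend on.
        return agent_info is not None and "endpoints" in agent_info

    def online(self) -> None:
        # Runs on every tick while online; raising here sends the service
        # back to the agent check state on the next tick.
        pass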
diff --git a/ddtrace/internal/wrapping/asyncs.py b/ddtrace/internal/wrapping/asyncs.py index 66341657ca4..e6288eb9175 100644 --- a/ddtrace/internal/wrapping/asyncs.py +++ b/ddtrace/internal/wrapping/asyncs.py @@ -34,7 +34,133 @@ ASYNC_GEN_ASSEMBLY = Assembly() ASYNC_HEAD_ASSEMBLY = None -if PY >= (3, 12): +if PY >= (3, 14): + ASYNC_HEAD_ASSEMBLY = Assembly() + ASYNC_HEAD_ASSEMBLY.parse( + r""" + return_generator + pop_top + """ + ) + + COROUTINE_ASSEMBLY.parse( + r""" + get_awaitable 0 + load_const None + + presend: + send @send + yield_value 2 + resume 3 + jump_backward_no_interrupt @presend + send: + end_send + """ + ) + + ASYNC_GEN_ASSEMBLY.parse( + r""" + try @stopiter + copy 1 + store_fast $__ddgen + load_attr (False, 'asend') + store_fast $__ddgensend + load_fast $__ddgen + load_attr (True, '__anext__') + call 0 + + loop: + get_awaitable 0 + load_const None + presend0: + send @send0 + tried + + try @genexit lasti + yield_value 3 + resume 3 + jump_backward_no_interrupt @loop + send0: + end_send + + yield: + call_intrinsic_1 asm.Intrinsic1Op.INTRINSIC_ASYNC_GEN_WRAP + yield_value 3 + resume 1 + push_null + swap 2 + load_fast $__ddgensend + swap 2 + call 1 + jump_backward @loop + tried + + genexit: + try @stopiter + push_exc_info + load_const GeneratorExit + check_exc_match + pop_jump_if_false @exc + pop_top + load_fast $__ddgen + load_attr (True, 'aclose') + call 0 + get_awaitable 0 + load_const None + + presend1: + send @send1 + yield_value 4 + resume 3 + jump_backward_no_interrupt @presend1 + send1: + end_send + pop_top + pop_except + load_const None + return_value + + exc: + pop_top + push_null + load_fast $__ddgen + load_attr (False, 'athrow') + push_null + load_const sys.exc_info + call 0 + call_function_ex + get_awaitable 0 + load_const None + + presend2: + send @send2 + yield_value 4 + resume 3 + jump_backward_no_interrupt @presend2 + send2: + end_send + swap 2 + pop_except + jump_backward @yield + tried + + stopiter: + push_exc_info + load_const StopAsyncIteration + check_exc_match + pop_jump_if_false @propagate + pop_top + pop_except + load_const None + return_value + + propagate: + reraise 0 + """ + ) + + +elif PY >= (3, 12): ASYNC_HEAD_ASSEMBLY = Assembly() ASYNC_HEAD_ASSEMBLY.parse( r""" diff --git a/ddtrace/internal/wrapping/context.py b/ddtrace/internal/wrapping/context.py index 87ce3430855..2e5be4b1013 100644 --- a/ddtrace/internal/wrapping/context.py +++ b/ddtrace/internal/wrapping/context.py @@ -67,8 +67,8 @@ CONTEXT_RETURN = Assembly() CONTEXT_FOOT = Assembly() -if sys.version_info >= (3, 14): - raise NotImplementedError("Python >= 3.14 is not supported yet") +if sys.version_info >= (3, 15): + raise NotImplementedError("Python >= 3.15 is not supported yet") elif sys.version_info >= (3, 13): CONTEXT_HEAD.parse( r""" diff --git a/ddtrace/internal/wrapping/generators.py b/ddtrace/internal/wrapping/generators.py index 91cbed49962..37b762b64e5 100644 --- a/ddtrace/internal/wrapping/generators.py +++ b/ddtrace/internal/wrapping/generators.py @@ -30,7 +30,85 @@ GENERATOR_ASSEMBLY = Assembly() GENERATOR_HEAD_ASSEMBLY = None -if PY >= (3, 12): +if PY >= (3, 14): + GENERATOR_HEAD_ASSEMBLY = Assembly() + GENERATOR_HEAD_ASSEMBLY.parse( + r""" + return_generator + pop_top + """ + ) + + GENERATOR_ASSEMBLY.parse( + r""" + try @stopiter + copy 1 + store_fast $__ddgen + load_attr $send + store_fast $__ddgensend + push_null + load_const next + load_fast $__ddgen + + loop: + call 1 + tried + + yield: + try @genexit lasti + yield_value 3 + resume 1 + push_null + swap 2 + load_fast 
$__ddgensend + swap 2 + jump_backward @loop + tried + + genexit: + try @stopiter + push_exc_info + load_const GeneratorExit + check_exc_match + pop_jump_if_false @exc + pop_top + load_fast $__ddgen + load_method $close + call 0 + swap 2 + pop_except + return_value + + exc: + pop_top + push_null + load_fast $__ddgen + load_attr $throw + push_null + load_const sys.exc_info + call 0 + call_function_ex + swap 2 + pop_except + jump_backward @yield + tried + + stopiter: + push_exc_info + load_const StopIteration + check_exc_match + pop_jump_if_false @propagate + pop_top + pop_except + load_const None + return_value + + propagate: + reraise 0 + """ + ) + +elif PY >= (3, 12): GENERATOR_HEAD_ASSEMBLY = Assembly() GENERATOR_HEAD_ASSEMBLY.parse( r""" diff --git a/ddtrace/internal/writer/writer.py b/ddtrace/internal/writer/writer.py index eddf6538859..8267bc88866 100644 --- a/ddtrace/internal/writer/writer.py +++ b/ddtrace/internal/writer/writer.py @@ -15,6 +15,7 @@ import ddtrace from ddtrace import config +from ddtrace.internal.hostname import get_hostname import ddtrace.internal.native as native from ddtrace.internal.runtime import get_runtime_id import ddtrace.internal.utils.http @@ -329,7 +330,7 @@ def _send_payload(self, payload: bytes, count: int, client: WriterClientBase) -> else: log_args += (payload,) - log.error(msg, *log_args) + log.error(msg, *log_args, extra={"send_to_telemetry": False}) self._metrics_dist("http.dropped.bytes", len(payload)) self._metrics_dist("http.dropped.traces", count) return response @@ -796,6 +797,7 @@ def _create_exporter(self) -> native.TraceExporter: builder = ( native.TraceExporterBuilder() .set_url(self.intake_url) + .set_hostname(get_hostname()) .set_language("python") .set_language_version(compat.PYTHON_VERSION) .set_language_interpreter(compat.PYTHON_INTERPRETER) @@ -805,6 +807,12 @@ def _create_exporter(self) -> native.TraceExporter: .set_input_format(self._api_version) .set_output_format(self._api_version) ) + if config.service: + builder.set_service(config.service) + if config.env: + builder.set_env(config.env) + if config.version: + builder.set_app_version(config.version) if self._test_session_token is not None: builder.set_test_session_token(self._test_session_token) if self._stats_opt_out: @@ -820,6 +828,8 @@ def _create_exporter(self) -> native.TraceExporter: config._telemetry_heartbeat_interval * 1000 ) # Convert DD_TELEMETRY_HEARTBEAT_INTERVAL to milliseconds builder.enable_telemetry(heartbeat_ms, get_runtime_id()) + if config._health_metrics_enabled: + builder.enable_health_metrics() return builder.build() @@ -1042,7 +1052,7 @@ def _flush_single_payload( msg += ", payload %s" log_args += (binascii.hexlify(encoded).decode(),) # type: ignore - log.error(msg, *log_args) + log.error(msg, *log_args, extra={"send_to_telemetry": False}) def periodic(self): self.flush_queue(raise_exc=False) diff --git a/ddtrace/llmobs/_constants.py b/ddtrace/llmobs/_constants.py index dfb15b9da05..54ec9b7e263 100644 --- a/ddtrace/llmobs/_constants.py +++ b/ddtrace/llmobs/_constants.py @@ -86,6 +86,10 @@ SPAN_LINKS = "_ml_obs.span_links" NAME = "_ml_obs.name" + +# Prompt constants +DEFAULT_PROMPT_NAME = "unnamed-prompt" + DECORATOR = "_ml_obs.decorator" INTEGRATION = "_ml_obs.integration" diff --git a/ddtrace/llmobs/_experiment.py b/ddtrace/llmobs/_experiment.py index ea5e91e51d7..7bfb80c0ea5 100644 --- a/ddtrace/llmobs/_experiment.py +++ b/ddtrace/llmobs/_experiment.py @@ -82,7 +82,7 @@ class EvaluationResult(TypedDict): evaluations: Dict[str, Dict[str, JSONType]] -class 
ExperimentResult(TypedDict): +class ExperimentRowResult(TypedDict): idx: int record_id: Optional[str] span_id: str @@ -96,6 +96,11 @@ class ExperimentResult(TypedDict): error: Dict[str, Optional[str]] +class ExperimentResult(TypedDict): + summary_evaluations: Dict[str, Dict[str, JSONType]] + rows: List[ExperimentRowResult] + + class Dataset: name: str description: str @@ -304,11 +309,19 @@ def __init__( tags: Optional[Dict[str, str]] = None, config: Optional[ExperimentConfigType] = None, _llmobs_instance: Optional["LLMObs"] = None, + summary_evaluators: Optional[ + List[ + Callable[ + [List[DatasetRecordInputType], List[JSONType], List[JSONType], Dict[str, List[JSONType]]], JSONType + ] + ] + ] = None, ) -> None: self.name = name self._task = task self._dataset = dataset self._evaluators = evaluators + self._summary_evaluators = summary_evaluators or [] self._description = description self._tags: Dict[str, str] = tags or {} self._tags["ddtrace.version"] = str(ddtrace.__version__) @@ -327,21 +340,12 @@ def __init__( self._id: Optional[str] = None self._run_name: Optional[str] = None - def run( - self, jobs: int = 1, raise_errors: bool = False, sample_size: Optional[int] = None - ) -> List[ExperimentResult]: - if not self._llmobs_instance: + def run(self, jobs: int = 1, raise_errors: bool = False, sample_size: Optional[int] = None) -> ExperimentResult: + if not self._llmobs_instance or not self._llmobs_instance.enabled: raise ValueError( "LLMObs is not enabled. Ensure LLM Observability is enabled via `LLMObs.enable(...)` " "and create the experiment via `LLMObs.experiment(...)` before running the experiment." ) - if not self._llmobs_instance.enabled: - logger.warning( - "Skipping experiment as LLMObs is not enabled. " - "Ensure LLM Observability is enabled via `LLMObs.enable(...)` " - "or set `DD_LLMOBS_ENABLED=1` and use `ddtrace-run` to run your application." 
-            )
-            return []
 
         project = self._llmobs_instance._dne_client.project_create_or_get(self._project_name)
         self._project_id = project.get("_id", "")
@@ -360,11 +364,13 @@ def run(
         self._run_name = experiment_run_name
         task_results = self._run_task(jobs, raise_errors, sample_size)
         evaluations = self._run_evaluators(task_results, raise_errors=raise_errors)
-        experiment_results = self._merge_results(task_results, evaluations)
+        summary_evals = self._run_summary_evaluators(task_results, evaluations, raise_errors)
+        experiment_results = self._merge_results(task_results, evaluations, summary_evals)
         experiment_evals = self._generate_metrics_from_exp_results(experiment_results)
         self._llmobs_instance._dne_client.experiment_eval_post(
             self._id, experiment_evals, convert_tags_dict_to_list(self._tags)
         )
+        return experiment_results
 
     @property
@@ -476,9 +482,56 @@ def _run_evaluators(self, task_results: List[TaskResult], raise_errors: bool = F
             evaluations.append(evaluation)
         return evaluations
 
+    def _run_summary_evaluators(
+        self, task_results: List[TaskResult], eval_results: List[EvaluationResult], raise_errors: bool = False
+    ) -> List[EvaluationResult]:
+        evaluations: List[EvaluationResult] = []
+        inputs: List[DatasetRecordInputType] = []
+        outputs: List[JSONType] = []
+        expected_outputs: List[JSONType] = []
+        evals_dict = {}
+
+        # Maps each (non-summary) evaluator's name to its list of eval results, ordered by the index
+        # of the corresponding task result, so that summary evaluators can consume the per-record
+        # evaluation results in their original form.
+        eval_results_by_name: dict[str, List[JSONType]] = {}
+        for idx, task_result in enumerate(task_results):
+            outputs.append(task_result["output"])
+            record: DatasetRecord = self._dataset[idx]
+            inputs.append(record["input_data"])
+            expected_outputs.append(record["expected_output"])
+
+            eval_result_at_idx_by_name = eval_results[idx]["evaluations"]
+            for name, eval_value in eval_result_at_idx_by_name.items():
+                if name not in eval_results_by_name:
+                    eval_results_by_name[name] = []
+
+                eval_results_by_name[name].append(eval_value.get("value"))
+
+        for idx, summary_evaluator in enumerate(self._summary_evaluators):
+            eval_result: JSONType = None
+            eval_err: JSONType = None
+
+            try:
+                eval_result = summary_evaluator(inputs, outputs, expected_outputs, eval_results_by_name)
+            except Exception as e:
+                exc_type, exc_value, exc_tb = sys.exc_info()
+                exc_type_name = type(e).__name__ if exc_type is not None else "Unknown Exception"
+                exc_stack = "".join(traceback.format_exception(exc_type, exc_value, exc_tb))
+                eval_err = {"message": str(exc_value), "type": exc_type_name, "stack": exc_stack}
+                if raise_errors:
+                    raise RuntimeError(f"Summary evaluator {summary_evaluator.__name__} failed") from e
+            evals_dict[summary_evaluator.__name__] = {"value": eval_result, "error": eval_err}
+            evaluation: EvaluationResult = {"idx": idx, "evaluations": evals_dict}
+            evaluations.append(evaluation)
+
+        return evaluations
+
     def _merge_results(
-        self, task_results: List[TaskResult], evaluations: List[EvaluationResult]
-    ) -> List[ExperimentResult]:
+        self,
+        task_results: List[TaskResult],
+        evaluations: List[EvaluationResult],
+        summary_evaluations: Optional[List[EvaluationResult]],
+    ) -> ExperimentResult:
         experiment_results = []
         for idx, task_result in enumerate(task_results):
             output_data = task_result["output"]
@@ -486,7 +539,7 @@ def _merge_results(
             metadata.update(task_result.get("metadata") or {})
             record: DatasetRecord = self._dataset[idx]
             evals = evaluations[idx]["evaluations"]
-            exp_result: 
ExperimentResult = { + exp_result: ExperimentRowResult = { "idx": idx, "span_id": task_result.get("span_id", ""), "trace_id": task_result.get("trace_id", ""), @@ -500,10 +553,28 @@ def _merge_results( "error": task_result["error"], } experiment_results.append(exp_result) - return experiment_results + + summary_evals: Dict[str, Dict[str, JSONType]] = {} + if summary_evaluations: + for summary_evaluation in summary_evaluations: + for name, eval_data in summary_evaluation["evaluations"].items(): + summary_evals[name] = eval_data + + result: ExperimentResult = { + "summary_evaluations": summary_evals, + "rows": experiment_results, + } + return result def _generate_metric_from_evaluation( - self, eval_name: str, eval_value: JSONType, err: JSONType, span_id: str, trace_id: str, timestamp_ns: int + self, + eval_name: str, + eval_value: JSONType, + err: JSONType, + span_id: str, + trace_id: str, + timestamp_ns: int, + source: str = "custom", ) -> "LLMObsExperimentEvalMetricEvent": metric_type = None if eval_value is None: @@ -516,6 +587,7 @@ def _generate_metric_from_evaluation( metric_type = "categorical" eval_value = str(eval_value).lower() return { + "metric_source": source, "span_id": span_id, "trace_id": trace_id, "timestamp_ms": int(timestamp_ns / 1e6), @@ -528,14 +600,18 @@ def _generate_metric_from_evaluation( } def _generate_metrics_from_exp_results( - self, experiment_results: List[ExperimentResult] + self, experiment_result: ExperimentResult ) -> List["LLMObsExperimentEvalMetricEvent"]: eval_metrics = [] - for exp_result in experiment_results: + latest_timestamp: int = 0 + for exp_result in experiment_result["rows"]: evaluations = exp_result.get("evaluations") or {} span_id = exp_result.get("span_id", "") trace_id = exp_result.get("trace_id", "") timestamp_ns = cast(int, exp_result.get("timestamp", 0)) + if timestamp_ns > latest_timestamp: + latest_timestamp = timestamp_ns + for eval_name, eval_data in evaluations.items(): if not eval_data: continue @@ -544,6 +620,20 @@ def _generate_metrics_from_exp_results( eval_name, eval_value, eval_data.get("error"), span_id, trace_id, timestamp_ns ) eval_metrics.append(eval_metric) + + for name, summary_eval_data in experiment_result.get("summary_evaluations", {}).items(): + if not summary_eval_data: + continue + eval_metric = self._generate_metric_from_evaluation( + name, + summary_eval_data.get("value"), + summary_eval_data.get("error"), + "", + "", + latest_timestamp, + source="summary", + ) + eval_metrics.append(eval_metric) return eval_metrics diff --git a/ddtrace/llmobs/_integrations/__init__.py b/ddtrace/llmobs/_integrations/__init__.py index 93ad8a9a62d..c79c6033ddb 100644 --- a/ddtrace/llmobs/_integrations/__init__.py +++ b/ddtrace/llmobs/_integrations/__init__.py @@ -2,6 +2,7 @@ from .base import BaseLLMIntegration from .bedrock import BedrockIntegration from .gemini import GeminiIntegration +from .google_adk import GoogleAdkIntegration from .google_genai import GoogleGenAIIntegration from .langchain import LangChainIntegration from .litellm import LiteLLMIntegration @@ -15,6 +16,7 @@ "BaseLLMIntegration", "BedrockIntegration", "GeminiIntegration", + "GoogleAdkIntegration", "GoogleGenAIIntegration", "LangChainIntegration", "LiteLLMIntegration", diff --git a/ddtrace/llmobs/_integrations/anthropic.py b/ddtrace/llmobs/_integrations/anthropic.py index 17b89fe00fb..ac506bcd97c 100644 --- a/ddtrace/llmobs/_integrations/anthropic.py +++ b/ddtrace/llmobs/_integrations/anthropic.py @@ -24,9 +24,10 @@ from ddtrace.llmobs._integrations.base import 
BaseLLMIntegration from ddtrace.llmobs._integrations.utils import update_proxy_workflow_input_output_value from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs.utils import ToolCall -from ddtrace.llmobs.utils import ToolDefinition -from ddtrace.llmobs.utils import ToolResult +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import ToolCall +from ddtrace.llmobs.types import ToolDefinition +from ddtrace.llmobs.types import ToolResult from ddtrace.trace import Span @@ -69,9 +70,9 @@ def _llmobs_set_tags( span._set_ctx_item(TOOL_DEFINITIONS, tools) messages = kwargs.get("messages") system_prompt = kwargs.get("system") - input_messages = self._extract_input_message(messages, system_prompt) + input_messages = self._extract_input_message(list(messages) if messages else [], system_prompt) - output_messages = [{"content": ""}] + output_messages: List[Message] = [Message(content="")] if not span.error and response is not None: output_messages = self._extract_output_message(response) span_kind = "workflow" if span._get_ctx_item(PROXY_REQUEST) else "llm" @@ -92,14 +93,16 @@ def _llmobs_set_tags( ) update_proxy_workflow_input_output_value(span, span_kind) - def _extract_input_message(self, messages, system_prompt: Optional[Union[str, List[Dict[str, Any]]]] = None): + def _extract_input_message( + self, messages: List[Dict[str, Any]], system_prompt: Optional[Union[str, List[Dict[str, Any]]]] = None + ) -> List[Message]: """Extract input messages from the stored prompt. Anthropic allows for messages and multiple texts in a message, which requires some special casing. """ if not isinstance(messages, Iterable): log.warning("Anthropic input must be a list of messages.") - input_messages = [] + input_messages: List[Message] = [] if system_prompt is not None: messages = [{"content": system_prompt, "role": "system"}] + messages @@ -115,16 +118,16 @@ def _extract_input_message(self, messages, system_prompt: Optional[Union[str, Li log.warning("Anthropic input message must have content and role.") if isinstance(content, str): - input_messages.append({"content": content, "role": role}) + input_messages.append(Message(content=content, role=str(role))) elif isinstance(content, list): for block in content: if _get_attr(block, "type", None) == "text": - input_messages.append({"content": _get_attr(block, "text", ""), "role": role}) + input_messages.append(Message(content=str(_get_attr(block, "text", "")), role=str(role))) elif _get_attr(block, "type", None) == "image": # Store a placeholder for potentially enormous binary image data. 
- input_messages.append({"content": "([IMAGE DETECTED])", "role": role}) + input_messages.append(Message(content="([IMAGE DETECTED])", role=str(role))) elif _get_attr(block, "type", None) == "tool_use": text = _get_attr(block, "text", None) @@ -132,26 +135,26 @@ def _extract_input_message(self, messages, system_prompt: Optional[Union[str, Li if isinstance(input_data, str): input_data = json.loads(input_data) tool_call_info = ToolCall( - name=_get_attr(block, "name", ""), + name=str(_get_attr(block, "name", "")), arguments=input_data, - tool_id=_get_attr(block, "id", ""), - type=_get_attr(block, "type", ""), + tool_id=str(_get_attr(block, "id", "")), + type=str(_get_attr(block, "type", "")), ) if text is None: text = "" - input_messages.append({"content": text, "role": role, "tool_calls": [tool_call_info]}) + input_messages.append(Message(content=str(text), role=str(role), tool_calls=[tool_call_info])) elif _get_attr(block, "type", None) == "tool_result": content = _get_attr(block, "content", None) formatted_content = self._format_tool_result_content(content) tool_result_info = ToolResult( result=formatted_content, - tool_id=_get_attr(block, "tool_use_id", ""), + tool_id=str(_get_attr(block, "tool_use_id", "")), type="tool_result", ) - input_messages.append({"content": "", "role": role, "tool_results": [tool_result_info]}) + input_messages.append(Message(content="", role=str(role), tool_results=[tool_result_info])) else: - input_messages.append({"content": str(block), "role": role}) + input_messages.append(Message(content=str(block), role=str(role))) return input_messages @@ -169,34 +172,33 @@ def _format_tool_result_content(self, content) -> str: return ",".join(formatted_content) return str(content) - def _extract_output_message(self, response): + def _extract_output_message(self, response) -> List[Message]: """Extract output messages from the stored response.""" - output_messages = [] + output_messages: List[Message] = [] content = _get_attr(response, "content", "") role = _get_attr(response, "role", "") if isinstance(content, str): - return [{"content": content, "role": role}] + return [Message(content=content, role=str(role))] elif isinstance(content, list): for completion in content: text = _get_attr(completion, "text", None) if isinstance(text, str): - output_messages.append({"content": text, "role": role}) - else: - if _get_attr(completion, "type", None) == "tool_use": - input_data = _get_attr(completion, "input", "") - if isinstance(input_data, str): - input_data = json.loads(input_data) - tool_call_info = ToolCall( - name=_get_attr(completion, "name", ""), - arguments=input_data, - tool_id=_get_attr(completion, "id", ""), - type=_get_attr(completion, "type", ""), - ) - if text is None: - text = "" - output_messages.append({"content": text, "role": role, "tool_calls": [tool_call_info]}) + output_messages.append(Message(content=text, role=str(role))) + elif _get_attr(completion, "type", None) == "tool_use": + input_data = _get_attr(completion, "input", "") + if isinstance(input_data, str): + input_data = json.loads(input_data) + tool_call_info = ToolCall( + name=str(_get_attr(completion, "name", "")), + arguments=input_data, + tool_id=str(_get_attr(completion, "id", "")), + type=str(_get_attr(completion, "type", "")), + ) + if text is None: + text = "" + output_messages.append(Message(content=str(text), role=str(role), tool_calls=[tool_call_info])) return output_messages def _extract_usage(self, span: Span, usage: Dict[str, Any]): diff --git a/ddtrace/llmobs/_integrations/bedrock.py 
b/ddtrace/llmobs/_integrations/bedrock.py index 083d19f9803..b6eb244e300 100644 --- a/ddtrace/llmobs/_integrations/bedrock.py +++ b/ddtrace/llmobs/_integrations/bedrock.py @@ -35,7 +35,8 @@ from ddtrace.llmobs._telemetry import record_bedrock_agent_span_event_created from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs._writer import LLMObsSpanEvent -from ddtrace.llmobs.utils import ToolDefinition +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import ToolDefinition from ddtrace.trace import Span @@ -110,7 +111,7 @@ def _llmobs_set_tags( self._extract_input_message_for_converse(prompt) if is_converse else self._extract_input_message(prompt) ) - output_messages = [{"content": ""}] + output_messages: List[Message] = [Message(content="")] if not span.error and response is not None: if ctx["resource"] == "Converse": output_messages = self._extract_output_message_for_converse(response) @@ -191,7 +192,7 @@ def translate_bedrock_traces(self, traces, root_span) -> None: self._active_span_by_step_id.clear() @staticmethod - def _extract_input_message_for_converse(prompt: List[Dict[str, Any]]): + def _extract_input_message_for_converse(prompt: List[Dict[str, Any]]) -> List[Message]: """Extract input messages from the stored prompt for converse `prompt` is an array of `message` objects. Each `message` has a role and content field. @@ -203,8 +204,8 @@ def _extract_input_message_for_converse(prompt: List[Dict[str, Any]]): """ if not isinstance(prompt, list): log.warning("Bedrock input is not a list of messages or a string.") - return [{"content": ""}] - input_messages = [] + return [Message(content="")] + input_messages: List[Message] = [] for message in prompt: if not isinstance(message, dict): continue @@ -226,7 +227,7 @@ def _extract_output_message_for_converse(response: Dict[str, Any]): For more info, see bedrock converse response syntax: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html#API_runtime_Converse_ResponseSyntax """ - default_content = [{"content": ""}] + default_content: List[Message] = [Message(content="")] message = response.get("output", {}).get("message", {}) if not message: return default_content @@ -241,7 +242,7 @@ def _converse_output_stream_processor() -> ( Generator[ None, Dict[str, Any], - Tuple[List[Dict[str, Any]], Dict[str, str], Dict[str, int]], + Tuple[List[Message], Dict[str, str], Dict[str, int]], ] ): """ @@ -259,7 +260,7 @@ def _converse_output_stream_processor() -> ( """ usage_metrics: Dict[str, int] = {} metadata: Dict[str, str] = {} - messages: List[Dict[str, Any]] = [] + messages: List[Message] = [] text_content_blocks: Dict[int, str] = {} tool_content_blocks: Dict[int, Dict[str, Any]] = {} @@ -336,47 +337,48 @@ def _converse_output_stream_processor() -> ( ) if not messages: - messages.append({"role": "assistant", "content": ""}) + messages.append(Message(content="", role="assistant")) normalize_input_tokens(usage_metrics) return messages, metadata, usage_metrics @staticmethod - def _extract_input_message(prompt): + def _extract_input_message(prompt) -> List[Message]: """Extract input messages from the stored prompt. Anthropic allows for messages and multiple texts in a message, which requires some special casing. 
""" if isinstance(prompt, str): - return [{"content": prompt}] + return [Message(content=prompt)] if not isinstance(prompt, list): log.warning("Bedrock input is not a list of messages or a string.") - return [{"content": ""}] - input_messages = [] + return [Message(content="")] + input_messages: List[Message] = [] for p in prompt: content = p.get("content", "") if isinstance(content, list) and isinstance(content[0], dict): for entry in content: if entry.get("type") == "text": - input_messages.append({"content": entry.get("text", ""), "role": str(p.get("role", ""))}) + input_messages.append(Message(content=entry.get("text", ""), role=str(p.get("role", "")))) elif entry.get("type") == "image": # Store a placeholder for potentially enormous binary image data. - input_messages.append({"content": "([IMAGE DETECTED])", "role": str(p.get("role", ""))}) + input_messages.append(Message(content="([IMAGE DETECTED])", role=str(p.get("role", "")))) else: - input_messages.append({"content": content, "role": str(p.get("role", ""))}) + input_messages.append(Message(content=str(content), role=str(p.get("role", "")))) return input_messages @staticmethod - def _extract_output_message(response): + def _extract_output_message(response) -> List[Message]: """Extract output messages from the stored response. Anthropic allows for chat messages, which requires some special casing. """ if isinstance(response["text"], str): - return [{"content": response["text"]}] + return [Message(content=response["text"])] if isinstance(response["text"], list): if isinstance(response["text"][0], str): - return [{"content": str(content)} for content in response["text"]] + return [Message(content=str(content)) for content in response["text"]] if isinstance(response["text"][0], dict): - return [{"content": response["text"][0].get("text", "")}] + return [Message(content=response["text"][0].get("text", ""))] + return [] def _get_base_url(self, **kwargs: Dict[str, Any]) -> Optional[str]: instance = kwargs.get("instance") @@ -396,8 +398,8 @@ def _extract_tool_definitions(self, tool_config: Dict[str, Any]) -> List[ToolDef for tool in tools: tool_spec = _get_attr(tool, "toolSpec", {}) tool_definition_info = ToolDefinition( - name=_get_attr(tool_spec, "name", ""), - description=_get_attr(tool_spec, "description", ""), + name=str(_get_attr(tool_spec, "name", "")), + description=str(_get_attr(tool_spec, "description", "")), schema=_get_attr(tool_spec, "inputSchema", {}), ) tool_definitions.append(tool_definition_info) diff --git a/ddtrace/llmobs/_integrations/bedrock_agents.py b/ddtrace/llmobs/_integrations/bedrock_agents.py index cec0b4b0eaa..9f59b0a1082 100644 --- a/ddtrace/llmobs/_integrations/bedrock_agents.py +++ b/ddtrace/llmobs/_integrations/bedrock_agents.py @@ -3,12 +3,12 @@ import sys from typing import Any from typing import Dict +from typing import List +from typing import Literal from typing import Optional from typing import Tuple from ddtrace._trace.span import Span -from ddtrace.constants import ERROR_MSG -from ddtrace.constants import ERROR_TYPE from ddtrace.internal._rand import rand128bits from ddtrace.internal.logger import get_logger from ddtrace.internal.utils.formats import format_trace_id @@ -17,6 +17,12 @@ from ddtrace.llmobs._utils import _get_ml_app from ddtrace.llmobs._utils import _get_session_id from ddtrace.llmobs._utils import safe_json +from ddtrace.llmobs._writer import LLMObsSpanEvent +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import _ErrorField +from ddtrace.llmobs.types import 
_Meta +from ddtrace.llmobs.types import _MetaIO +from ddtrace.llmobs.types import _SpanField log = get_logger(__name__) @@ -51,7 +57,7 @@ def _build_span_event( metadata=None, input_val=None, output_val=None, -): +) -> LLMObsSpanEvent: if span_id is None: span_id = rand128bits() apm_trace_id = format_trace_id(root_span.trace_id) @@ -61,7 +67,7 @@ def _build_span_event( session_id = _get_session_id(root_span) ml_app = _get_ml_app(root_span) tags = [f"ml_app:{ml_app}", f"session_id:{session_id}", "integration:bedrock_agents"] - span_event = { + span_event: LLMObsSpanEvent = { "name": span_name, "span_id": str(span_id), "trace_id": format_trace_id(llmobs_trace_id), @@ -70,12 +76,13 @@ def _build_span_event( "start_ns": int(start_ns or root_span.start_ns), "duration": int(duration_ns or DEFAULT_SPAN_DURATION), "status": "error" if error else "ok", - "meta": { - "span.kind": str(span_kind), - "metadata": {}, - "input": {}, - "output": {}, - }, + "meta": _Meta( + span=_SpanField(kind=span_kind), + metadata={}, + input=_MetaIO(), + output=_MetaIO(), + error=_ErrorField(), + ), "metrics": {}, "_dd": { "span_id": str(span_id), @@ -85,15 +92,15 @@ def _build_span_event( } if metadata is not None: span_event["meta"]["metadata"] = metadata - io_key = "messages" if span_kind == "llm" else "value" - if input_val is not None: + io_key: Literal["messages", "value"] = "messages" if span_kind == "llm" else "value" + if input_val is not None and "input" in span_event["meta"]: span_event["meta"]["input"][io_key] = input_val - if output_val is not None: + if output_val is not None and "output" in span_event["meta"]: span_event["meta"]["output"][io_key] = output_val - if error_msg is not None: - span_event["meta"][ERROR_MSG] = error_msg - if error_type is not None: - span_event["meta"][ERROR_TYPE] = error_type + if error_msg is not None and "error" in span_event["meta"]: + span_event["meta"]["error"]["message"] = error_msg + if error_type is not None and "error" in span_event["meta"]: + span_event["meta"]["error"]["type"] = error_type return span_event @@ -176,7 +183,7 @@ def _create_or_update_bedrock_trace_step_span(trace, trace_step_id, inner_span_e def _translate_custom_orchestration_trace( trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a custom orchestration bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating if the trace is finished. """ @@ -197,8 +204,8 @@ def _translate_custom_orchestration_trace( def _translate_orchestration_trace( - trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: + trace: Dict[str, Any], root_span: Span, current_active_span: Optional[LLMObsSpanEvent], trace_step_id: str +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates an orchestration bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating if the trace is finished. """ @@ -224,7 +231,7 @@ def _translate_orchestration_trace( def _translate_failure_trace( trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a failure bedrock trace into a LLMObs span event. 
Returns the translated span event and a boolean indicating that the span is finished. """ @@ -253,7 +260,7 @@ def _translate_failure_trace( def _translate_guardrail_trace( trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a guardrail bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating that the span is finished. """ @@ -288,8 +295,8 @@ def _translate_guardrail_trace( def _translate_post_processing_trace( - trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: + trace: Dict[str, Any], root_span: Span, current_active_span: Optional[LLMObsSpanEvent], trace_step_id: str +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a postprocessing bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating if the span is finished. """ @@ -305,8 +312,8 @@ def _translate_post_processing_trace( def _translate_pre_processing_trace( - trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: + trace: Dict[str, Any], root_span: Span, current_active_span: Optional[LLMObsSpanEvent], trace_step_id: str +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a preprocessing bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating if the span is finished. """ @@ -322,8 +329,8 @@ def _translate_pre_processing_trace( def _translate_routing_classifier_trace( - trace: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]], trace_step_id: str -) -> Tuple[Optional[Dict[str, Any]], bool]: + trace: Dict[str, Any], root_span: Span, current_active_span: Optional[LLMObsSpanEvent], trace_step_id: str +) -> Tuple[Optional[LLMObsSpanEvent], bool]: """Translates a routing classifier bedrock trace into a LLMObs span event. Returns the translated span event and a boolean indicating if the span is finished. 
""" @@ -346,7 +353,7 @@ def _translate_routing_classifier_trace( def _model_invocation_input_span( model_input: Dict[str, Any], trace_step_id: str, start_ns: int, root_span: Span -) -> Optional[Dict[str, Any]]: +) -> Optional[LLMObsSpanEvent]: """Translates a Bedrock model invocation input trace into a LLMObs span event.""" model_id = model_input.get("foundationModel", "") model_provider, model_name = parse_model_id(model_id) @@ -355,9 +362,9 @@ def _model_invocation_input_span( except (json.JSONDecodeError, UnicodeDecodeError): log.warning("Failed to decode model input text.") text = {} - input_messages = [{"content": text.get("system", ""), "role": "system"}] + input_messages: List[Message] = [Message(content=text.get("system", ""), role="system")] for message in text.get("messages", []): - input_messages.append({"content": message.get("content", ""), "role": message.get("role", "")}) + input_messages.append(Message(content=message.get("content", ""), role=message.get("role", ""))) span_event = _build_span_event( "modelInvocation", root_span, @@ -371,39 +378,40 @@ def _model_invocation_input_span( def _model_invocation_output_span( - model_output: Dict[str, Any], current_active_span: Optional[Dict[str, Any]], root_span: Span -) -> Optional[Dict[str, Any]]: + model_output: Dict[str, Any], current_active_span: Optional[LLMObsSpanEvent], root_span: Span +) -> Optional[LLMObsSpanEvent]: """Translates a Bedrock model invocation output trace into a LLMObs span event.""" if not current_active_span: log.warning("Error in processing modelInvocationOutput.") return None bedrock_metadata = model_output.get("metadata", {}) start_ns, duration_ns = _extract_start_and_duration_from_metadata(bedrock_metadata, root_span) - output_messages = [] + output_messages: List[Message] = [] parsed_response = model_output.get("parsedResponse", {}) if parsed_response: - output_messages.append({"content": safe_json(parsed_response), "role": "assistant"}) + output_messages.append(Message(content=safe_json(parsed_response) or "", role="assistant")) else: raw_response = model_output.get("rawResponse", {}).get("content", "") - output_messages.append({"content": raw_response, "role": "assistant"}) + output_messages.append(Message(content=raw_response, role="assistant")) reasoning_text = model_output.get("reasoningContent", {}).get("reasoningText", {}) - if reasoning_text: - current_active_span["metadata"]["reasoningText"] = str(reasoning_text.get("text", "")) + if reasoning_text and "metadata" in current_active_span["meta"]: + current_active_span["meta"]["metadata"]["reasoningText"] = str(reasoning_text.get("text", "")) token_metrics = { "input_tokens": bedrock_metadata.get("usage", {}).get("inputTokens", 0), "output_tokens": bedrock_metadata.get("usage", {}).get("outputTokens", 0), } current_active_span["start_ns"] = int(start_ns) current_active_span["duration"] = int(duration_ns) - current_active_span["meta"]["output"]["messages"] = output_messages + if "output" in current_active_span["meta"]: + current_active_span["meta"]["output"]["messages"] = output_messages current_active_span["metrics"] = token_metrics return current_active_span def _rationale_span( rationale: Dict[str, Any], trace_step_id: str, start_ns: int, root_span: Span -) -> Optional[Dict[str, Any]]: +) -> Optional[LLMObsSpanEvent]: """Translates a Bedrock rationale trace into a LLMObs span event.""" span_event = _build_span_event( "reasoning", root_span, trace_step_id, "task", start_ns=start_ns, output_val=rationale.get("text", "") @@ -413,7 +421,7 @@ def 
_rationale_span( def _invocation_input_span( invocation_input: Dict[str, Any], trace_step_id: str, start_ns: int, root_span: Span -) -> Optional[Dict[str, Any]]: +) -> Optional[LLMObsSpanEvent]: """Translates a Bedrock invocation input trace into a LLMObs span event.""" span_name = "" tool_metadata = {} @@ -447,8 +455,8 @@ def _invocation_input_span( def _observation_span( - observation: Dict[str, Any], root_span: Span, current_active_span: Optional[Dict[str, Any]] -) -> Optional[Dict[str, Any]]: + observation: Dict[str, Any], root_span: Span, current_active_span: Optional[LLMObsSpanEvent] +) -> Optional[LLMObsSpanEvent]: """Translates a Bedrock observation trace into a LLMObs span event.""" observation_type = observation.get("type", "") if observation_type in ("FINISH", "REPROMPT"): @@ -479,7 +487,8 @@ def _observation_span( start_ns, duration_ns = _extract_start_and_duration_from_metadata(bedrock_metadata, root_span) current_active_span["start_ns"] = int(start_ns) current_active_span["duration"] = int(duration_ns) - current_active_span["meta"]["output"]["value"] = output_value + if "output" in current_active_span["meta"]: + current_active_span["meta"]["output"]["value"] = output_value return current_active_span diff --git a/ddtrace/llmobs/_integrations/crewai.py b/ddtrace/llmobs/_integrations/crewai.py index e5f104132df..de34ffce85d 100644 --- a/ddtrace/llmobs/_integrations/crewai.py +++ b/ddtrace/llmobs/_integrations/crewai.py @@ -24,6 +24,7 @@ from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor from ddtrace.llmobs._utils import safe_json +from ddtrace.llmobs.types import _SpanLink from ddtrace.trace import Span @@ -123,11 +124,11 @@ def _llmobs_set_tags_crew(self, span, args, kwargs, response): task_span_ids = self._crews_to_task_span_ids.get(crew_id, []) if task_span_ids: last_task_span_id = task_span_ids[-1] - span_link = { - "span_id": last_task_span_id, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "output", "to": "output"}, - } + span_link = _SpanLink( + span_id=last_task_span_id, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "output", "to": "output"}, + ) curr_span_links = span._get_ctx_item(SPAN_LINKS) or [] span._set_ctx_item(SPAN_LINKS, curr_span_links + [span_link]) metadata = { @@ -160,11 +161,11 @@ def _llmobs_set_tags_task(self, span, args, kwargs, response): span_links = self._crews_to_tasks[crew_id].get(str(task_id), {}).get("span_links", []) if self._is_planning_task(span): parent_span = _get_nearest_llmobs_ancestor(span) - span_link = { - "span_id": str(parent_span.span_id), - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "input", "to": "input"}, - } + span_link = _SpanLink( + span_id=str(parent_span.span_id), + trace_id=format_trace_id(span.trace_id), + attributes={"from": "input", "to": "input"}, + ) span_links.append(span_link) curr_span_links = span._get_ctx_item(SPAN_LINKS) or [] span._set_ctx_item(SPAN_LINKS, curr_span_links + span_links) @@ -186,18 +187,18 @@ def _llmobs_set_tags_agent(self, span, args, kwargs, response): context = get_argument_value(args, kwargs, 1, "context", optional=True) or "" parent_span = _get_nearest_llmobs_ancestor(span) - parent_span_link = { - "span_id": str(span.span_id), - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "output", "to": "output"}, - } + parent_span_link = _SpanLink( + span_id=str(span.span_id), + trace_id=format_trace_id(span.trace_id), + 
attributes={"from": "output", "to": "output"}, + ) curr_span_links = parent_span._get_ctx_item(SPAN_LINKS) or [] parent_span._set_ctx_item(SPAN_LINKS, curr_span_links + [parent_span_link]) - span_link = { - "span_id": str(parent_span.span_id), - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "input", "to": "input"}, - } + span_link = _SpanLink( + span_id=str(parent_span.span_id), + trace_id=format_trace_id(span.trace_id), + attributes={"from": "input", "to": "input"}, + ) curr_span_links = span._get_ctx_item(SPAN_LINKS) or [] span._set_ctx_items( { @@ -306,11 +307,11 @@ def _llmobs_set_tags_flow_method(self, span, args, kwargs, response): ) if span.name in getattr(flow_instance, "_start_methods", []) and span._parent is not None: span_links.append( - { - "span_id": str(span._parent.span_id), - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "input", "to": "input"}, - } + _SpanLink( + span_id=str(span._parent.span_id), + trace_id=format_trace_id(span.trace_id), + attributes={"from": "input", "to": "input"}, + ) ) if span.name in getattr(flow_instance, "_routers", []): @@ -368,11 +369,11 @@ def _llmobs_set_span_link_on_flow(self, flow_span, args, kwargs, flow_instance): if condition_type != "AND": triggered = True span_links.append( - { - "span_id": str(trigger_span_dict["span_id"]), - "trace_id": format_trace_id(flow_span.trace_id), - "attributes": {"from": "output", "to": "input"}, - } + _SpanLink( + span_id=str(trigger_span_dict["span_id"]), + trace_id=format_trace_id(flow_span.trace_id), + attributes={"from": "output", "to": "input"}, + ) ) continue if any( @@ -384,11 +385,11 @@ def _llmobs_set_span_link_on_flow(self, flow_span, args, kwargs, flow_instance): for method in listener_triggers: method_span_dict = flow_methods_to_spans.get(method, {}) span_links.append( - { - "span_id": str(method_span_dict["span_id"]), - "trace_id": format_trace_id(flow_span.trace_id), - "attributes": {"from": "output", "to": "input"}, - } + _SpanLink( + span_id=str(method_span_dict["span_id"]), + trace_id=format_trace_id(flow_span.trace_id), + attributes={"from": "output", "to": "input"}, + ) ) flow_span_span_links = flow_span._get_ctx_item(SPAN_LINKS) or [] # Remove temporary output->output link since the AND has been triggered @@ -400,11 +401,11 @@ def _llmobs_set_span_link_on_flow(self, flow_span, args, kwargs, flow_instance): if triggered is False: flow_span_span_links = flow_span._get_ctx_item(SPAN_LINKS) or [] flow_span_span_links.append( - { - "span_id": str(trigger_span_dict["span_id"]), - "trace_id": format_trace_id(flow_span.trace_id), - "attributes": {"from": "output", "to": "output"}, - } + _SpanLink( + span_id=str(trigger_span_dict["span_id"]), + trace_id=format_trace_id(flow_span.trace_id), + attributes={"from": "output", "to": "output"}, + ) ) flow_span._set_ctx_item(SPAN_LINKS, flow_span_span_links) return @@ -434,7 +435,7 @@ def _llmobs_set_span_link_on_task(self, span, args, kwargs): crew_id = _get_crew_id(span, "crew") is_planning_crew_instance = crew_id in self._planning_crew_ids queued_task_node = self._crews_to_tasks.get(crew_id, {}).setdefault(str(queued_task_id), {}) - span_links = [] + span_links: List[_SpanLink] = [] if isinstance(getattr(queued_task, "context", None), Iterable): for finished_task in queued_task.context: @@ -442,31 +443,31 @@ def _llmobs_set_span_link_on_task(self, span, args, kwargs): finished_task_node = self._crews_to_tasks.get(crew_id, {}).get(str(finished_task_id), {}) finished_task_span_id = 
finished_task_node.get("span_id") span_links.append( - { - "span_id": finished_task_span_id, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "output", "to": "input"}, - } + _SpanLink( + span_id=finished_task_span_id, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "output", "to": "input"}, + ) ) queued_task_node["span_links"] = span_links return if not finished_task_outputs and not is_planning_crew_instance: queued_task_node["span_links"] = [ - { - "span_id": str(span.span_id) if span else ROOT_PARENT_ID, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "input", "to": "input"}, - } + _SpanLink( + span_id=str(span.span_id) if span else ROOT_PARENT_ID, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "input", "to": "input"}, + ) ] return if is_planning_crew_instance and self._crews_to_task_span_ids.get(crew_id, []): planning_task_span_id = self._crews_to_task_span_ids[crew_id][-1] queued_task_node["span_links"] = [ - { - "span_id": planning_task_span_id if span else ROOT_PARENT_ID, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "output", "to": "input"}, - } + _SpanLink( + span_id=planning_task_span_id if span else ROOT_PARENT_ID, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "output", "to": "input"}, + ) ] return @@ -475,11 +476,11 @@ def _llmobs_set_span_link_on_task(self, span, args, kwargs): for i in range(1, num_tasks_to_link + 1): # Iterate backwards through last n finished tasks finished_task_span_id = finished_task_spans[-i] span_links.append( - { - "span_id": finished_task_span_id, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "output", "to": "input"}, - } + _SpanLink( + span_id=finished_task_span_id, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "output", "to": "input"}, + ) ) queued_task_node["span_links"] = span_links return diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index ef230f85b95..664f9b7dfb9 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -20,6 +20,7 @@ from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai from ddtrace.llmobs._utils import _get_attr +from ddtrace.llmobs.types import Message from ddtrace.trace import Span @@ -47,9 +48,9 @@ def _llmobs_set_tags( system_instruction = get_system_instructions_gemini_vertexai(instance) input_contents = get_argument_value(args, kwargs, 0, "contents") - input_messages = self._extract_input_message(input_contents, system_instruction) + input_messages: List[Message] = self._extract_input_message(input_contents, system_instruction) - output_messages = [{"content": ""}] + output_messages: List[Message] = [Message(content="")] if response is not None: output_messages = self._extract_output_message(response) @@ -66,30 +67,30 @@ def _llmobs_set_tags( ) def _extract_input_message(self, contents, system_instruction=None): - messages = [] + messages: List[Message] = [] if system_instruction: for instruction in system_instruction: - messages.append({"content": instruction or "", "role": "system"}) + messages.append(Message(content=instruction or "", role="system")) if isinstance(contents, str): - messages.append({"content": contents}) + messages.append(Message(content=contents)) return messages if isinstance(contents, dict): - message = {"content": 
contents.get("text", "")} + message = Message(content=contents.get("text", "")) if contents.get("role", None): message["role"] = contents["role"] messages.append(message) return messages if not isinstance(contents, list): - messages.append({"content": "[Non-text content object: {}]".format(repr(contents))}) + messages.append(Message(content="[Non-text content object: {}]".format(repr(contents)))) return messages for content in contents: if isinstance(content, str): - messages.append({"content": content}) + messages.append(Message(content=content)) continue role = _get_attr(content, "role", None) parts = _get_attr(content, "parts", []) if not parts or not isinstance(parts, Iterable): - message = {"content": "[Non-text content object: {}]".format(repr(content))} + message = Message(content="[Non-text content object: {}]".format(repr(content))) if role: message["role"] = role messages.append(message) diff --git a/ddtrace/llmobs/_integrations/google_adk.py b/ddtrace/llmobs/_integrations/google_adk.py new file mode 100644 index 00000000000..5e6d974bad7 --- /dev/null +++ b/ddtrace/llmobs/_integrations/google_adk.py @@ -0,0 +1,157 @@ +from typing import Any +from typing import Dict +from typing import List +from typing import Optional + +from ddtrace.internal import core +from ddtrace.internal.constants import COMPONENT +from ddtrace.internal.utils import get_argument_value +from ddtrace.llmobs._constants import AGENT_MANIFEST +from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL +from ddtrace.llmobs._constants import INPUT_VALUE +from ddtrace.llmobs._constants import METADATA +from ddtrace.llmobs._constants import MODEL_NAME +from ddtrace.llmobs._constants import MODEL_PROVIDER +from ddtrace.llmobs._constants import NAME +from ddtrace.llmobs._constants import OUTPUT_VALUE +from ddtrace.llmobs._constants import SPAN_KIND +from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_google_genai +from ddtrace.llmobs._integrations.google_utils import extract_messages_from_adk_events +from ddtrace.llmobs._utils import safe_json +from ddtrace.trace import Span + + +class GoogleAdkIntegration(BaseLLMIntegration): + _integration_name = "google_adk" + + def _set_base_span_tags( + self, span: Span, model: Optional[Any] = None, provider: Optional[Any] = None, **kwargs + ) -> None: + span.set_tag_str(COMPONENT, self._integration_name) + if model: + span.set_tag("google_adk.request.model", model) + if provider: + span.set_tag("google_adk.request.provider", provider) + + def _llmobs_set_tags( + self, + span: Span, + args: List[Any], + kwargs: Dict[str, Any], + response: Optional[Any] = None, + operation: str = "", # being used for span kind: one of "agent", "tool", "code_execute" + ) -> None: + if operation == "agent": + self._llmobs_set_tags_agent(span, args, kwargs, response) + elif operation == "tool": + self._llmobs_set_tags_tool(span, args, kwargs, response) + elif operation == "code_execute": + self._llmobs_set_tags_code_execute(span, args, kwargs, response) + + span._set_ctx_items( + { + SPAN_KIND: operation, + MODEL_NAME: span.get_tag("google_adk.request.model") or "", + MODEL_PROVIDER: span.get_tag("google_adk.request.provider") or "", + } + ) + + def _llmobs_set_tags_agent( + self, span: Span, args: List[Any], kwargs: Dict[str, Any], response: Optional[Any] + ) -> None: + agent_instance = kwargs.get("instance", None) + agent_name = getattr(agent_instance, "name", None) + + self._tag_agent_manifest(span, 
kwargs, agent_instance)
+        new_message = get_argument_value(args, kwargs, 0, "new_message", optional=True) or []
+        new_message_parts: list = getattr(new_message, "parts", [])
+        new_message_role: str = getattr(new_message, "role", "")
+        message = ""
+        for part in new_message_parts:
+            message += extract_message_from_part_google_genai(part, new_message_role).get("content", "")
+        result = extract_messages_from_adk_events(response)
+
+        span._set_ctx_items(
+            {
+                NAME: agent_name or "Google ADK Agent",
+                INPUT_VALUE: message,
+                OUTPUT_VALUE: result,
+            }
+        )
+
+    def _llmobs_set_tags_tool(
+        self, span: Span, args: List[Any], kwargs: Dict[str, Any], response: Optional[Any] = None
+    ) -> None:
+        tool = get_argument_value(args, kwargs, 0, "tool")
+        tool_args = get_argument_value(args, kwargs, 1, "args")
+        tool_call_id = getattr(kwargs.get("tool_context", {}), "function_call_id", "")
+
+        span._set_ctx_item(OUTPUT_VALUE, response)
+        tool_name = getattr(tool, "name", "")
+        tool_description = getattr(tool, "description", "")
+
+        span._set_ctx_items(
+            {
+                NAME: tool_name,
+                METADATA: {"description": tool_description},
+                INPUT_VALUE: tool_args,
+            }
+        )
+
+        if tool_call_id:
+            core.dispatch(
+                DISPATCH_ON_TOOL_CALL,
+                (
+                    tool_name,
+                    safe_json(tool_args),
+                    "function",
+                    span,
+                    tool_call_id,
+                ),
+            )
+
+    def _tag_agent_manifest(self, span: Span, kwargs: Dict[str, Any], agent: Any) -> None:
+        if not agent:
+            return
+
+        manifest: Dict[str, Any] = {}
+
+        manifest["framework"] = "Google ADK"
+        manifest["name"] = getattr(agent, "name", "")
+        manifest["model"] = getattr(getattr(agent, "model", ""), "model", "")
+        manifest["description"] = getattr(agent, "description", "")
+        manifest["instructions"] = getattr(agent, "instruction", "")
+        manifest["model_configuration"] = safe_json(getattr(agent, "model_config", ""))
+        manifest["session_management"] = {
+            "session_id": kwargs.get("session_id", ""),
+            "user_id": kwargs.get("user_id", ""),
+        }
+        manifest["tools"] = self._get_agent_tools(getattr(agent, "tools", []))
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _llmobs_set_tags_code_execute(
+        self, span: Span, args: List[Any], kwargs: Dict[str, Any], response: Optional[Any] = None
+    ) -> None:
+        stdout = getattr(response, "stdout", None)
+        stderr = getattr(response, "stderr", None)
+        output = ""
+        if stdout:
+            output += stdout
+        if stderr:
+            output += "\n" + stderr
+
+        code_input = get_argument_value(args, kwargs, 1, "code_execution_input")
+        span._set_ctx_items(
+            {
+                NAME: "Google ADK Code Execute",
+                INPUT_VALUE: getattr(code_input, "code", ""),
+                OUTPUT_VALUE: output,
+            }
+        )
+
+    def _get_agent_tools(self, tools):
+        if not tools or not isinstance(tools, list):
+            return []
+        return [{"name": tool.name, "description": tool.description} for tool in tools]
diff --git a/ddtrace/llmobs/_integrations/google_genai.py b/ddtrace/llmobs/_integrations/google_genai.py
index 8c89d282778..5c799d16a39 100644
--- a/ddtrace/llmobs/_integrations/google_genai.py
+++ b/ddtrace/llmobs/_integrations/google_genai.py
@@ -22,8 +22,9 @@
 from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name
 from ddtrace.llmobs._integrations.google_utils import normalize_contents_google_genai
 from ddtrace.llmobs._utils import _get_attr
-from ddtrace.llmobs.utils import Document
-from ddtrace.llmobs.utils import ToolDefinition
+from ddtrace.llmobs.types import Document
+from ddtrace.llmobs.types import Message
+from ddtrace.llmobs.types import ToolDefinition
 
 
 # 
https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/content-generation-parameters @@ -110,8 +111,8 @@ def _llmobs_set_tags_from_embedding(self, span, args, kwargs, response): } ) - def _extract_input_messages(self, args: List[Any], kwargs: Dict[str, Any], config) -> List[Dict[str, Any]]: - messages = [] + def _extract_input_messages(self, args: List[Any], kwargs: Dict[str, Any], config) -> List[Message]: + messages: List[Message] = [] system_instruction = _get_attr(config, "system_instruction", None) if system_instruction is not None: @@ -122,17 +123,17 @@ def _extract_input_messages(self, args: List[Any], kwargs: Dict[str, Any], confi return messages - def _extract_messages_from_contents(self, contents, default_role: str) -> List[Dict[str, Any]]: - messages = [] + def _extract_messages_from_contents(self, contents, default_role: str) -> List[Message]: + messages: List[Message] = [] for content in normalize_contents_google_genai(contents): role = content.get("role") or default_role for part in content.get("parts", []): messages.append(extract_message_from_part_google_genai(part, role)) return messages - def _extract_output_messages(self, response) -> List[Dict[str, Any]]: + def _extract_output_messages(self, response) -> List[Message]: if not response: - return [{"content": "", "role": GOOGLE_GENAI_DEFAULT_MODEL_ROLE}] + return [Message(content="", role=GOOGLE_GENAI_DEFAULT_MODEL_ROLE)] messages = [] candidates = _get_attr(response, "candidates", []) for candidate in candidates: @@ -156,7 +157,7 @@ def _extract_embedding_output_value(self, response) -> str: def _extract_embedding_input_documents(self, args, kwargs, config) -> List[Document]: contents = kwargs.get("contents") messages = self._extract_messages_from_contents(contents, "user") - documents = [Document(text=str(message["content"])) for message in messages] + documents = [Document(text=str(message.get("content", ""))) for message in messages] return documents def _extract_metadata(self, config, params) -> Dict[str, Any]: @@ -175,8 +176,8 @@ def _function_declaration_to_tool_definition(self, function_declaration) -> Tool schema = {"value": repr(schema)} return ToolDefinition( - name=_get_attr(function_declaration, "name", "") or "", - description=_get_attr(function_declaration, "description", "") or "", + name=str(_get_attr(function_declaration, "name", "") or ""), + description=str(_get_attr(function_declaration, "description", "") or ""), schema=schema, ) diff --git a/ddtrace/llmobs/_integrations/google_utils.py b/ddtrace/llmobs/_integrations/google_utils.py index 1d28bce97d3..76c12daeac8 100644 --- a/ddtrace/llmobs/_integrations/google_utils.py +++ b/ddtrace/llmobs/_integrations/google_utils.py @@ -12,8 +12,9 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs._utils import safe_json -from ddtrace.llmobs.utils import ToolCall -from ddtrace.llmobs.utils import ToolResult +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import ToolCall +from ddtrace.llmobs.types import ToolResult # Google GenAI has roles "model" and "user", but in order to stay consistent with other integrations, @@ -113,10 +114,21 @@ def extract_content(content): def extract_generation_metrics_google_genai(response) -> Dict[str, Any]: + """ + Extract usage metrics from Google GenAI response or Google ADK Event object. 
+ + Args: + response: Google GenAI response object or ADK Event object + + Returns: + Dictionary with token usage metrics + """ if not response: return {} usage_metadata = _get_attr(response, "usage_metadata", {}) + if not usage_metadata: + return {} usage = {} input_tokens = _get_attr(usage_metadata, "prompt_token_count", None) @@ -163,7 +175,7 @@ def extract_embedding_metrics_google_genai(response) -> Dict[str, Any]: return usage -def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]: +def extract_message_from_part_google_genai(part, role: str) -> Message: """part is a PartUnion = Union[File, Part, PIL_Image, str] returns a dict representing a message with format {"role": role, "content": content} @@ -171,7 +183,7 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]: if role == "model": role = GOOGLE_GENAI_DEFAULT_MODEL_ROLE - message: Dict[str, Any] = {"role": role} + message: Message = Message(role=role) if isinstance(part, str): message["content"] = part return message @@ -188,9 +200,9 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]: function_call = _get_attr(part, "function_call", None) if function_call: tool_call_info = ToolCall( - name=_get_attr(function_call, "name", "") or "", - arguments=_get_attr(function_call, "args", {}) or {}, - tool_id=_get_attr(function_call, "id", "") or "", + name=str(_get_attr(function_call, "name", "") or ""), + arguments=dict(_get_attr(function_call, "args", {}) or {}), + tool_id=str(_get_attr(function_call, "id", "") or ""), type="function_call", ) message["tool_calls"] = [tool_call_info] @@ -200,9 +212,9 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]: if function_response: result = _get_attr(function_response, "response", "") or "" tool_result_info = ToolResult( - name=_get_attr(function_response, "name", "") or "", + name=str(_get_attr(function_response, "name", "") or ""), result=result if isinstance(result, str) else json.dumps(result), - tool_id=_get_attr(function_response, "id", "") or "", + tool_id=str(_get_attr(function_response, "id", "") or ""), type="function_response", ) message["tool_results"] = [tool_result_info] @@ -212,17 +224,17 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]: if executable_code: language = _get_attr(executable_code, "language", "UNKNOWN") code = _get_attr(executable_code, "code", "") - message["content"] = safe_json({"language": str(language), "code": str(code)}) + message["content"] = safe_json({"language": str(language), "code": str(code)}) or "" return message code_execution_result = _get_attr(part, "code_execution_result", None) if code_execution_result: outcome = _get_attr(code_execution_result, "outcome", "OUTCOME_UNSPECIFIED") output = _get_attr(code_execution_result, "output", "") - message["content"] = safe_json({"outcome": str(outcome), "output": str(output)}) + message["content"] = safe_json({"outcome": str(outcome), "output": str(output)}) or "" return message - return {"content": "Unsupported file type: {}".format(type(part)), "role": role} + return Message(content="Unsupported file type: {}".format(type(part)), role=role) def llmobs_get_metadata_gemini_vertexai(kwargs, instance): @@ -241,11 +253,11 @@ def llmobs_get_metadata_gemini_vertexai(kwargs, instance): return metadata -def extract_message_from_part_gemini_vertexai(part, role=None): +def extract_message_from_part_gemini_vertexai(part, role=None) -> Message: text = _get_attr(part, "text", "") 
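+    # A Part can carry plain text, a function_call, or a function_response; each is mapped onto a typed Message below.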
function_call = _get_attr(part, "function_call", None) function_response = _get_attr(part, "function_response", None) - message = {"content": text} + message = Message(content=str(text)) if role: message["role"] = role if function_call: @@ -310,3 +322,39 @@ def get_system_instructions_gemini_vertexai(model_instance): elif Part is not None and isinstance(elem, Part): system_instructions.append(_get_attr(elem, "text", "")) return system_instructions + + +def extract_messages_from_adk_events(events) -> List[Message]: + """ + Extract messages from Google ADK Event objects. + + Args: + events: List of ADK Event objects or single Event object + + Returns: + List of message dictionaries with format {"role": role, "content": content, ...} + """ + messages = [] + + # Handle both single event and list of events + if not isinstance(events, list): + events = [events] + + for event in events: + content = _get_attr(event, "content", None) + if not content: + continue + + role = _get_attr(content, "role", GOOGLE_GENAI_DEFAULT_MODEL_ROLE) + parts = _get_attr(content, "parts", []) + + if not isinstance(parts, list): + parts = [parts] + + for part in parts: + # Reuse the existing Google GenAI part extraction logic + message = extract_message_from_part_google_genai(part, role) + if message: + messages.append(message) + + return messages diff --git a/ddtrace/llmobs/_integrations/langchain.py b/ddtrace/llmobs/_integrations/langchain.py index b8670c3d6b7..21cc57744c7 100644 --- a/ddtrace/llmobs/_integrations/langchain.py +++ b/ddtrace/llmobs/_integrations/langchain.py @@ -35,14 +35,18 @@ from ddtrace.llmobs._constants import SPAN_LINKS from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import LANGCHAIN_ROLE_MAPPING from ddtrace.llmobs._integrations.utils import extract_instance_metadata_from_stack from ddtrace.llmobs._integrations.utils import format_langchain_io from ddtrace.llmobs._integrations.utils import update_proxy_workflow_input_output_value from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor +from ddtrace.llmobs._utils import _validate_prompt from ddtrace.llmobs._utils import safe_json -from ddtrace.llmobs._utils import validate_prompt -from ddtrace.llmobs.utils import Document +from ddtrace.llmobs.types import Document +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import ToolCall +from ddtrace.llmobs.types import _SpanLink from ddtrace.trace import Span @@ -298,7 +302,7 @@ def _set_output_links(self, span: Span, parent_span: Span, invoker_spans: List[S This is done by removing span links of previous steps in the chain from the parent span (if it is a chain). We add output->output span links at every step. 
""" - parent_links = parent_span._get_ctx_item(SPAN_LINKS) or [] + parent_links: List[_SpanLink] = parent_span._get_ctx_item(SPAN_LINKS) or [] pop_indices = self._get_popped_span_link_indices(parent_span, parent_links, invoker_spans, from_output) self._set_span_links( @@ -310,7 +314,7 @@ def _set_output_links(self, span: Span, parent_span: Span, invoker_spans: List[S ) def _get_popped_span_link_indices( - self, parent_span: Span, parent_links: List[Dict[str, Any]], invoker_spans: List[Span], from_output: bool + self, parent_span: Span, parent_links: List[_SpanLink], invoker_spans: List[Span], from_output: bool ) -> List[int]: """ Returns a list of indices to pop from the parent span links list @@ -340,17 +344,17 @@ def _set_span_links( popped_span_link_indices: Optional[List[int]] = None, ) -> None: """Sets the span links on the given span along with the existing links.""" - existing_links = span._get_ctx_item(SPAN_LINKS) or [] + existing_links: List[_SpanLink] = span._get_ctx_item(SPAN_LINKS) or [] if popped_span_link_indices: existing_links = [link for i, link in enumerate(existing_links) if i not in popped_span_link_indices] - links = [ - { - "trace_id": format_trace_id(from_span.trace_id), - "span_id": str(from_span.span_id), - "attributes": {"from": link_from, "to": link_to}, - } + links: List[_SpanLink] = [ + _SpanLink( + trace_id=format_trace_id(from_span.trace_id), + span_id=str(from_span.span_id), + attributes={"from": link_from, "to": link_to}, + ) for from_span in from_spans if from_span is not None ] @@ -384,7 +388,7 @@ def _get_prompt_variable_name(self, instance: Any) -> str: instance=instance, internal_variable_names=["instance", "self", "step"], default_variable_name="unknown_prompt_template", - default_module_name="unknown_module", + default_module_name="langchain", frame_start_offset=2, frame_search_depth=10, ) @@ -440,7 +444,7 @@ def _llmobs_set_tags_from_llm( # chat and llm take the same input types for streamed calls input_messages = self._handle_stream_input_messages(prompts) else: - input_messages = [{"content": str(prompt)} for prompt in prompts] + input_messages = [Message(content=str(prompt)) for prompt in prompts] span._set_ctx_items( { @@ -454,12 +458,12 @@ def _llmobs_set_tags_from_llm( self._llmobs_set_metadata(span, kwargs) if span.error: - span._set_ctx_item(output_tag_key, [{"content": ""}]) + span._set_ctx_item(output_tag_key, [Message(content="")]) return if stream: - message_content = [{"content": completions}] # single completion for streams + message_content = [Message(content=completions)] # single completion for streams else: - message_content = [{"content": completion[0].text} for completion in completions.generations] + message_content = [Message(content=completion[0].text) for completion in completions.generations] if not is_workflow: input_tokens, output_tokens, total_tokens = self.check_token_usage_chat_or_llm_result(completions) if total_tokens > 0: @@ -493,7 +497,7 @@ def _llmobs_set_tags_from_chat_model( output_tag_key = OUTPUT_VALUE if is_workflow else OUTPUT_MESSAGES stream = span.get_tag("langchain.request.stream") - input_messages = [] + input_messages: List[Message] = [] if stream: chat_messages = get_argument_value(args, kwargs, 0, "input") input_messages = self._handle_stream_input_messages(chat_messages) @@ -507,7 +511,7 @@ def _llmobs_set_tags_from_chat_model( message.get("content", "") if isinstance(message, dict) else getattr(message, "content", "") ) role = getattr(message, "role", ROLE_MAPPING.get(getattr(message, "type", ""), 
"")) - input_messages.append({"content": str(content), "role": str(role)}) + input_messages.append(Message(content=str(content), role=str(role))) tool_call_id = _get_attr(message, "tool_call_id", "") if not is_workflow and tool_call_id: core.dispatch( @@ -520,14 +524,14 @@ def _llmobs_set_tags_from_chat_model( span._set_ctx_item(input_tag_key, input_messages) if span.error: - span._set_ctx_item(output_tag_key, [{"content": ""}]) + span._set_ctx_item(output_tag_key, [Message(content="")]) return - output_messages = [] + output_messages: List[Message] = [] if stream: content = chat_completions.content role = chat_completions.__class__.__name__.replace("MessageChunk", "").lower() # AIMessageChunk --> ai - span._set_ctx_item(output_tag_key, [{"content": content, "role": ROLE_MAPPING.get(role, "")}]) + span._set_ctx_item(output_tag_key, [Message(content=content, role=ROLE_MAPPING.get(role, ""))]) return input_tokens, output_tokens, total_tokens = 0, 0, 0 @@ -542,7 +546,7 @@ def _llmobs_set_tags_from_chat_model( for chat_completion in message_set: chat_completion_msg = chat_completion.message role = getattr(chat_completion_msg, "role", ROLE_MAPPING.get(chat_completion_msg.type, "")) - output_message = {"content": str(chat_completion.text), "role": role} + output_message = Message(content=str(chat_completion.text), role=role) tool_calls_info = self._extract_tool_calls(chat_completion_msg) if not is_workflow: for tool_call in tool_calls_info: @@ -589,30 +593,30 @@ def _llmobs_set_tags_from_chat_model( } span._set_ctx_item(METRICS, metrics) - def _extract_tool_calls(self, chat_completion_msg: Any) -> List[Dict[str, Any]]: + def _extract_tool_calls(self, chat_completion_msg: Any) -> List[ToolCall]: """Extracts tool calls from a langchain chat completion.""" tool_calls = getattr(chat_completion_msg, "tool_calls", None) - tool_calls_info = [] + tool_calls_info: List[ToolCall] = [] if tool_calls: if not isinstance(tool_calls, list): tool_calls = [tool_calls] for tool_call in tool_calls: - tool_call_info = { - "name": tool_call.get("name", ""), - "arguments": tool_call.get("args", {}), # this is already a dict - "tool_id": tool_call.get("id", ""), - } + tool_call_info = ToolCall( + name=tool_call.get("name", ""), + arguments=tool_call.get("args", {}), # this is already a dict + tool_id=tool_call.get("id", ""), + ) tool_calls_info.append(tool_call_info) return tool_calls_info - def _handle_stream_input_messages(self, inputs): - input_messages = [] + def _handle_stream_input_messages(self, inputs) -> List[Message]: + input_messages: List[Message] = [] if hasattr(inputs, "to_messages"): # isinstance(inputs, langchain_core.prompt_values.PromptValue) inputs = inputs.to_messages() elif not isinstance(inputs, list): inputs = [inputs] for inp in inputs: - inp_message = {} + inp_message = Message() content, role = None, None if isinstance(inp, dict): content = str(inp.get("content", "")) @@ -676,7 +680,7 @@ def _llmobs_set_meta_tags_from_embedding( else: if isinstance(input_texts, str): input_texts = [input_texts] - input_documents = [Document(text=str(doc)) for doc in input_texts] + input_documents: List[Document] = [Document(text=str(doc)) for doc in input_texts] span._set_ctx_item(input_tag_key, input_documents) except TypeError: log.warning("Failed to serialize embedding input data to JSON") @@ -725,7 +729,7 @@ def _llmobs_set_meta_tags_from_similarity_search( if is_workflow: span._set_ctx_item(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(output_documents))) return - documents = [] + documents: 
List[Document] = [] for d in output_documents: doc = Document(text=d.page_content) doc["id"] = getattr(d, "id", "") @@ -843,12 +847,60 @@ def _get_base_url(self, **kwargs: Dict[str, Any]) -> Optional[str]: def handle_prompt_template_invoke(self, instance, result, args: List[Any], kwargs: Dict[str, Any]): """On prompt template invoke, store the template on the result so its available to consuming .invoke().""" - template, variables = None, None + chat_template, template, variables = None, None, None if hasattr(instance, "template") and isinstance(instance.template, str): template = instance.template - variables = get_argument_value(args, kwargs, 0, "input", optional=True) - if not template or not variables or not isinstance(variables, dict): + if ( + isinstance(getattr(instance, "messages", None), list) + and result is not None + and isinstance(getattr(result, "messages", None), list) + ): + messages = [] + if len(instance.messages) != len(result.messages): + # langchain allows for message placeholder templates, but we don't support them yet + # these are templates where a template variable takes as its value several complete messages + log.debug( + "Instance messages and result messages have different lengths; message placeholder not supported" + ) + return + + for m, r in zip(instance.messages, result.messages): + # message templates do not have a role: the role is derived from the instance class + # instead, we use the resulting message + role = getattr(r, "role", None) + if not role and hasattr(r, "type"): + role = LANGCHAIN_ROLE_MAPPING.get(r.type) + if not role: + role = "unknown" + + if hasattr(m, "content"): + # a message in the template that is not a template itself + messages.append({"role": role, "content": m.content}) + elif hasattr(m, "prompt") and hasattr(m.prompt, "template"): + # a template message + messages.append({"role": role, "content": str(m.prompt.template or "")}) + else: + messages = [] + log.debug("Failed to parse template messages") + break + chat_template = messages if messages else None + + variables = get_argument_value(args, kwargs, 0, "input", optional=True) + if ( + isinstance(variables, str) + and isinstance(getattr(instance, "input_variables", None), list) + and len(instance.input_variables) == 1 + ): + # variables should be passed in as a dict, but a string is allowed if there is only one input variable + variables = {instance.input_variables[0]: variables} + + if ( + (not template and not chat_template) + or (template and chat_template) + or not variables + or not isinstance(variables, dict) + ): return prompt_id = self._get_prompt_variable_name(instance) @@ -856,6 +908,7 @@ def handle_prompt_template_invoke(self, instance, result, args: List[Any], kwarg prompt = { "variables": variables, "template": template, + "chat_template": chat_template, "id": prompt_id if prompt_id is not None else "unknown", "version": "0.0.0", "rag_context_variables": [], @@ -885,7 +938,7 @@ def llmobs_set_prompt_tag(self, instance, span: Span): if prompt_value_meta is not None: prompt = prompt_value_meta try: - prompt = validate_prompt(prompt) + prompt = _validate_prompt(prompt, strict_validation=True) span._set_ctx_item(INPUT_PROMPT, prompt) except Exception as e: log.debug("Failed to validate langchain prompt", e) diff --git a/ddtrace/llmobs/_integrations/langgraph.py b/ddtrace/llmobs/_integrations/langgraph.py index 6c12bd1ecf4..97c7b3a0a77 100644 --- a/ddtrace/llmobs/_integrations/langgraph.py +++ b/ddtrace/llmobs/_integrations/langgraph.py @@ -27,6 +27,7 @@ from 
ddtrace.llmobs._integrations.utils import format_langchain_io from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor +from ddtrace.llmobs.types import _SpanLink from ddtrace.trace import Span @@ -93,11 +94,11 @@ def _llmobs_set_tags( self._get_node_metadata_from_span(span, instance_id) if operation == "node" or is_subgraph else {} ) - span_links = [_default_span_link(span)] - invoked_node_span_links = invoked_node.get("span_links") - if invoked_node_span_links is not None: + span_links: List[_SpanLink] = [_default_span_link(span)] + invoked_node_span_links: List[_SpanLink] = invoked_node.get("span_links") or [] + if invoked_node_span_links: span_links = invoked_node_span_links - current_span_links = span._get_ctx_item(SPAN_LINKS) or [] + current_span_links: List[_SpanLink] = span._get_ctx_item(SPAN_LINKS) or [] def maybe_format_langchain_io(messages): if messages is None: @@ -230,23 +231,24 @@ def _handle_finished_graph(self, graph_span: Span, finished_tasks: dict[str, Any not whether it is a standalone graph (called internally during a node execution). """ graph_caller_span = _get_nearest_llmobs_ancestor(graph_span) if graph_span else None - output_span_links = [ - { - **self._graph_nodes_for_graph_by_task_id[graph_span][task_id]["span"], - "attributes": {"from": "output", "to": "output"}, - } + output_span_links: List[_SpanLink] = [ + _SpanLink( + span_id=self._graph_nodes_for_graph_by_task_id[graph_span][task_id]["span"]["span_id"], + trace_id=self._graph_nodes_for_graph_by_task_id[graph_span][task_id]["span"]["trace_id"], + attributes={"from": "output", "to": "output"}, + ) for task_id in finished_tasks.keys() ] - graph_span_span_links = graph_span._get_ctx_item(SPAN_LINKS) or [] + graph_span_span_links: List[_SpanLink] = graph_span._get_ctx_item(SPAN_LINKS) or [] graph_span._set_ctx_item(SPAN_LINKS, graph_span_span_links + output_span_links) if graph_caller_span is not None and not is_subgraph_node: - graph_caller_span_links = graph_caller_span._get_ctx_item(SPAN_LINKS) or [] - span_links = [ - { - "span_id": str(graph_span.span_id) or "undefined", - "trace_id": format_trace_id(graph_span.trace_id), - "attributes": {"from": "output", "to": "output"}, - } + graph_caller_span_links: List[_SpanLink] = graph_caller_span._get_ctx_item(SPAN_LINKS) or [] + span_links: List[_SpanLink] = [ + _SpanLink( + span_id=str(graph_span.span_id) or "undefined", + trace_id=format_trace_id(graph_span.trace_id), + attributes={"from": "output", "to": "output"}, + ) ] graph_caller_span._set_ctx_item(SPAN_LINKS, graph_caller_span_links + span_links) @@ -294,12 +296,12 @@ def _link_task_to_triggers( if not trigger_node_span: continue - span_link = { - "span_id": trigger_node_span.get("span_id", ""), - "trace_id": trigger_node_span.get("trace_id", ""), - "attributes": {"from": "output", "to": "input"}, - } - span_links: List[Dict[str, Any]] = queued_node.setdefault("span_links", []) + span_link = _SpanLink( + span_id=trigger_node_span.get("span_id", ""), + trace_id=trigger_node_span.get("trace_id", ""), + attributes={"from": "output", "to": "input"}, + ) + span_links: List[_SpanLink] = queued_node.setdefault("span_links", []) span_links.append(span_link) return trigger_ids @@ -311,7 +313,7 @@ def _link_standalone_terminal_tasks( Default handler that links any finished tasks not used as triggers for queued tasks to the outer graph span. 
""" standalone_terminal_task_ids = set(finished_tasks.keys()) - used_finished_tasks_ids - graph_span_links = graph_span._get_ctx_item(SPAN_LINKS) or [] + graph_span_links: List[_SpanLink] = graph_span._get_ctx_item(SPAN_LINKS) or [] for finished_task_id in standalone_terminal_task_ids: node = self._graph_nodes_for_graph_by_task_id.get(graph_span, {}).get(finished_task_id) if node is None: @@ -322,11 +324,11 @@ def _link_standalone_terminal_tasks( continue graph_span_links.append( - { - "span_id": span.get("span_id", ""), - "trace_id": span.get("trace_id", ""), - "attributes": {"from": "output", "to": "output"}, - } + _SpanLink( + span_id=span.get("span_id", ""), + trace_id=span.get("trace_id", ""), + attributes={"from": "output", "to": "output"}, + ) ) graph_span._set_ctx_item(SPAN_LINKS, graph_span_links) @@ -537,14 +539,14 @@ def _append_finished_task_to_channel_writes_map( tasks_for_trigger.append(finished_task_id) -def _default_span_link(span: Span) -> dict: +def _default_span_link(span: Span) -> _SpanLink: """ Create a default input-to-input span link for a given span, if there are no referenced spans that represent the causal link. In this case, we assume the span is linked to its parent's input. """ - return { - "span_id": span._get_ctx_item(PARENT_ID_KEY) or ROOT_PARENT_ID, - "trace_id": format_trace_id(span.trace_id), - "attributes": {"from": "input", "to": "input"}, - } + return _SpanLink( + span_id=span._get_ctx_item(PARENT_ID_KEY) or ROOT_PARENT_ID, + trace_id=format_trace_id(span.trace_id), + attributes={"from": "input", "to": "input"}, + ) diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py index 35306d43ed6..75b769ccc00 100644 --- a/ddtrace/llmobs/_integrations/openai.py +++ b/ddtrace/llmobs/_integrations/openai.py @@ -27,7 +27,7 @@ from ddtrace.llmobs._integrations.utils import openai_set_meta_tags_from_response from ddtrace.llmobs._integrations.utils import update_proxy_workflow_input_output_value from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs.utils import Document +from ddtrace.llmobs.types import Document from ddtrace.trace import Span @@ -132,7 +132,7 @@ def _llmobs_set_meta_tags_from_embedding(span: Span, kwargs: Dict[str, Any], res embedding_inputs = kwargs.get("input", "") if isinstance(embedding_inputs, str) or isinstance(embedding_inputs[0], int): embedding_inputs = [embedding_inputs] - input_documents = [] + input_documents: List[Document] = [] for doc in embedding_inputs: input_documents.append(Document(text=str(doc))) span._set_ctx_items({METADATA: metadata, INPUT_DOCUMENTS: input_documents}) diff --git a/ddtrace/llmobs/_integrations/openai_agents.py b/ddtrace/llmobs/_integrations/openai_agents.py index e535c784136..33e73ae98d8 100644 --- a/ddtrace/llmobs/_integrations/openai_agents.py +++ b/ddtrace/llmobs/_integrations/openai_agents.py @@ -237,9 +237,9 @@ def _llmobs_set_response_attributes(self, span: Span, oai_span: OaiSpanAdapter) core.dispatch( DISPATCH_ON_LLM_TOOL_CHOICE, ( - tool_call_output["tool_id"], - tool_call_output["name"], - safe_json(tool_call_output["arguments"]), + tool_call_output.get("tool_id", ""), + tool_call_output.get("name", ""), + safe_json(tool_call_output.get("arguments", {})), { "trace_id": format_trace_id(span.trace_id), "span_id": str(span.span_id), diff --git a/ddtrace/llmobs/_integrations/utils.py b/ddtrace/llmobs/_integrations/utils.py index 3a1542fe794..a1e658bd307 100644 --- a/ddtrace/llmobs/_integrations/utils.py +++ b/ddtrace/llmobs/_integrations/utils.py @@ -29,9 
+29,10 @@ from ddtrace.llmobs._utils import load_data_value from ddtrace.llmobs._utils import safe_json from ddtrace.llmobs._utils import safe_load_json -from ddtrace.llmobs.utils import ToolCall -from ddtrace.llmobs.utils import ToolDefinition -from ddtrace.llmobs.utils import ToolResult +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import ToolCall +from ddtrace.llmobs.types import ToolDefinition +from ddtrace.llmobs.types import ToolResult try: @@ -227,7 +228,7 @@ def get_content_from_langchain_message(message) -> Union[str, Tuple[str, str]]: return str(message) -def get_messages_from_converse_content(role: str, content: List[Dict[str, Any]]): +def get_messages_from_converse_content(role: str, content: List[Dict[str, Any]]) -> List[Message]: """ Extracts out a list of messages from a converse `content` field. @@ -239,13 +240,11 @@ def get_messages_from_converse_content(role: str, content: List[Dict[str, Any]]) """ if not content or not isinstance(content, list) or not isinstance(content[0], dict): return [] - messages: List[Dict[str, Union[str, List[Dict[str, Any]], List[ToolCall], List[ToolResult]]]] = [] + messages: List[Message] = [] content_blocks = [] tool_calls_info = [] - tool_messages: List[Dict[str, Any]] = [] - unsupported_content_messages: List[ - Dict[str, Union[str, List[Dict[str, Any]], List[ToolCall], List[ToolResult]]] - ] = [] + tool_messages: List[Message] = [] + unsupported_content_messages: List[Message] = [] for content_block in content: if content_block.get("text") and isinstance(content_block.get("text"), str): content_blocks.append(content_block.get("text", "")) @@ -275,21 +274,22 @@ def get_messages_from_converse_content(role: str, content: List[Dict[str, Any]]) type="toolResult", ) tool_messages.append( - { - "tool_results": [tool_result_info], - "role": "user", - } + Message( + tool_results=[tool_result_info], + role="user", + ) ) else: content_type = ",".join(content_block.keys()) unsupported_content_messages.append( - {"content": "[Unsupported content type: {}]".format(content_type), "role": role} + Message(content="[Unsupported content type: {}]".format(content_type), role=role) ) - message: Dict[str, Union[str, List[Dict[str, Any]], List[ToolCall], List[ToolResult]]] = {} + message: Message = Message() if tool_calls_info: - message.update({"tool_calls": tool_calls_info}) + message["tool_calls"] = tool_calls_info if content_blocks: - message.update({"content": " ".join(content_blocks), "role": role}) + message["content"] = " ".join(content_blocks) + message["role"] = role if message: messages.append(message) if unsupported_content_messages: @@ -307,13 +307,13 @@ def openai_set_meta_tags_from_completion( if isinstance(prompt, str): prompt = [prompt] parameters = get_metadata_from_kwargs(kwargs, integration_name, "completion") - output_messages = [{"content": ""}] + output_messages = [Message(content="")] if not span.error and completions: choices = getattr(completions, "choices", completions) - output_messages = [{"content": _get_attr(choice, "text", "")} for choice in choices] + output_messages = [Message(content=str(_get_attr(choice, "text", ""))) for choice in choices] span._set_ctx_items( { - INPUT_MESSAGES: [{"content": str(p)} for p in prompt], + INPUT_MESSAGES: [Message(content=str(p)) for p in prompt], METADATA: parameters, OUTPUT_MESSAGES: output_messages, } ) @@ -324,14 +324,11 @@ def openai_set_meta_tags_from_chat( span: Span, kwargs: Dict[str, Any], messages: Optional[Any], integration_name: str = "openai" ) -> None: """Extract
prompt/response tags from a chat completion and set them as temporary "_ml_obs.meta.*" tags.""" - input_messages = [] + input_messages: List[Message] = [] for m in kwargs.get("messages", []): content = str(_get_attr(m, "content", "")) role = str(_get_attr(m, "role", "")) - processed_message: Dict[str, Union[str, List[ToolCall], List[ToolResult]]] = { - "content": content, - "role": role, - } + processed_message: Message = Message(content=content, role=role) tool_call_id = _get_attr(m, "tool_call_id", None) if tool_call_id: core.dispatch(DISPATCH_ON_TOOL_CALL_OUTPUT_USED, (tool_call_id, span)) @@ -358,16 +355,16 @@ def openai_set_meta_tags_from_chat( span._set_ctx_item(TOOL_DEFINITIONS, tools) if span.error or not messages: - span._set_ctx_item(OUTPUT_MESSAGES, [{"content": ""}]) + span._set_ctx_item(OUTPUT_MESSAGES, [Message(content="")]) return if isinstance(messages, list): # streamed response role = "" - output_messages = [] + output_messages: List[Message] = [] for streamed_message in messages: # litellm roles appear only on the first choice, so store it to be used for all choices role = streamed_message.get("role", "") or role content = streamed_message.get("content", "") - message = {"content": content, "role": role} + message = Message(content=content, role=role) extracted_tool_calls, _ = _openai_extract_tool_calls_and_results_chat( streamed_message, llm_span=span, dispatch_llm_choice=True @@ -391,7 +388,7 @@ def openai_set_meta_tags_from_chat( ) capture_plain_text_tool_usage(extracted_tool_calls, extracted_tool_results, content, span) - message = {"content": content, "role": role} + message = Message(content=str(content), role=str(role)) if extracted_tool_calls: message["tool_calls"] = extracted_tool_calls if extracted_tool_results: @@ -433,10 +430,10 @@ def _openai_extract_tool_calls_and_results_chat( raw_args = safe_load_json(raw_args) if isinstance(raw_args, str) else raw_args tool_call_info = ToolCall( - name=tool_name, + name=str(tool_name), arguments=raw_args, - tool_id=tool_id, - type=tool_type, + tool_id=str(tool_id), + type=str(tool_type), ) tool_calls.append(tool_call_info) @@ -444,10 +441,10 @@ def _openai_extract_tool_calls_and_results_chat( if _get_attr(message, "role", "") == "tool": result = _get_attr(message, "content", "") tool_result_info = ToolResult( - name=_get_attr(message, "name", ""), + name=str(_get_attr(message, "name", "")), result=str(result) if result else "", - tool_id=_get_attr(message, "tool_call_id", ""), - type=_get_attr(message, "type", "tool_result"), + tool_id=str(_get_attr(message, "tool_call_id", "")), + type=str(_get_attr(message, "type", "tool_result")), ) tool_results.append(tool_result_info) @@ -541,7 +538,7 @@ def get_metadata_from_kwargs( def openai_get_input_messages_from_response_input( messages: Optional[Union[str, List[Dict[str, Any]]]], -) -> List[Dict[str, Any]]: +) -> List[Message]: """Parses the input to openai responses api into a list of input messages Args: @@ -556,7 +553,7 @@ def openai_get_input_messages_from_response_input( def _openai_parse_input_response_messages( messages: Optional[Union[str, List[Dict[str, Any]]]], system_instructions: Optional[str] = None -) -> Tuple[List[Dict[str, Any]], List[str]]: +) -> Tuple[List[Message], List[str]]: """ Parses input messages from the openai responses api into a list of processed messages and a list of tool call IDs. 
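Because Message is a TypedDict (see the new ddtrace/llmobs/types.py later in this diff), building one is plain dict construction at runtime while static type checkers can verify the keys. A minimal sketch of the input-parsing pattern used by _openai_parse_input_response_messages, assuming the ddtrace.llmobs.types module from this diff is importable; the helper name parse_input and the input shape are illustrative, not part of the library:

from typing import Any, Dict, List, Optional, Union

from ddtrace.llmobs.types import Message


def parse_input(messages: Optional[Union[str, List[Dict[str, Any]]]]) -> List[Message]:
    # A bare string becomes a single user message.
    if isinstance(messages, str):
        return [Message(content=messages, role="user")]
    processed: List[Message] = []
    for item in messages or []:
        # Only plain {"role": ..., "content": ...} items are handled in this sketch.
        if "content" in item and "role" in item:
            processed.append(Message(content=str(item["content"]), role=str(item["role"])))
    return processed


print(parse_input("hello"))  # [{'content': 'hello', 'role': 'user'}]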
@@ -568,20 +565,20 @@ def _openai_parse_input_response_messages( - A list of processed messages - A list of tool call IDs """ - processed: List[Dict[str, Any]] = [] + processed: List[Message] = [] tool_call_ids: List[str] = [] if system_instructions: - processed.append({"role": "system", "content": system_instructions}) + processed.append(Message(role="system", content=system_instructions)) if not messages: return processed, tool_call_ids if isinstance(messages, str): - return [{"role": "user", "content": messages}], tool_call_ids + return [Message(content=messages, role="user")], tool_call_ids for item in messages: - processed_item: Dict[str, Union[str, List[ToolCall], List[ToolResult]]] = {} + processed_item: Message = Message() # Handle regular message if "content" in item and "role" in item: processed_item_content = "" @@ -619,7 +616,7 @@ def _openai_parse_input_response_messages( output = safe_json(output) tool_result_info = ToolResult( tool_id=item["call_id"], - result=output, + result=str(output) if output else "", name=item.get("name", ""), type=item.get("type", "function_call_output"), ) @@ -636,7 +633,7 @@ def _openai_parse_input_response_messages( return processed, tool_call_ids -def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Dict[str, Any]]: +def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Message]: """ Parses the output to openai responses api into a list of output messages @@ -658,7 +655,7 @@ def openai_get_output_messages_from_response(response: Optional[Any]) -> List[Di return processed_messages -def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Dict[str, Any]], List[ToolCall]]: +def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Message], List[ToolCall]]: """ Parses output messages from the openai responses api into a list of processed messages and a list of tool call outputs. 
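On the output side, a "function_call" item is mapped to a ToolCall record and attached to a Message under tool_calls. A small sketch of that mapping under an assumed plain-dict item shape (real responses-API items are SDK objects read through _get_attr; the literal below is hypothetical):

import json

from ddtrace.llmobs.types import Message, ToolCall

item = {"type": "function_call", "call_id": "call_1", "name": "get_weather", "arguments": '{"city": "Paris"}'}

tool_call = ToolCall(
    tool_id=str(item["call_id"]),
    name=str(item["name"]),
    arguments=json.loads(item["arguments"]),  # arguments are kept as a dict, not a JSON string
    type=str(item["type"]),
)
# total=False lets a Message omit "content" entirely when it only carries tool calls.
message = Message(tool_calls=[tool_call])
print(message)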
@@ -670,11 +667,11 @@ def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Di - A list of processed messages - A list of tool call outputs """ - processed: List[Dict[str, Any]] = [] + processed: List[Message] = [] tool_call_outputs: List[ToolCall] = [] for item in messages: - message = {} + message: Message = Message() message_type = _get_attr(item, "type", "") if message_type == "message": @@ -682,7 +679,7 @@ def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Di for content in _get_attr(item, "content", []) or []: text += str(_get_attr(content, "text", "") or "") text += str(_get_attr(content, "refusal", "") or "") - message.update({"role": _get_attr(item, "role", "assistant"), "content": text}) + message.update({"role": str(_get_attr(item, "role", "assistant")), "content": text}) elif message_type == "reasoning": message.update( { @@ -693,19 +690,20 @@ def _openai_parse_output_response_messages(messages: List[Any]) -> Tuple[List[Di "encrypted_content": _get_attr(item, "encrypted_content", ""), "id": _get_attr(item, "id", ""), } - ), + ) + or "", } ) elif message_type == "function_call" or message_type == "custom_tool_call": call_id = _get_attr(item, "call_id", "") name = _get_attr(item, "name", "") raw_arguments = _get_attr(item, "input", "") or _get_attr(item, "arguments", OAI_HANDOFF_TOOL_ARG) - arguments = safe_load_json(raw_arguments) + arguments = safe_load_json(str(raw_arguments)) tool_call_info = ToolCall( - tool_id=call_id, + tool_id=str(call_id), arguments=arguments, - name=name, - type=_get_attr(item, "type", "function"), + name=str(name), + type=str(_get_attr(item, "type", "function")), ) tool_call_outputs.append(tool_call_info) message.update( @@ -753,7 +751,7 @@ def openai_set_meta_tags_from_response(span: Span, kwargs: Dict[str, Any], respo input_messages = openai_get_input_messages_from_response_input(input_data) if "instructions" in kwargs: - input_messages.insert(0, {"content": str(kwargs["instructions"]), "role": "system"}) + input_messages.insert(0, Message(content=str(kwargs["instructions"]), role="system")) span._set_ctx_items( { @@ -763,14 +761,14 @@ def openai_set_meta_tags_from_response(span: Span, kwargs: Dict[str, Any], respo ) if span.error or not response: - span._set_ctx_item(OUTPUT_MESSAGES, [{"content": ""}]) + span._set_ctx_item(OUTPUT_MESSAGES, [Message(content="")]) return # The response potentially contains enriched metadata (ex. 
tool calls) not in the original request metadata = span._get_ctx_item(METADATA) or {} metadata.update(openai_get_metadata_from_response(response)) span._set_ctx_item(METADATA, metadata) - output_messages = openai_get_output_messages_from_response(response) + output_messages: List[Message] = openai_get_output_messages_from_response(response) span._set_ctx_item(OUTPUT_MESSAGES, output_messages) tools = _openai_get_tool_definitions(kwargs.get("tools") or []) if tools: @@ -784,24 +782,24 @@ def _openai_get_tool_definitions(tools: List[Any]) -> List[ToolDefinition]: if _get_attr(tool, "function", None): function = _get_attr(tool, "function", {}) tool_definition = ToolDefinition( - name=_get_attr(function, "name", ""), - description=_get_attr(function, "description", ""), + name=str(_get_attr(function, "name", "")), + description=str(_get_attr(function, "description", "")), schema=_get_attr(function, "parameters", {}), ) # chat API custom tool access elif _get_attr(tool, "custom", None): custom_tool = _get_attr(tool, "custom", {}) tool_definition = ToolDefinition( - name=_get_attr(custom_tool, "name", ""), - description=_get_attr(custom_tool, "description", ""), + name=str(_get_attr(custom_tool, "name", "")), + description=str(_get_attr(custom_tool, "description", "")), schema=_get_attr(custom_tool, "format", {}), # format is a dict ) # chat API function access and response API tool access # only handles FunctionToolParam and CustomToolParam for response API for now else: tool_definition = ToolDefinition( - name=_get_attr(tool, "name", ""), - description=_get_attr(tool, "description", ""), + name=str(_get_attr(tool, "name", "")), + description=str(_get_attr(tool, "description", "")), schema=_get_attr(tool, "parameters", {}) or _get_attr(tool, "format", {}), ) if not any(tool_definition.values()): @@ -1108,7 +1106,7 @@ def get_error_data(self) -> Optional[Dict[str, Any]]: return None return self.error.get("data") - def llmobs_input_messages(self) -> Tuple[List[Dict[str, Any]], List[str]]: + def llmobs_input_messages(self) -> Tuple[List[Message], List[str]]: """Returns processed input messages for LLM Obs LLM spans. Returns: @@ -1117,7 +1115,7 @@ def llmobs_input_messages(self) -> Tuple[List[Dict[str, Any]], List[str]]: """ return _openai_parse_input_response_messages(self.input, self.response_system_instructions) - def llmobs_output_messages(self) -> Tuple[List[Dict[str, Any]], List[ToolCall]]: + def llmobs_output_messages(self) -> Tuple[List[Message], List[ToolCall]]: """Returns processed output messages for LLM Obs LLM spans. Returns: @@ -1213,7 +1211,7 @@ class LLMObsTraceInfo: def get_final_message_converse_stream_message( message: Dict[str, Any], text_blocks: Dict[int, str], tool_blocks: Dict[int, Dict[str, Any]] -) -> Dict[str, Any]: +) -> Message: """Process a message and its content blocks into LLM Obs message format. 
Args: @@ -1225,7 +1223,7 @@ def get_final_message_converse_stream_message( Dict containing the processed message with content and optional tool calls """ indices = sorted(message.get("content_block_indicies", [])) - message_output = {"role": message["role"]} + message_output = Message(role=message["role"]) text_contents = [text_blocks[idx] for idx in indices if idx in text_blocks] message_output.update({"content": "".join(text_contents)} if text_contents else {}) diff --git a/ddtrace/llmobs/_integrations/vertexai.py b/ddtrace/llmobs/_integrations/vertexai.py index d982d423f0a..f392e608bc7 100644 --- a/ddtrace/llmobs/_integrations/vertexai.py +++ b/ddtrace/llmobs/_integrations/vertexai.py @@ -22,6 +22,7 @@ from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai from ddtrace.llmobs._utils import _get_attr +from ddtrace.llmobs.types import Message from ddtrace.trace import Span @@ -57,7 +58,7 @@ def _llmobs_set_tags( input_contents = get_argument_value(args, kwargs, 0, "contents") input_messages = self._extract_input_message(input_contents, history, system_instruction) - output_messages = [{"content": ""}] + output_messages: List[Message] = [Message(content="")] if response is not None: output_messages = self._extract_output_message(response) metrics = self._extract_metrics_from_response(response) @@ -109,28 +110,28 @@ def _extract_metrics_from_response(self, response): return metrics - def _extract_input_message(self, contents, history, system_instruction=None): + def _extract_input_message(self, contents, history, system_instruction=None) -> List[Message]: from vertexai.generative_models._generative_models import Part - messages = [] + messages: List[Message] = [] if system_instruction: for instruction in system_instruction: - messages.append({"content": instruction or "", "role": "system"}) + messages.append(Message(content=instruction or "", role="system")) for content in history: messages.extend(self._extract_messages_from_content(content)) if isinstance(contents, str): - messages.append({"content": contents}) + messages.append(Message(content=contents)) return messages if isinstance(contents, Part): message = extract_message_from_part_gemini_vertexai(contents) messages.append(message) return messages if not isinstance(contents, list): - messages.append({"content": "[Non-text content object: {}]".format(repr(contents))}) + messages.append(Message(content="[Non-text content object: {}]".format(repr(contents)))) return messages for content in contents: if isinstance(content, str): - messages.append({"content": content}) + messages.append(Message(content=content)) continue if isinstance(content, Part): message = extract_message_from_part_gemini_vertexai(content) @@ -139,8 +140,8 @@ def _extract_input_message(self, contents, history, system_instruction=None): messages.extend(self._extract_messages_from_content(content)) return messages - def _extract_output_message(self, generations): - output_messages = [] + def _extract_output_message(self, generations) -> List[Message]: + output_messages: List[Message] = [] # streamed responses will be a list of chunks if isinstance(generations, list): message_content = "" @@ -153,7 +154,7 @@ def _extract_output_message(self, generations): for message in messages: message_content += message.get("content", "") tool_calls.extend(message.get("tool_calls", [])) - message = {"content": message_content, "role": role} + message = 
Message(content=message_content, role=role) if tool_calls: message["tool_calls"] = tool_calls return [message] @@ -164,14 +165,14 @@ def _extract_output_message(self, generations): return output_messages @staticmethod - def _extract_messages_from_content(content): - messages = [] + def _extract_messages_from_content(content) -> List[Message]: + messages: List[Message] = [] role = _get_attr(content, "role", "") parts = _get_attr(content, "parts", []) if not parts or not isinstance(parts, Iterable): - message = {"content": "[Non-text content object: {}]".format(repr(content))} + message = Message(content="[Non-text content object: {}]".format(repr(content))) if role: - message["role"] = role + message["role"] = str(role) messages.append(message) return messages for part in parts: diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py index 9f520e8834f..a8bed76756e 100644 --- a/ddtrace/llmobs/_llmobs.py +++ b/ddtrace/llmobs/_llmobs.py @@ -13,13 +13,13 @@ from typing import Optional from typing import Set from typing import Tuple -from typing import TypedDict from typing import Union from typing import cast import ddtrace from ddtrace import config from ddtrace import patch +from ddtrace._trace.apm_filter import APMTracingEnabledFilter from ddtrace._trace.context import Context from ddtrace._trace.span import Span from ddtrace._trace.tracer import Tracer @@ -100,17 +100,23 @@ from ddtrace.llmobs._utils import _get_session_id from ddtrace.llmobs._utils import _get_span_name from ddtrace.llmobs._utils import _is_evaluation_span +from ddtrace.llmobs._utils import _validate_prompt from ddtrace.llmobs._utils import enforce_message_role from ddtrace.llmobs._utils import safe_json -from ddtrace.llmobs._utils import validate_prompt from ddtrace.llmobs._writer import LLMObsEvalMetricWriter from ddtrace.llmobs._writer import LLMObsEvaluationMetricEvent from ddtrace.llmobs._writer import LLMObsExperimentsClient from ddtrace.llmobs._writer import LLMObsSpanEvent from ddtrace.llmobs._writer import LLMObsSpanWriter from ddtrace.llmobs._writer import should_use_agentless +from ddtrace.llmobs.types import ExportedLLMObsSpan +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import Prompt +from ddtrace.llmobs.types import _ErrorField +from ddtrace.llmobs.types import _Meta +from ddtrace.llmobs.types import _MetaIO +from ddtrace.llmobs.types import _SpanField from ddtrace.llmobs.utils import Documents -from ddtrace.llmobs.utils import ExportedLLMObsSpan from ddtrace.llmobs.utils import Messages from ddtrace.llmobs.utils import extract_tool_definitions from ddtrace.propagation.http import HTTPPropagator @@ -124,6 +130,7 @@ "bedrock": "botocore", "openai": "openai", "langchain": "langchain", + "google_adk": "google_adk", "google_generativeai": "google_generativeai", "google_genai": "google_genai", "vertexai": "vertexai", @@ -161,10 +168,6 @@ def span_processor(span: LLMObsSpan) -> Optional[LLMObsSpan]: return span """ - class Message(TypedDict): - content: str - role: str - input: List[Message] = field(default_factory=list) output: List[Message] = field(default_factory=list) _tags: Dict[str, str] = field(default_factory=dict) @@ -268,9 +271,9 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: "apm_trace_id": format_trace_id(span.trace_id), } - meta: Dict[str, Any] = {"span.kind": span_kind, "input": {}, "output": {}} + meta = _Meta(span=_SpanField(kind=span_kind), input=_MetaIO(), output=_MetaIO()) if span_kind in ("llm", "embedding") and 
span._get_ctx_item(MODEL_NAME) is not None: - meta["model_name"] = span._get_ctx_item(MODEL_NAME) + meta["model_name"] = span._get_ctx_item(MODEL_NAME) or "" meta["model_provider"] = (span._get_ctx_item(MODEL_PROVIDER) or "custom").lower() metadata = span._get_ctx_item(METADATA) or {} if span_kind == "agent" and span._get_ctx_item(AGENT_MANIFEST) is not None: @@ -282,7 +285,7 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: if span._get_ctx_item(INPUT_VALUE) is not None: input_type = "value" llmobs_span.input = [ - {"content": safe_json(span._get_ctx_item(INPUT_VALUE), ensure_ascii=False), "role": ""} + Message(content=safe_json(span._get_ctx_item(INPUT_VALUE), ensure_ascii=False) or "", role="") ] if span.context.get_baggage_item(EXPERIMENT_ID_KEY): @@ -303,23 +306,23 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: input_messages = span._get_ctx_item(INPUT_MESSAGES) if span_kind == "llm" and input_messages is not None: input_type = "messages" - llmobs_span.input = cast(List[LLMObsSpan.Message], enforce_message_role(input_messages)) + llmobs_span.input = cast(List[Message], enforce_message_role(input_messages)) if span._get_ctx_item(OUTPUT_VALUE) is not None: output_type = "value" llmobs_span.output = [ - {"content": safe_json(span._get_ctx_item(OUTPUT_VALUE), ensure_ascii=False), "role": ""} + Message(content=safe_json(span._get_ctx_item(OUTPUT_VALUE), ensure_ascii=False) or "", role="") ] output_messages = span._get_ctx_item(OUTPUT_MESSAGES) if span_kind == "llm" and output_messages is not None: output_type = "messages" - llmobs_span.output = cast(List[LLMObsSpan.Message], enforce_message_role(output_messages)) + llmobs_span.output = cast(List[Message], enforce_message_role(output_messages)) if span_kind == "embedding" and span._get_ctx_item(INPUT_DOCUMENTS) is not None: - meta["input"]["documents"] = span._get_ctx_item(INPUT_DOCUMENTS) + meta["input"]["documents"] = span._get_ctx_item(INPUT_DOCUMENTS) or [] if span_kind == "retrieval" and span._get_ctx_item(OUTPUT_DOCUMENTS) is not None: - meta["output"]["documents"] = span._get_ctx_item(OUTPUT_DOCUMENTS) + meta["output"]["documents"] = span._get_ctx_item(OUTPUT_DOCUMENTS) or [] if span._get_ctx_item(INPUT_PROMPT) is not None: prompt_json_str = span._get_ctx_item(INPUT_PROMPT) @@ -328,7 +331,8 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: "Dropping prompt on non-LLM span kind, annotating prompts is only supported for LLM span kinds." 
) else: - meta["input"]["prompt"] = prompt_json_str + prompt_dict = cast(Prompt, prompt_json_str) + meta["input"]["prompt"] = prompt_dict elif span_kind == "llm": parent_span = _get_nearest_llmobs_ancestor(span) if parent_span is not None: @@ -337,14 +341,12 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: meta["input"]["prompt"] = parent_prompt if span._get_ctx_item(TOOL_DEFINITIONS) is not None: - meta["tool_definitions"] = span._get_ctx_item(TOOL_DEFINITIONS) + meta["tool_definitions"] = span._get_ctx_item(TOOL_DEFINITIONS) or [] if span.error: - meta.update( - { - ERROR_MSG: span.get_tag(ERROR_MSG), - ERROR_STACK: span.get_tag(ERROR_STACK), - ERROR_TYPE: span.get_tag(ERROR_TYPE), - } + meta["error"] = _ErrorField( + message=span.get_tag(ERROR_MSG) or "", + stack=span.get_tag(ERROR_STACK) or "", + type=span.get_tag(ERROR_TYPE) or "", ) if self._user_span_processor: @@ -369,12 +371,12 @@ def _llmobs_span_event(self, span: Span) -> Optional[LLMObsSpanEvent]: if input_type == "messages": meta["input"]["messages"] = llmobs_span.input elif input_type == "value": - meta["input"]["value"] = llmobs_span.input[0]["content"] + meta["input"]["value"] = llmobs_span.input[0].get("content", "") if llmobs_span.output is not None: if output_type == "messages": meta["output"]["messages"] = llmobs_span.output elif output_type == "value": - meta["output"]["value"] = llmobs_span.output[0]["content"] + meta["output"]["value"] = llmobs_span.output[0].get("content", "") if not meta["input"]: meta.pop("input") @@ -603,6 +605,11 @@ def enable( # override the default _instance with a new tracer cls._instance = cls(tracer=_tracer, span_processor=span_processor) + + # Add APM trace filter to drop all APM traces when DD_APM_TRACING_ENABLED is falsy + apm_filter = APMTracingEnabledFilter() + cls._instance.tracer._span_aggregator.dd_processors.append(apm_filter) + cls.enabled = True cls._instance.start() @@ -751,6 +758,13 @@ def experiment( description: str = "", tags: Optional[Dict[str, str]] = None, config: Optional[ExperimentConfigType] = None, + summary_evaluators: Optional[ + List[ + Callable[ + [List[DatasetRecordInputType], List[JSONType], List[JSONType], Dict[str, List[JSONType]]], JSONType + ] + ] + ] = None, ) -> Experiment: """Initializes an Experiment to run a task on a Dataset and evaluators. 
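The summary-evaluator validation in the next hunk only inspects parameter names, so any callable whose signature includes inputs, outputs, expected_outputs, and evaluators_results is accepted. A hedged sketch of what such a summary evaluator could look like; the accuracy metric and function name are illustrative, not part of the SDK:

from typing import Any, Dict, List


def accuracy_summary(
    inputs: List[Any],
    outputs: List[Any],
    expected_outputs: List[Any],
    evaluators_results: Dict[str, List[Any]],
) -> Dict[str, float]:
    # Aggregate per-record task outputs into a single experiment-level metric.
    matches = sum(1 for out, exp in zip(outputs, expected_outputs) if out == exp)
    return {"accuracy": matches / len(outputs) if outputs else 0.0}


# Would be passed as: LLMObs.experiment(..., summary_evaluators=[accuracy_summary])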
@@ -776,9 +790,21 @@ def experiment( for evaluator in evaluators: sig = inspect.signature(evaluator) params = sig.parameters - required_params = ("input_data", "output_data", "expected_output") - if not all(param in params for param in required_params): - raise TypeError("Evaluator function must have parameters {}.".format(required_params)) + evaluator_required_params = ("input_data", "output_data", "expected_output") + if not all(param in params for param in evaluator_required_params): + raise TypeError("Evaluator function must have parameters {}.".format(evaluator_required_params)) + + if summary_evaluators and not all(callable(summary_evaluator) for summary_evaluator in summary_evaluators): + raise TypeError("Summary evaluators must be a list of callable functions.") + if summary_evaluators: + for summary_evaluator in summary_evaluators: + sig = inspect.signature(summary_evaluator) + params = sig.parameters + summary_evaluator_required_params = ("inputs", "outputs", "expected_outputs", "evaluators_results") + if not all(param in params for param in summary_evaluator_required_params): + raise TypeError( + "Summary evaluator function must have parameters {}.".format(summary_evaluator_required_params) + ) return Experiment( name, task, @@ -789,6 +815,7 @@ description=description, config=config, _llmobs_instance=cls._instance, + summary_evaluators=summary_evaluators, ) @classmethod @@ -841,7 +868,10 @@ def _tag_span_links(self, span, span_links): @classmethod def annotation_context( - cls, tags: Optional[Dict[str, Any]] = None, prompt: Optional[dict] = None, name: Optional[str] = None + cls, + tags: Optional[Dict[str, Any]] = None, + prompt: Optional[Union[dict, Prompt]] = None, + name: Optional[str] = None, ) -> AnnotationContext: """ Sets specified attributes on all LLMObs spans created while the returned AnnotationContext is active. :param tags: Dictionary of JSON serializable key-value tag pairs to set or update on the LLMObs span regarding the span's context. :param prompt: A dictionary that represents the prompt used for an LLM call in the following form: - `{"template": "...", "id": "...", "version": "...", "variables": {"variable_1": "...", ...}}`. + `{ + "id": "...", + "version": "...", + "chat_template": [{"content": "...", "role": "..."}, ...], + "variables": {"variable_1": "...", ...}, + "tags": {"key1": "value1", "key2": "value2"}, + }`. Can also be set using the `ddtrace.llmobs.utils.Prompt` constructor class. - This argument is only applicable to LLM spans. - - The dictionary may contain two optional keys relevant to RAG applications: + - The dictionary may contain optional keys relevant to templates and RAG applications: `rag_context_variables` - a list of variable key names that contain ground truth context information `rag_query_variables` - a list of variable key names that contains query @@ -1289,7 +1325,14 @@ def annotate( :param Span span: Span to annotate. If no span is provided, the current active span will be used. Must be an LLMObs-type span, i.e. generated by the LLMObs SDK. :param prompt: A dictionary that represents the prompt used for an LLM call in the following form: - `{"template": "...", "id": "...", "version": "...", "variables": {"variable_1": "...", ...}}`. + `{ + "id": "...", + "template": "...", + "chat_template": [{"content": "...", "role": "..."}, ...], + "version": "...", + "variables": {"variable_1": "...", ...}, + "tags": {"tag_1": "...", ...}, + }`.
Can also be set using the `ddtrace.llmobs.utils.Prompt` constructor class. - This argument is only applicable to LLM spans. - The dictionary may contain two optional keys relevant to RAG applications: @@ -1373,11 +1416,11 @@ def annotate( span.name = _name if prompt is not None: try: - validated_prompt = validate_prompt(prompt) + validated_prompt = _validate_prompt(prompt, strict_validation=False) cls._set_dict_attribute(span, INPUT_PROMPT, validated_prompt) - except TypeError: + except (ValueError, TypeError) as e: error = "invalid_prompt" - log.warning("Failed to validate prompt with error: ", exc_info=True) + log.warning("Failed to validate prompt with error: %s", str(e), exc_info=True) if not span_kind: log.debug("Span kind not specified, skipping annotation for input/output data") return diff --git a/ddtrace/llmobs/_telemetry.py b/ddtrace/llmobs/_telemetry.py index 3509b801540..463fe545eb0 100644 --- a/ddtrace/llmobs/_telemetry.py +++ b/ddtrace/llmobs/_telemetry.py @@ -44,7 +44,7 @@ def _find_tag_value_from_tags(tags, tag_key): def _get_tags_from_span_event(event: LLMObsSpanEvent): - span_kind = event.get("meta", {}).get("span.kind", "") + span_kind = event.get("meta", {}).get("span", {}).get("kind", "") integration = _find_tag_value_from_tags(event.get("tags", []), "integration") ml_app = _find_tag_value_from_tags(event.get("tags", []), "ml_app") autoinstrumented = integration is not None diff --git a/ddtrace/llmobs/_utils.py b/ddtrace/llmobs/_utils.py index f0a34b3735a..444279c07cc 100644 --- a/ddtrace/llmobs/_utils.py +++ b/ddtrace/llmobs/_utils.py @@ -2,6 +2,7 @@ from dataclasses import dataclass from dataclasses import is_dataclass import json +from typing import Any from typing import Dict from typing import List from typing import Optional @@ -14,6 +15,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.utils.formats import format_trace_id from ddtrace.llmobs._constants import CREWAI_APM_SPAN_NAME +from ddtrace.llmobs._constants import DEFAULT_PROMPT_NAME from ddtrace.llmobs._constants import GEMINI_APM_SPAN_NAME from ddtrace.llmobs._constants import INTERNAL_CONTEXT_VARIABLE_KEYS from ddtrace.llmobs._constants import INTERNAL_QUERY_VARIABLE_KEYS @@ -27,11 +29,15 @@ from ddtrace.llmobs._constants import SESSION_ID from ddtrace.llmobs._constants import SPAN_LINKS from ddtrace.llmobs._constants import VERTEXAI_APM_SPAN_NAME +from ddtrace.llmobs.types import Message +from ddtrace.llmobs.types import Prompt +from ddtrace.llmobs.types import _SpanLink from ddtrace.trace import Span log = get_logger(__name__) +ValidatedPromptDict = Dict[str, Union[str, Dict[str, Any], List[str], List[Dict[str, str]], List[Message]]] STANDARD_INTEGRATION_SPAN_NAMES = ( CREWAI_APM_SPAN_NAME, @@ -42,50 +48,100 @@ ) -def validate_prompt(prompt: dict) -> Dict[str, Union[str, dict, List[str]]]: - validated_prompt = {} # type: Dict[str, Union[str, dict, List[str]]] +def _validate_prompt(prompt: Union[Dict[str, Any], Prompt], strict_validation: bool) -> ValidatedPromptDict: if not isinstance(prompt, dict): - raise TypeError("Prompt must be a dictionary") + raise TypeError(f"Prompt must be a dictionary, received {type(prompt).__name__}.") + + ml_app = config._llmobs_ml_app + prompt_id = prompt.get("id") + version = prompt.get("version") + tags = prompt.get("tags") variables = prompt.get("variables") template = prompt.get("template") - version = prompt.get("version") - prompt_id = prompt.get("id") + chat_template = prompt.get("chat_template") ctx_variable_keys =
prompt.get("rag_context_variables") - rag_query_variable_keys = prompt.get("rag_query_variables") - if variables is not None: + query_variable_keys = prompt.get("rag_query_variables") + + if strict_validation: + if prompt_id is None: + raise ValueError("'id' must be provided") + if template is None and chat_template is None: + raise ValueError("One of 'template' or 'chat_template' must be provided to annotate a prompt.") + + if template and chat_template: + raise ValueError("Only one of 'template' or 'chat_template' can be provided, not both.") + + final_prompt_id = prompt_id or f"{ml_app}_{DEFAULT_PROMPT_NAME}" + final_ctx_variable_keys = ctx_variable_keys or ["context"] + final_query_variable_keys = query_variable_keys or ["question"] + + if not isinstance(final_prompt_id, str): + raise TypeError(f"prompt_id {final_prompt_id} must be a string, received {type(final_prompt_id).__name__}") + + if not (isinstance(final_ctx_variable_keys, list) and all(isinstance(i, str) for i in final_ctx_variable_keys)): + raise TypeError(f"ctx_variables must be a list of strings, received {type(final_ctx_variable_keys).__name__}") + + if not (isinstance(final_query_variable_keys, list) and all(isinstance(i, str) for i in final_query_variable_keys)): + raise TypeError( + f"query_variables must be a list of strings, received {type(final_query_variable_keys).__name__}" + ) + + if version and not isinstance(version, str): + raise TypeError(f"version: {version} must be a string, received {type(version).__name__}") + + if tags: + if not isinstance(tags, dict): + raise TypeError( + f"tags: {tags} must be a dictionary of string key-value pairs, received {type(tags).__name__}" + ) + if not all(isinstance(k, str) for k in tags): + raise TypeError("Keys of 'tags' must all be strings.") + if not all(isinstance(k, str) for k in tags.values()): + raise TypeError("Values of 'tags' must all be strings.") + + if template and not isinstance(template, str): + raise TypeError(f"template: {template} must be a string, received {type(template).__name__}") + + if chat_template: + if not isinstance(chat_template, list): + raise TypeError("chat_template must be a list of dictionaries with string-string key value pairs.") + for ct in chat_template: + if not (isinstance(ct, dict) and all(k in ct for k in ("role", "content"))): + raise TypeError( + "Each 'chat_template' entry should be a string-string dictionary with role and content keys." 
+ ) + + if variables: if not isinstance(variables, dict): - raise TypeError("Prompt variables must be a dictionary.") - if not any(isinstance(k, str) or isinstance(v, str) for k, v in variables.items()): - raise TypeError("Prompt variable keys and values must be strings.") + raise TypeError( + f"variables: {variables} must be a dictionary with string keys, received {type(variables).__name__}" + ) + if not all(isinstance(k, str) for k in variables): + raise TypeError("Keys of 'variables' must all be strings.") + + final_chat_template = [] + if chat_template: + for msg in chat_template: + final_chat_template.append(Message(role=msg["role"], content=msg["content"])) + + validated_prompt: ValidatedPromptDict = {} + if final_prompt_id: + validated_prompt["id"] = final_prompt_id + if version: + validated_prompt["version"] = version + if variables: validated_prompt["variables"] = variables - if template is not None: - if not isinstance(template, str): - raise TypeError("Prompt template must be a string") + if template: validated_prompt["template"] = template - if version is not None: - if not isinstance(version, str): - raise TypeError("Prompt version must be a string.") - validated_prompt["version"] = version - if prompt_id is not None: - if not isinstance(prompt_id, str): - raise TypeError("Prompt id must be a string.") - validated_prompt["id"] = prompt_id - if ctx_variable_keys is not None: - if not isinstance(ctx_variable_keys, list): - raise TypeError("Prompt field `context_variable_keys` must be a list of strings.") - if not all(isinstance(k, str) for k in ctx_variable_keys): - raise TypeError("Prompt field `context_variable_keys` must be a list of strings.") - validated_prompt[INTERNAL_CONTEXT_VARIABLE_KEYS] = ctx_variable_keys - else: - validated_prompt[INTERNAL_CONTEXT_VARIABLE_KEYS] = ["context"] - if rag_query_variable_keys is not None: - if not isinstance(rag_query_variable_keys, list): - raise TypeError("Prompt field `rag_query_variables` must be a list of strings.") - if not all(isinstance(k, str) for k in rag_query_variable_keys): - raise TypeError("Prompt field `rag_query_variables` must be a list of strings.") - validated_prompt[INTERNAL_QUERY_VARIABLE_KEYS] = rag_query_variable_keys - else: - validated_prompt[INTERNAL_QUERY_VARIABLE_KEYS] = ["question"] + if final_chat_template: + validated_prompt["chat_template"] = final_chat_template + if tags: + validated_prompt["tags"] = tags + if final_ctx_variable_keys: + validated_prompt[INTERNAL_CONTEXT_VARIABLE_KEYS] = final_ctx_variable_keys + if final_query_variable_keys: + validated_prompt[INTERNAL_QUERY_VARIABLE_KEYS] = final_query_variable_keys + return validated_prompt @@ -243,13 +299,13 @@ def format_tool_call_arguments(tool_args: str) -> str: def add_span_link(span: Span, span_id: str, trace_id: str, from_io: str, to_io: str) -> None: - current_span_links = span._get_ctx_item(SPAN_LINKS) or [] + current_span_links: List[_SpanLink] = span._get_ctx_item(SPAN_LINKS) or [] current_span_links.append( - { - "span_id": span_id, - "trace_id": trace_id, - "attributes": {"from": from_io, "to": to_io}, - } + _SpanLink( + span_id=span_id, + trace_id=trace_id, + attributes={"from": from_io, "to": to_io}, + ) ) span._set_ctx_item(SPAN_LINKS, current_span_links) diff --git a/ddtrace/llmobs/_writer.py b/ddtrace/llmobs/_writer.py index 4e574a399bb..3b62ea3bdda 100644 --- a/ddtrace/llmobs/_writer.py +++ b/ddtrace/llmobs/_writer.py @@ -45,6 +45,8 @@ from ddtrace.llmobs._experiment import Project from ddtrace.llmobs._experiment import 
UpdatableDatasetRecord
 from ddtrace.llmobs._utils import safe_json
+from ddtrace.llmobs.types import _Meta
+from ddtrace.llmobs.types import _SpanLink
 from ddtrace.settings._agent import config as agent_config
@@ -56,7 +58,7 @@ class _LLMObsSpanEventOptional(TypedDict, total=False):
     service: str
     status_message: str
     collection_errors: List[str]
-    span_links: List[Dict[str, str]]
+    span_links: List[_SpanLink]
 class LLMObsSpanEvent(_LLMObsSpanEventOptional):
@@ -68,7 +70,7 @@ class LLMObsSpanEvent(_LLMObsSpanEventOptional):
     start_ns: int
     duration: int
     status: str
-    meta: Dict[str, Any]
+    meta: _Meta
     metrics: Dict[str, Any]
     _dd: Dict[str, str]
@@ -87,6 +89,7 @@ class LLMObsEvaluationMetricEvent(TypedDict, total=False):
 class LLMObsExperimentEvalMetricEvent(TypedDict, total=False):
+    metric_source: str
     span_id: str
     trace_id: str
     timestamp_ms: int
@@ -309,7 +312,7 @@ class LLMObsExperimentsClient(BaseLLMObsWriter):
     EVP_SUBDOMAIN_HEADER_VALUE = EXP_SUBDOMAIN_NAME
     AGENTLESS_BASE_URL = AGENTLESS_EXP_BASE_URL
     ENDPOINT = ""
-    TIMEOUT = 5.0
+    TIMEOUT = 10.0
     BULK_UPLOAD_TIMEOUT = 60.0
     LIST_RECORDS_TIMEOUT = 20
     SUPPORTED_UPLOAD_EXTS = {"csv"}
diff --git a/ddtrace/llmobs/types.py b/ddtrace/llmobs/types.py
new file mode 100644
index 00000000000..09072a36e51
--- /dev/null
+++ b/ddtrace/llmobs/types.py
@@ -0,0 +1,110 @@
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import TypedDict
+from typing import Union
+
+
+class ExportedLLMObsSpan(TypedDict):
+    span_id: str
+    trace_id: str
+
+
+class Document(TypedDict, total=False):
+    name: str
+    id: str
+    text: str
+    score: float
+
+
+class ToolCall(TypedDict, total=False):
+    name: str
+    arguments: Dict[str, Any]
+    tool_id: str
+    type: str
+
+
+class ToolResult(TypedDict, total=False):
+    name: str
+    result: str
+    tool_id: str
+    type: str
+
+
+class ToolDefinition(TypedDict, total=False):
+    name: str
+    description: str
+    schema: Dict[str, Any]
+
+
+class Message(TypedDict, total=False):
+    id: str
+    role: str
+    content: str
+    tool_calls: List[ToolCall]
+    tool_results: List[ToolResult]
+    tool_id: str
+
+
+class _SpanField(TypedDict):
+    kind: str
+
+
+class _ErrorField(TypedDict, total=False):
+    message: str
+    stack: str
+    type: str
+
+
+class Prompt(TypedDict, total=False):
+    """
+    A Prompt object that contains the information needed to render a prompt.
+    id: str - the id of the prompt set by the user. Should be unique per ml_app.
+    version: str - user tag for the version of the prompt.
+    variables: Dict[str, str] - a dictionary of variables that will be used to render the prompt
+    chat_template: Optional[Union[List[Dict[str, str]], List[Message]]]
+        - A list of dicts with "role" and "content" keys,
+          where role is the role of the message and content is the template string
+    template: Optional[str]
+        - A single template string for the prompt; the role defaults to "user"
+    tags: Optional[Dict[str, str]]
+        - A dictionary of string tag key-value pairs to add to the prompt run.
+    rag_context_variables: List[str] - a list of variable key names that contain ground truth context information
+    rag_query_variables: List[str] - a list of variable key names that contain query information
+    """
+
+    version: str
+    id: str
+    template: str
+    chat_template: Union[List[Dict[str, str]], List[Message]]
+    variables: Dict[str, str]
+    tags: Dict[str, str]
+    rag_context_variables: List[str]
+    rag_query_variables: List[str]
+
+
+class _MetaIO(TypedDict, total=False):
+    parameters: Dict[str, Any]
+    value: str
+    messages: List[Message]
+    prompt: Prompt
+    documents: List[Document]
+
+
+class _Meta(TypedDict, total=False):
+    model_name: str
+    model_provider: str
+    span: _SpanField
+    error: _ErrorField
+    metadata: Dict[str, Any]
+    input: _MetaIO
+    output: _MetaIO
+    expected_output: _MetaIO
+    evaluations: Any
+    tool_definitions: List[ToolDefinition]
+
+
+class _SpanLink(TypedDict):
+    span_id: str
+    trace_id: str
+    attributes: Dict[str, str]
diff --git a/ddtrace/llmobs/utils.py b/ddtrace/llmobs/utils.py
index df70a9fca92..5d007dfa4a9 100644
--- a/ddtrace/llmobs/utils.py
+++ b/ddtrace/llmobs/utils.py
@@ -1,10 +1,14 @@
 from typing import Any
 from typing import Dict
 from typing import List
-from typing import TypedDict  # noqa:F401
 from typing import Union
 from ddtrace.internal.logger import get_logger
+from ddtrace.llmobs.types import Document
+from ddtrace.llmobs.types import Message
+from ddtrace.llmobs.types import ToolCall
+from ddtrace.llmobs.types import ToolDefinition
+from ddtrace.llmobs.types import ToolResult
 log = get_logger(__name__)
@@ -68,58 +72,6 @@ def _extract_tool_result(tool_result: Dict[str, Any]) -> "ToolResult":
     return formatted_tool_result
-ExportedLLMObsSpan = TypedDict("ExportedLLMObsSpan", {"span_id": str, "trace_id": str})
-Document = TypedDict("Document", {"name": str, "id": str, "text": str, "score": float}, total=False)
-Message = TypedDict(
-    "Message",
-    {"content": str, "role": str, "tool_calls": List["ToolCall"], "tool_results": List["ToolResult"]},
-    total=False,
-)
-Prompt = TypedDict(
-    "Prompt",
-    {
-        "variables": Dict[str, str],
-        "template": str,
-        "id": str,
-        "version": str,
-        "rag_context_variables": List[
-            str
-        ],  # a list of variable key names that contain ground truth context information
-        "rag_query_variables": List[str],  # a list of variable key names that contains query information
-    },
-    total=False,
-)
-ToolCall = TypedDict(
-    "ToolCall",
-    {
-        "name": str,
-        "arguments": Dict[str, Any],
-        "tool_id": str,
-        "type": str,
-    },
-    total=False,
-)
-ToolResult = TypedDict(
-    "ToolResult",
-    {
-        "name": str,
-        "result": str,
-        "tool_id": str,
-        "type": str,
-    },
-    total=False,
-)
-ToolDefinition = TypedDict(
-    "ToolDefinition",
-    {
-        "name": str,
-        "description": str,
-        "schema": Dict[str, Any],
-    },
-    total=False,
-)
-
-
 def extract_tool_definitions(tool_definitions: List[Dict[str, Any]]) -> List[ToolDefinition]:
     """Return a list of validated tool definitions."""
     if not isinstance(tool_definitions, list):
diff --git a/ddtrace/profiling/collector/__init__.py b/ddtrace/profiling/collector/__init__.py
index 0affbf3deaa..4b066483460 100644
--- a/ddtrace/profiling/collector/__init__.py
+++ b/ddtrace/profiling/collector/__init__.py
@@ -1,4 +1,6 @@
 # -*- encoding: utf-8 -*-
+import typing
+
 from ddtrace.internal import periodic
 from ddtrace.internal import service
 from ddtrace.settings.profiling import config
@@ -15,11 +17,11 @@ class CollectorUnavailable(CollectorError):
 class Collector(service.Service):
     """A profile collector."""
-    def __init__(self,
*args, **kwargs): + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: super().__init__(*args, **kwargs) @staticmethod - def snapshot(): + def snapshot() -> None: """Take a snapshot of collected data, to be exported.""" @@ -28,11 +30,11 @@ class PeriodicCollector(Collector, periodic.PeriodicService): __slots__ = () - def periodic(self): + def periodic(self) -> None: # This is to simply override periodic.PeriodicService.periodic() self.collect() - def collect(self): + def collect(self) -> None: """Collect the actual data.""" raise NotImplementedError @@ -44,15 +46,15 @@ def __init__(self, capture_pct: float = 100.0): if capture_pct < 0 or capture_pct > 100: raise ValueError("Capture percentage should be between 0 and 100 included") self.capture_pct: float = capture_pct - self._counter: int = 0 + self._counter: float = 0 - def __repr__(self): + def __repr__(self) -> str: class_name = self.__class__.__name__ attrs = {k: v for k, v in self.__dict__.items() if not k.startswith("_")} attrs_str = ", ".join(f"{k}={v!r}" for k, v in attrs.items()) return f"{class_name}({attrs_str})" - def capture(self): + def capture(self) -> bool: self._counter += self.capture_pct if self._counter >= 100: self._counter -= 100 @@ -61,7 +63,7 @@ def capture(self): class CaptureSamplerCollector(Collector): - def __init__(self, capture_pct=config.capture_pct, *args, **kwargs): + def __init__(self, capture_pct: float = config.capture_pct, *args: typing.Any, **kwargs: typing.Any) -> None: super().__init__(*args, **kwargs) self.capture_pct = capture_pct self._capture_sampler = CaptureSampler(self.capture_pct) diff --git a/ddtrace/profiling/collector/_lock.py b/ddtrace/profiling/collector/_lock.py index e261664ad78..5a82ebd27c5 100644 --- a/ddtrace/profiling/collector/_lock.py +++ b/ddtrace/profiling/collector/_lock.py @@ -19,8 +19,10 @@ from ddtrace.trace import Tracer -def _current_thread(): - # type: (...) 
-> typing.Tuple[int, str] +T = typing.TypeVar("T") + + +def _current_thread() -> typing.Tuple[int, str]: thread_id = _thread.get_ident() return thread_id, _threading.get_thread_name(thread_id) @@ -58,13 +60,13 @@ def __init__( self._self_init_loc = "%s:%d" % (os.path.basename(code.co_filename), frame.f_lineno) self._self_name: typing.Optional[str] = None - def __aenter__(self, *args, **kwargs): + def __aenter__(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Any: return self._acquire(self.__wrapped__.__aenter__, *args, **kwargs) - def __aexit__(self, *args, **kwargs): + def __aexit__(self, *args: typing.Any, **kwargs: typing.Any) -> None: return self._release(self.__wrapped__.__aexit__, *args, **kwargs) - def _acquire(self, inner_func, *args, **kwargs): + def _acquire(self, inner_func: typing.Callable[..., T], *args: typing.Any, **kwargs: typing.Any) -> T: if not self._self_capture_sampler.capture(): return inner_func(*args, **kwargs) @@ -100,16 +102,18 @@ def _acquire(self, inner_func, *args, **kwargs): if self._self_tracer is not None: handle.push_span(self._self_tracer.current_span()) - for frame in frames: - handle.push_frame(frame.function_name, frame.file_name, 0, frame.lineno) + + for f in frames: + handle.push_frame(f.function_name, f.file_name, 0, f.lineno) + handle.flush_sample() except Exception: pass # nosec - def acquire(self, *args, **kwargs): + def acquire(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Any: return self._acquire(self.__wrapped__.acquire, *args, **kwargs) - def _release(self, inner_func, *args, **kwargs): + def _release(self, inner_func: typing.Callable[..., typing.Any], *args: typing.Any, **kwargs: typing.Any) -> None: # type (typing.Any, typing.Any) -> None # The underlying threading.Lock class is implemented using C code, and @@ -158,22 +162,23 @@ def _release(self, inner_func, *args, **kwargs): if self._self_tracer is not None: handle.push_span(self._self_tracer.current_span()) - for frame in frames: - handle.push_frame(frame.function_name, frame.file_name, 0, frame.lineno) + + for f in frames: + handle.push_frame(f.function_name, f.file_name, 0, f.lineno) handle.flush_sample() - def release(self, *args, **kwargs): + def release(self, *args: typing.Any, **kwargs: typing.Any) -> None: return self._release(self.__wrapped__.release, *args, **kwargs) acquire_lock = acquire - def __enter__(self, *args, **kwargs): + def __enter__(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Any: return self._acquire(self.__wrapped__.__enter__, *args, **kwargs) - def __exit__(self, *args, **kwargs): + def __exit__(self, *args: typing.Any, **kwargs: typing.Any) -> None: self._release(self.__wrapped__.__exit__, *args, **kwargs) - def _find_self_name(self, var_dict: typing.Dict): + def _find_self_name(self, var_dict: typing.Dict) -> typing.Optional[str]: for name, value in var_dict.items(): if name.startswith("__") or isinstance(value, types.ModuleType): continue @@ -188,7 +193,7 @@ def _find_self_name(self, var_dict: typing.Dict): # Get lock acquire/release call location and variable name the lock is assigned to # This function propagates ValueError if the frame depth is <= 3. 
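The `_lock.py` hunks above combine two techniques: the deterministic `CaptureSampler` from `collector/__init__.py`, which accumulates `capture_pct` on each event and fires every time the counter crosses 100, and a lock wrapper whose `_acquire`/`_release` helpers time the wrapped call and record a sample only for captured events. A minimal self-contained sketch of that pattern, using hypothetical names (`SketchSampler`, `TimedLock`) rather than the actual ddtrace classes:

```python
import threading
import time


class SketchSampler:
    """Deterministic sampler: accumulate capture_pct per event, fire on each crossing of 100."""

    def __init__(self, capture_pct: float) -> None:
        if capture_pct < 0 or capture_pct > 100:
            raise ValueError("Capture percentage should be between 0 and 100 included")
        self.capture_pct = capture_pct
        self._counter = 0.0

    def capture(self) -> bool:
        # With capture_pct=25 this fires on every 4th call, evenly spaced.
        self._counter += self.capture_pct
        if self._counter >= 100:
            self._counter -= 100
            return True
        return False


class TimedLock:
    """Wrap a threading.Lock and time acquire() for sampled events only."""

    def __init__(self, sampler: SketchSampler) -> None:
        self._lock = threading.Lock()
        self._sampler = sampler

    def acquire(self) -> bool:
        if not self._sampler.capture():
            return self._lock.acquire()  # fast path: no measurement overhead
        start = time.monotonic_ns()
        acquired = self._lock.acquire()
        wait_ns = time.monotonic_ns() - start
        # A real profiler would push frames and flush a sample handle here.
        print(f"sampled lock acquire took {wait_ns} ns")
        return acquired

    def release(self) -> None:
        self._lock.release()


lock = TimedLock(SketchSampler(capture_pct=50.0))
lock.acquire()
lock.release()
```

Accumulating a percentage and subtracting 100 on each hit yields evenly spaced samples rather than random ones, which is the behavior `CaptureSampler.capture()` implements above.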
- def _maybe_update_self_name(self): + def _maybe_update_self_name(self) -> None: if self._self_name is not None: return # We expect the call stack to be like this: @@ -223,60 +228,66 @@ class FunctionWrapper(wrapt.FunctionWrapper): # Override the __get__ method: whatever happens, _allocate_lock is always considered by Python like a "static" # method, even when used as a class attribute. Python never tried to "bind" it to a method, because it sees it is a # builtin function. Override default wrapt behavior here that tries to detect bound method. - def __get__(self, instance, owner=None): + def __get__( + self, + instance: typing.Optional[typing.Any], + owner: typing.Optional[typing.Any] = None, + ) -> "FunctionWrapper": return self class LockCollector(collector.CaptureSamplerCollector): """Record lock usage.""" + PROFILED_LOCK_CLASS: typing.Type[typing.Any] + def __init__( self, - nframes=config.max_frames, - endpoint_collection_enabled=config.endpoint_collection, - tracer=None, - *args, - **kwargs, - ): + nframes: int = config.max_frames, + endpoint_collection_enabled: bool = config.endpoint_collection, + tracer: typing.Optional[Tracer] = None, + *args: typing.Any, + **kwargs: typing.Any, + ) -> None: super().__init__(*args, **kwargs) self.nframes = nframes self.endpoint_collection_enabled = endpoint_collection_enabled self.tracer = tracer - self._original = None + self._original: typing.Optional[typing.Any] = None @abc.abstractmethod - def _get_patch_target(self): - # type: (...) -> typing.Any - pass + def _get_patch_target(self) -> typing.Type[typing.Any]: + raise NotImplementedError @abc.abstractmethod def _set_patch_target( self, - value, # type: typing.Any - ): - # type: (...) -> None - pass + value: typing.Any, + ) -> None: + raise NotImplementedError - def _start_service(self): - # type: (...) -> None + def _start_service(self) -> None: """Start collecting lock usage.""" self.patch() super(LockCollector, self)._start_service() # type: ignore[safe-super] - def _stop_service(self): - # type: (...) -> None + def _stop_service(self) -> None: """Stop collecting lock usage.""" super(LockCollector, self)._stop_service() # type: ignore[safe-super] self.unpatch() - def patch(self): - # type: (...) -> None + def patch(self) -> None: """Patch the module for tracking lock allocation.""" # We only patch the lock from the `threading` module. # Nobody should use locks from `_thread`; if they do so, then it's deliberate and we don't profile. self._original = self._get_patch_target() - def _allocate_lock(wrapped, instance, args, kwargs): + def _allocate_lock( + wrapped: typing.Any, + instance: typing.Any, + args: typing.Tuple[typing.Any, ...], + kwargs: typing.Dict[str, typing.Any], + ) -> typing.Any: lock = wrapped(*args, **kwargs) return self.PROFILED_LOCK_CLASS( lock, @@ -288,7 +299,6 @@ def _allocate_lock(wrapped, instance, args, kwargs): self._set_patch_target(FunctionWrapper(self._original, _allocate_lock)) - def unpatch(self): - # type: (...) -> None + def unpatch(self) -> None: """Unpatch the threading module for tracking lock allocation.""" self._set_patch_target(self._original) diff --git a/ddtrace/profiling/collector/asyncio.py b/ddtrace/profiling/collector/asyncio.py index 67f828807aa..3c8e610da0f 100644 --- a/ddtrace/profiling/collector/asyncio.py +++ b/ddtrace/profiling/collector/asyncio.py @@ -1,4 +1,5 @@ -import typing # noqa:F401 +from asyncio.locks import Lock +import typing from .. import collector from . 
import _lock @@ -13,8 +14,7 @@ class AsyncioLockCollector(_lock.LockCollector): PROFILED_LOCK_CLASS = _ProfiledAsyncioLock - def _start_service(self): - # type: (...) -> None + def _start_service(self) -> None: """Start collecting lock usage.""" try: import asyncio @@ -23,12 +23,11 @@ def _start_service(self): self._asyncio_module = asyncio return super(AsyncioLockCollector, self)._start_service() - def _get_patch_target(self): - # type: (...) -> typing.Any + def _get_patch_target(self) -> typing.Type[Lock]: return self._asyncio_module.Lock def _set_patch_target( - self, value # type: typing.Any - ): - # type: (...) -> None + self, + value: typing.Any, + ) -> None: self._asyncio_module.Lock = value # type: ignore[misc] diff --git a/ddtrace/profiling/collector/memalloc.py b/ddtrace/profiling/collector/memalloc.py index 5317689cd65..7cef93806d7 100644 --- a/ddtrace/profiling/collector/memalloc.py +++ b/ddtrace/profiling/collector/memalloc.py @@ -1,10 +1,17 @@ # -*- encoding: utf-8 -*- -from collections import namedtuple import logging import os import threading -import typing # noqa:F401 +from types import TracebackType +from typing import List +from typing import NamedTuple from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import cast + +from ddtrace.profiling.event import DDFrame try: @@ -21,10 +28,16 @@ LOG = logging.getLogger(__name__) -MemorySample = namedtuple( - "MemorySample", - ("frames", "size", "count", "in_use_size", "alloc_size", "thread_id"), -) + +class MemorySample(NamedTuple): + frames: List[DDFrame] + size: int + count: ( # pyright: ignore[reportIncompatibleMethodOverride] (count is a method of tuple) + int # type: ignore[assignment] + ) + in_use_size: int + alloc_size: int + thread_id: int class MemoryCollector: @@ -35,13 +48,15 @@ def __init__( max_nframe: Optional[int] = None, heap_sample_size: Optional[int] = None, ignore_profiler: Optional[bool] = None, - ): - self.max_nframe: int = max_nframe if max_nframe is not None else config.max_frames - self.heap_sample_size: int = heap_sample_size if heap_sample_size is not None else config.heap.sample_size - self.ignore_profiler: bool = ignore_profiler if ignore_profiler is not None else config.ignore_profiler - - def start(self): - # type: (...) 
-> None + ) -> None: + self.max_nframe = cast(int, max_nframe if max_nframe is not None else config.max_frames) + self.heap_sample_size = cast( + int, + heap_sample_size if heap_sample_size is not None else config.heap.sample_size, # pyright: ignore + ) + self.ignore_profiler = cast(bool, ignore_profiler if ignore_profiler is not None else config.ignore_profiler) + + def start(self) -> None: """Start collecting memory profiles.""" if _memalloc is None: raise collector.CollectorUnavailable @@ -55,25 +70,28 @@ def start(self): _memalloc.stop() _memalloc.start(self.max_nframe, self.heap_sample_size) - def __enter__(self): + def __enter__(self) -> None: self.start() - def __exit__(self, exc_type, exc_value, traceback): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: self.stop() - def join(self): + def join(self) -> None: pass - def stop(self): - # type: () -> None + def stop(self) -> None: if _memalloc is not None: try: _memalloc.stop() except RuntimeError: LOG.debug("Failed to stop memalloc profiling on shutdown", exc_info=True) - def _get_thread_id_ignore_set(self): - # type: () -> typing.Set[int] + def _get_thread_id_ignore_set(self) -> Set[int]: # This method is not perfect and prone to race condition in theory, but very little in practice. # Anyhow it's not a big deal — it's a best effort feature. return { @@ -82,12 +100,14 @@ def _get_thread_id_ignore_set(self): if getattr(thread, "_ddtrace_profiling_ignore", False) and thread.ident is not None } - def snapshot(self): + def snapshot(self) -> Tuple[MemorySample, ...]: thread_id_ignore_set = self._get_thread_id_ignore_set() try: + if _memalloc is None: + raise ValueError("Memalloc is not initialized") events = _memalloc.heap() - except RuntimeError: + except (RuntimeError, ValueError): # DEV: This can happen if either _memalloc has not been started or has been stopped. LOG.debug("Unable to collect heap events from process %d", os.getpid(), exc_info=True) return tuple() @@ -116,19 +136,22 @@ def snapshot(self): # DEV: This might happen if the memalloc sofile is unlinked and relinked without module # re-initialization. LOG.debug("Invalid state detected in memalloc module, suppressing profile") + return tuple() - def test_snapshot(self): + def test_snapshot(self) -> Tuple[MemorySample, ...]: thread_id_ignore_set = self._get_thread_id_ignore_set() try: + if _memalloc is None: + raise ValueError("Memalloc is not initialized") events = _memalloc.heap() - except RuntimeError: + except (RuntimeError, ValueError): # DEV: This can happen if either _memalloc has not been started or has been stopped. 
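The `memalloc.py` changes above give `MemoryCollector` a typed context-manager API around the optional `_memalloc` C extension, with `snapshot()` degrading to an empty tuple whenever the extension is missing or already stopped. A hedged usage sketch based only on the signatures visible in this hunk; it assumes the extension loaded, since `start()` raises `CollectorUnavailable` otherwise:

```python
from ddtrace.profiling.collector.memalloc import MemoryCollector

collector = MemoryCollector(max_nframe=64, heap_sample_size=1024)
with collector:  # __enter__ calls start(), __exit__ calls stop()
    buffers = [bytearray(4096) for _ in range(128)]  # allocate something worth sampling
    samples = collector.snapshot()

# Each sample is a MemorySample named tuple, per the definition in this hunk.
for sample in samples:
    print(sample.thread_id, sample.in_use_size, sample.alloc_size, len(sample.frames))
```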
LOG.debug("Unable to collect heap events from process %d", os.getpid(), exc_info=True) return tuple() - samples = [] + samples: List[MemorySample] = [] for event in events: (frames, thread_id), in_use_size, alloc_size, count = event @@ -139,5 +162,5 @@ def test_snapshot(self): return tuple(samples) - def collect(self): + def collect(self) -> Tuple[MemorySample, ...]: return tuple() diff --git a/ddtrace/profiling/collector/stack.pyi b/ddtrace/profiling/collector/stack.pyi index 237744eb9d4..f99d134d52c 100644 --- a/ddtrace/profiling/collector/stack.pyi +++ b/ddtrace/profiling/collector/stack.pyi @@ -1,7 +1,7 @@ import typing -import ddtrace +from ddtrace.trace import Tracer from ddtrace.profiling import collector class StackCollector(collector.PeriodicCollector): - tracer: typing.Optional[ddtrace.trace.Tracer] + tracer: typing.Optional[Tracer] diff --git a/ddtrace/profiling/collector/threading.py b/ddtrace/profiling/collector/threading.py index ab6bf66e6f3..c49ccc5dd1a 100644 --- a/ddtrace/profiling/collector/threading.py +++ b/ddtrace/profiling/collector/threading.py @@ -1,7 +1,7 @@ from __future__ import absolute_import import threading -import typing # noqa:F401 +import typing from ddtrace.internal._unpatched import _threading as ddtrace_threading from ddtrace.internal.datadog.profiling import stack_v2 @@ -19,35 +19,33 @@ class ThreadingLockCollector(_lock.LockCollector): PROFILED_LOCK_CLASS = _ProfiledThreadingLock - def _get_patch_target(self): - # type: (...) -> typing.Any + def _get_patch_target(self) -> typing.Type[threading.Lock]: return threading.Lock def _set_patch_target( self, - value, # type: typing.Any - ): - # type: (...) -> None + value: typing.Any, + ) -> None: threading.Lock = value # Also patch threading.Thread so echion can track thread lifetimes -def init_stack_v2(): +def init_stack_v2() -> None: if config.stack.v2_enabled and stack_v2.is_available: - _thread_set_native_id = ddtrace_threading.Thread._set_native_id - _thread_bootstrap_inner = ddtrace_threading.Thread._bootstrap_inner + _thread_set_native_id = ddtrace_threading.Thread._set_native_id # type: ignore[attr-defined] + _thread_bootstrap_inner = ddtrace_threading.Thread._bootstrap_inner # type: ignore[attr-defined] - def thread_set_native_id(self, *args, **kswargs): - _thread_set_native_id(self, *args, **kswargs) + def thread_set_native_id(self, *args, **kwargs): + _thread_set_native_id(self, *args, **kwargs) stack_v2.register_thread(self.ident, self.native_id, self.name) def thread_bootstrap_inner(self, *args, **kwargs): _thread_bootstrap_inner(self, *args, **kwargs) stack_v2.unregister_thread(self.ident) - ddtrace_threading.Thread._set_native_id = thread_set_native_id - ddtrace_threading.Thread._bootstrap_inner = thread_bootstrap_inner + ddtrace_threading.Thread._set_native_id = thread_set_native_id # type: ignore[attr-defined] + ddtrace_threading.Thread._bootstrap_inner = thread_bootstrap_inner # type: ignore[attr-defined] # Instrument any living threads - for thread_id, thread in ddtrace_threading._active.items(): + for thread_id, thread in ddtrace_threading._active.items(): # type: ignore[attr-defined] stack_v2.register_thread(thread_id, thread.native_id, thread.name) diff --git a/ddtrace/profiling/profiler.py b/ddtrace/profiling/profiler.py index 48f953e7856..57c72b52697 100644 --- a/ddtrace/profiling/profiler.py +++ b/ddtrace/profiling/profiler.py @@ -56,6 +56,12 @@ def start(self, stop_on_exit=True, profile_children=True): except uwsgi.uWSGIMasterProcess: # Do nothing, the start() method will be called 
in each worker subprocess return + except uwsgi.uWSGIConfigDeprecationWarning: + LOG.warning("uWSGI configuration deprecation warning", exc_info=True) + # Turn off profiling in this case, this is mostly for + # uwsgi<2.0.30 when --skip-atexit is not set with --lazy-apps + # or --lazy. See uwsgi.check_uwsgi() for details. + return self._profiler.start() diff --git a/ddtrace/settings/_config.py b/ddtrace/settings/_config.py index ef111e3a355..abd789ed992 100644 --- a/ddtrace/settings/_config.py +++ b/ddtrace/settings/_config.py @@ -100,6 +100,7 @@ "flask", "google_generativeai", "google_genai", + "google_adk", "urllib3", "subprocess", "kafka", @@ -661,6 +662,14 @@ def __init__(self): self._inferred_proxy_services_enabled = _get_config("DD_TRACE_INFERRED_PROXY_SERVICES_ENABLED", False, asbool) self._trace_safe_instrumentation_enabled = _get_config("DD_TRACE_SAFE_INSTRUMENTATION_ENABLED", False, asbool) + # Resource renaming + self._trace_resource_renaming_enabled = _get_config( + "DD_TRACE_RESOURCE_RENAMING_ENABLED", default=False, modifier=asbool + ) + self._trace_resource_renaming_always_simplified_endpoint = _get_config( + "DD_TRACE_RESOURCE_RENAMING_ALWAYS_SIMPLIFIED_ENDPOINT", default=False, modifier=asbool + ) + def __getattr__(self, name) -> Any: if name in self._config: return self._config[name].value() diff --git a/ddtrace/settings/asm.py b/ddtrace/settings/asm.py index f7133dfb2b6..49ae7e6dd9e 100644 --- a/ddtrace/settings/asm.py +++ b/ddtrace/settings/asm.py @@ -309,9 +309,7 @@ def reset(self): def _eval_asm_can_be_enabled(self) -> None: self._asm_can_be_enabled = APPSEC_ENV not in os.environ and tracer_config._remote_config_enabled - self._load_modules = bool( - self._iast_enabled or (self._ep_enabled and (self._asm_enabled or self._asm_can_be_enabled)) - ) + self._load_modules = bool(self._ep_enabled and (self._asm_enabled or self._asm_can_be_enabled)) self._asm_rc_enabled = (self._asm_enabled and tracer_config._remote_config_enabled) or self._asm_can_be_enabled @property diff --git a/ddtrace/settings/profiling.py b/ddtrace/settings/profiling.py index b1684b782d5..751932edce2 100644 --- a/ddtrace/settings/profiling.py +++ b/ddtrace/settings/profiling.py @@ -1,6 +1,7 @@ import itertools import math import os +import sys import typing as t from ddtrace.ext.git import COMMIT_SHA @@ -64,6 +65,9 @@ def _check_for_stack_v2_available(): def _parse_profiling_enabled(raw: str) -> bool: + if sys.version_info >= (3, 14): + return False + # Try to derive whether we're enabled via DD_INJECTION_ENABLED # - Are we injected (DD_INJECTION_ENABLED set) # - Is profiling enabled ("profiler" in the list) @@ -257,7 +261,8 @@ class ProfilingConfigStack(DDConfig): _v2_enabled = DDConfig.v( bool, "v2_enabled", - default=True, + # Not yet supported on 3.14 + default=sys.version_info < (3, 14), help_type="Boolean", help="Whether to enable the v2 stack profiler. Also enables the libdatadog collector.", ) @@ -370,12 +375,14 @@ class ProfilingConfigPytorch(DDConfig): # We need to check if ddup is available, and turn off profiling if it is not. 
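A recurring pattern in the `settings/profiling.py` hunks above is gating defaults on the interpreter version, so profiling quietly turns itself off on Python 3.14, where the native stack is not yet supported. An illustrative sketch of that version-gating approach, where `parse_enabled` is a hypothetical stand-in for `_parse_profiling_enabled` (the real function also consults `DD_INJECTION_ENABLED`):

```python
import os
import sys

# Features the native profiler stack does not yet support on 3.14
# default to off there instead of failing at runtime.
STACK_V2_DEFAULT = sys.version_info < (3, 14)


def parse_enabled(raw: str) -> bool:
    # Short-circuit to False on 3.14+ before even looking at the value.
    if sys.version_info >= (3, 14):
        return False
    return raw.strip().lower() in ("1", "true", "yes", "on")


enabled = parse_enabled(os.environ.get("DD_PROFILING_ENABLED", "false"))
print(f"profiling enabled={enabled}, stack v2 default={STACK_V2_DEFAULT}")
```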
if not ddup_is_available: - msg = ddup_failure_msg or "libdd not available" - logger.warning("Failed to load ddup module (%s), disabling profiling", msg) - telemetry_writer.add_log( - TELEMETRY_LOG_LEVEL.ERROR, - "Failed to load ddup module (%s), disabling profiling" % ddup_failure_msg, - ) + # We know it is not supported on 3.14, so don't report the error, but still disable + if sys.version_info < (3, 14): + msg = ddup_failure_msg or "libdd not available" + logger.warning("Failed to load ddup module (%s), disabling profiling", msg) + telemetry_writer.add_log( + TELEMETRY_LOG_LEVEL.ERROR, + "Failed to load ddup module (%s), disabling profiling" % ddup_failure_msg, + ) config.enabled = False # We also need to check if stack_v2 module is available, and turn if off diff --git a/ddtrace/sourcecode/_utils.py b/ddtrace/sourcecode/_utils.py index 8397327664c..f41a9f7cce2 100644 --- a/ddtrace/sourcecode/_utils.py +++ b/ddtrace/sourcecode/_utils.py @@ -1,5 +1,5 @@ +import os import re -import subprocess from urllib import parse @@ -56,15 +56,23 @@ def normalize_repository_url(url): def _query_git(args): + import subprocess # don't import subprocess (and maybe activate the integration) if not needed + ver = subprocess.Popen(["git"] + args, stdout=subprocess.PIPE).communicate()[0] return ver.strip().decode("utf-8") def get_commit_sha(): + commit_sha = os.environ.get("DD_GIT_COMMIT_SHA") + if commit_sha: + return commit_sha return _query_git(["rev-parse", "HEAD"]) def get_repository_url(): + repository_url = os.environ.get("DD_GIT_REPOSITORY_URL") + if repository_url: + return repository_url return _query_git(["config", "--get", "remote.origin.url"]) diff --git a/ddtrace/vendor/ply/yacc.py b/ddtrace/vendor/ply/yacc.py index 88188a1e8ea..c1a354982b4 100644 --- a/ddtrace/vendor/ply/yacc.py +++ b/ddtrace/vendor/ply/yacc.py @@ -3177,7 +3177,7 @@ def validate_pfunctions(self): for g in parsed_g: grammar.append((name, g)) except SyntaxError as e: - self.log.error(str(e)) + self.log.error(str(e), extra={"send_to_telemetry": False}) self.error = True # Looks like a valid grammar rule @@ -3351,7 +3351,7 @@ def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, star else: grammar.set_start(start) except GrammarError as e: - errorlog.error(str(e)) + errorlog.error(str(e), extra={"send_to_telemetry": False}) errors = True if errors: diff --git a/docker-compose.yml b/docker-compose.yml index 7455575317f..75130f0838a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -121,7 +121,7 @@ services: volumes: - ddagent:/tmp/ddagent:rw testagent: - image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.29.1 + image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.34.0 ports: - "127.0.0.1:9126:8126" volumes: diff --git a/docker/Dockerfile b/docker/Dockerfile index 5858a9c1b60..85b44f9cff9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -34,6 +34,7 @@ RUN apt-get update \ libbz2-dev \ libenchant-2-dev \ libffi-dev \ + libjemalloc2 \ liblzma-dev \ libmariadb-dev \ libmariadb-dev-compat \ diff --git a/docs/configuration.rst b/docs/configuration.rst index 0f8701a81ad..0fef0184826 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -461,8 +461,8 @@ Metrics version_added: v3.11.0: -AppSec ------- +Application & API Security +-------------------------- .. ddtrace-configuration-options:: @@ -528,6 +528,29 @@ AppSec default: True description: Whether to enable stack traces in reports for ASM. Currently used for exploit prevention reports. 
+ DD_APPSEC_WAF_TIMEOUT: + type: Float + default: 5.0 (unit:milliseconds) + description: | + Each time the WAF is run to analyze a possible threat, this timeout duration is used to limit the WAF analysis. + You can increase this value if you're expecting large request payloads to be analyzed. + Please note that the WAF can be queried multiple times in a single trace. + + DD_API_SECURITY_MAX_DOWNSTREAM_REQUEST_BODY_ANALYSIS: + type: Integer + default: 1 + description: Maximum number of downstream requests per request whose (request and response) bodies will be analyzed by the WAF + + DD_API_SECURITY_DOWNSTREAM_REQUEST_BODY_ANALYSIS_SAMPLE_RATE: + type: Float + default: 0.5 (between 0. and 1.) + description: sampling rate for body analysis of downstream requests. Default value is 50%. + +Code Security +------------- + +.. ddtrace-configuration-options:: + DD_IAST_ENABLED: type: Boolean default: False diff --git a/docs/contributing.rst b/docs/contributing.rst index 7cacf8b007e..143b6e082ae 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -38,6 +38,23 @@ Correctness and code style are automatically checked in continuous integration, various tools including Flake8, Black, and MyPy. This means that code reviews don't need to worry about style and can focus on substance. +Pull Request Requirements +~~~~~~~~~~~~~~~~~~~~~~~~~ + +When submitting a pull request, ensure the following requirements are met: + +* **PR title follows conventional commit standard** - See the `Branches and Pull Requests`_ section for details. +* **All changes are related to the pull request's stated goal** - Keep changes focused and avoid scope creep. +* **The change includes tests OR the PR description describes a testing strategy** - All code changes should be tested appropriately. +* **The change includes or references documentation updates if necessary** - Update user-facing documentation when adding new features or changing behavior. +* **Backport labels are set if applicable** - Apply appropriate backport labels for fixes and CI changes as described in the `Backporting`_ section. +* **Avoids breaking API changes** - Follow the :doc:`versioning policy ` to maintain backward compatibility. +* **The PR description includes an overview of the change** - Clearly describe what the change does and why it's needed. +* **The PR description articulates the motivation for the change** - Explain the problem being solved or the improvement being made. +* **The PR description notes risks associated with the change, if any** - Document any potential impacts or considerations. +* **Newly-added code is easy to change** - Write maintainable code that follows established patterns. +* **Testing strategy adequately addresses listed risks** - Ensure tests cover the scenarios and edge cases relevant to your change. + Branches and Pull Requests -------------------------- @@ -54,6 +71,7 @@ in pull request names are enumerated :ref:`in the release notes documentation`. If your pull request doesn't change the public API, apply the ``no-changelog`` label. +Release notes should be written from the user's perspective and clearly explain the impact or benefit of the change. Once approved, pull requests should be merged with the "Squash and Merge" option. At this time, do not use the merge queue option. diff --git a/docs/index.rst b/docs/index.rst index ef35f9f8cd8..ac54303fc31 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,199 +37,203 @@ contacting support. .. 
|AUTO| replace:: Automatically Instrumented -+--------------------------------------------------+---------------+----------------+ -| Integration | |SUPPVER| | |AUTO| [1]_ | -+==================================================+===============+================+ -| :ref:`aiobotocore` | >= 1.0.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aiohttp` (client) | >= 3.7 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aiohttp` (server) | >= 3.7 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aiohttp_jinja2` | >= 1.5.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aiomysql` | >= 0.1.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aiopg` | >= 0.16.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`algoliasearch` | >= 2.5.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`anthropic` | >= 0.28.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`aredis` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`asgi` | >= 3.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`asyncio` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`asyncpg` | >= 0.22.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`avro` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`azure_functions` | >= 1.10.1 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`botocore` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`bottle` | >= 0.12 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`cassandra` | >= 3.24 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`celery` | >= 4.4 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`cherrypy` | >= 17.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`confluent-kafka ` | >= 1.9.2 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`consul` | >= 1.1 | Yes [2]_ | -+--------------------------------------------------+---------------+----------------+ -| :ref:`coverage` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`crewai` | >= 0.102 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`datadog_lambda` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`django` | >= 2.2 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`djangorestframework ` | >= 3.11 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`dogpile.cache` | >= 0.6 | Yes | 
-+--------------------------------------------------+---------------+----------------+ -| :ref:`dramatiq` | >= 1.10.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`elasticsearch` | >= 1.1.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`falcon` | >= 3.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`fastapi` | >= 0.64 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`flask` | >= 1.1 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`flask_cache` | >= 0.13 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`freezegun` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`futures` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`gevent` (greenlet>=1.0) | >= 20.12 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`google_generativeai` | >= 0.7.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`grpc` | >= 1.34 | Yes [4]_ | -+--------------------------------------------------+---------------+----------------+ -| :ref:`graphene ` | >= 3.0.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`graphql-core ` | >= 3.1 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`gunicorn ` | >= 20.0.04 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`httplib` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`httpx` | >= 0.17 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`jinja2` | >= 2.10 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`kombu` | >= 4.6 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`langchain` | >= 0.1 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`langgraph` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`litellm` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`logbook` | >= 1.0.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`logging` | \* | Yes [5]_ | -+--------------------------------------------------+---------------+----------------+ -| :ref:`loguru` | >= 0.4.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`mako` | >= 1.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`mcp` | >= 1.10.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`mariadb` | >= 1.0.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`molten` | >= 1.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| 
:ref:`mongoengine` | >= 0.23 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`mysql-connector` | >= 8.0.5 | True | -+--------------------------------------------------+---------------+----------------+ -| :ref:`mysqldb` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`openai` | >= 1.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`openai-agents` | >= 0.0.2 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`opensearch-py ` | >= 1.1 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`protobuf` | \* | Yes [6]_ | -+--------------------------------------------------+---------------+----------------+ -| :ref:`psycopg` | >= 2.8 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pylibmc` | >= 1.6.2 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pymemcache` | >= 3.4 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pymongo` | >= 3.8 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pymysql` | >= 0.10 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pynamodb` | >= 5.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pyodbc` | >= 4.0.31 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pyramid` | >= 1.10 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pytest` | >= 6.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pytest_bdd` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`pytest_benchmark` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`redis` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`rediscluster` | >= 2.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`requests` | >= 2.20 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`rq` | >= 1.8 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`sanic` | >= 20.12.0 | Yes [3]_ | -+--------------------------------------------------+---------------+----------------+ -| :ref:`selenium` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`snowflake` | >= 2.3.0 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`sqlalchemy` | >= 1.3 | No | -+--------------------------------------------------+---------------+----------------+ -| :ref:`sqlite` | \* | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`starlette` | >= 0.14.0 | Yes | -+--------------------------------------------------+---------------+----------------+ -| :ref:`structlog` | >= 20.2.0 | No | 
-+--------------------------------------------------+---------------+----------------+
-| :ref:`subprocess`                                | \*            | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`tornado`                                   | >= 6.0        | No             |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`unittest`                                  | \*            | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`urllib`                                    | \*            | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`urllib3`                                   | >= 1.25.0     | No             |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`valkey`                                    | >= 6.0.0      | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`vertexai`                                  | >= 1.71.1     | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`vertica`                                   | >= 0.6        | Yes            |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`webbrowser`                                | \*            | No             |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`wsgi`                                      | \*            | No             |
-+--------------------------------------------------+---------------+----------------+
-| :ref:`yaaredis`                                  | >= 2.0.0      | Yes            |
-+--------------------------------------------------+---------------+----------------+
++--------------------------------------------------+------------+----------+------+
+| Integration                                      | |SUPPVER|  | |AUTO|   | [1]_ |
++==================================================+============+==========+======+
+| :ref:`aiobotocore`                               | >= 1.0.0   | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aiohttp` (client)                          | >= 3.7     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aiohttp` (server)                          | >= 3.7     | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aiohttp_jinja2`                            | >= 1.5.0   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aiomysql`                                  | >= 0.1.0   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aiopg`                                     | >= 0.16.0  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`algoliasearch`                             | >= 2.5.0   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`anthropic`                                 | >= 0.28.0  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`aredis`                                    | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`asgi`                                      | >= 3.0     | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`asyncio`                                   | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`asyncpg`                                   | >= 0.22.0  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`avro`                                      | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`azure_functions`                           | >= 1.10.1  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`botocore`                                  | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`bottle`                                    | >= 0.12    | No       |      |
++--------------------------------------------------+------------+----------+------+ +| :ref:`cassandra` | >= 3.24 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`celery` | >= 4.4 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`cherrypy` | >= 17.0 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`confluent-kafka ` | >= 1.9.2 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`consul` | >= 1.1 | Yes [2]_ | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`coverage` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`crewai` | >= 0.102 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`datadog_lambda` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`django` | >= 2.2 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`djangorestframework ` | >= 3.11 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`dogpile.cache` | >= 0.6 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`dramatiq` | >= 1.10.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`elasticsearch` | >= 1.1.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`falcon` | >= 3.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`fastapi` | >= 0.64 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`flask` | >= 1.1 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`flask_cache` | >= 0.13 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`freezegun` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`futures` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`gevent` (greenlet>=1.0) | >= 20.12 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`google_adk` | >= 1.0.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`google_genai` | >= 1.21.1 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`google_generativeai` | >= 0.7.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`grpc` | >= 1.34 | Yes [4]_ | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`graphene ` | >= 3.0.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`graphql-core ` | >= 3.1 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`gunicorn ` | >= 20.0.04 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`httplib` | \* | Yes | | 
++--------------------------------------------------+------------+----------+------+
+| :ref:`httpx`                                     | >= 0.17    | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`jinja2`                                    | >= 2.10    | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`kombu`                                     | >= 4.6     | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`langchain`                                 | >= 0.1     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`langgraph`                                 | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`litellm`                                   | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`logbook`                                   | >= 1.0.0   | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`logging`                                   | \*         | Yes [5]_ |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`loguru`                                    | >= 0.4.0   | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mako`                                      | >= 1.0     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mcp`                                       | >= 1.10.0  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mariadb`                                   | >= 1.0.0   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`molten`                                    | >= 1.0     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mongoengine`                               | >= 0.23    | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mysql-connector`                           | >= 8.0.5   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`mysqldb`                                   | \*         | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`openai`                                    | >= 1.0     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`openai-agents`                             | >= 0.0.2   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`opensearch-py `                            | >= 1.1     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`protobuf`                                  | \*         | Yes [6]_ |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`psycopg`                                   | >= 2.8     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pylibmc`                                   | >= 1.6.2   | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pymemcache`                                | >= 3.4     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pymongo`                                   | >= 3.8     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pymysql`                                   | >= 0.10    | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pynamodb`                                  | >= 5.0     | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pyodbc`                                    | >= 4.0.31  | Yes      |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pyramid`                                   | >= 1.10    | No       |      |
++--------------------------------------------------+------------+----------+------+
+| :ref:`pytest`                                    | >= 6.0
| Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`pytest_bdd` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`pytest_benchmark` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`redis` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`rediscluster` | >= 2.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`requests` | >= 2.20 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`rq` | >= 1.8 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`sanic` | >= 20.12.0 | Yes [3]_ | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`selenium` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`snowflake` | >= 2.3.0 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`sqlalchemy` | >= 1.3 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`sqlite` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`starlette` | >= 0.14.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`structlog` | >= 20.2.0 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`subprocess` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`tornado` | >= 6.0 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`unittest` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`urllib` | \* | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`urllib3` | >= 1.25.0 | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`valkey` | >= 6.0.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`vertexai` | >= 1.71.1 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`vertica` | >= 0.6 | Yes | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`webbrowser` | \* | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`wsgi` | \* | No | | ++--------------------------------------------------+------------+----------+------+ +| :ref:`yaaredis` | >= 2.0.0 | Yes | | ++--------------------------------------------------+------------+----------+------+ .. [1] Libraries that are automatically instrumented when the diff --git a/docs/integrations.rst b/docs/integrations.rst index 6ec454907dc..28696bb363d 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -237,6 +237,13 @@ gevent .. automodule:: ddtrace.contrib.internal.gevent +.. _google_adk: + +google-adk +^^^^^^^^^^^^ +.. automodule:: ddtrace.contrib.internal.google_adk + + .. 
_google_genai: google-genai diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 09265483f3d..19d3e99b2e8 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -1,5 +1,6 @@ AArch +adk agentless aiobotocore aiohttp @@ -122,6 +123,7 @@ genai generativeai gevent Gitlab +google-adk GPU graphene graphql diff --git a/experimental_debug_string_dump.json b/experimental_debug_string_dump.json new file mode 100644 index 00000000000..c34a2ffd05f --- /dev/null +++ b/experimental_debug_string_dump.json @@ -0,0 +1,99 @@ +{ + "ucontext": "ucontext_t { uc_flags: 7, uc_link: 0x0, uc_stack: stack_t { ss_sp: 0x743deadc0000, ss_flags: 0, ss_size: 65536 }, uc_mcontext: mcontext_t { gregs: [0, 127809331044922, 0, 127809325475264, 140726899625856, 140726899626064, 1, 0, 0, -1, 0, 4294967295, 127809325530248, 0, 140726899625160, 140726899625368, 127809330854360, 66179, 12103423998558259, 4, 14, 0, 0], fpregs: 0x743deadcf540, __private: [0, 0, 0, 0, 0, 0, 0, 0] }, uc_sigmask: sigset_t { __val: [0, 11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }, __private: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 31, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99, 0, 116, 0, 111, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 0, 114, 0, 0, 0, 0, 0, 99, 0, 111, 0, 0, 0, 0, 0, 105, 0, 101, 0, 111, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, 0, 0, 0, 99, 0, 0, 0, 104, 0, 0, 0, 32, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 111, 0, 0, 0, 114, 0, 0, 0, 32, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 128, 199, 48, 49, 93, 136, 85, 59, 0, 0, 0, 0, 0, 0, 0, 0, 100, 171, 99, 130, 7, 91, 229, 191, 0, 0, 0, 0, 0, 0, 0, 0, 27, 99, 108, 213, 49, 161, 233, 63, 0, 0, 0, 0, 0, 0, 0, 0, 233, 69, 72, 155, 91, 73, 242, 191, 0, 0, 0, 0, 0, 0, 0, 0, 52, 121, 227, 150, 79, 248, 140, 67, 143, 13, 128, 21, 35, 58, 40, 13, 211, 19, 203, 193, 101, 194, 7, 72, 227, 201, 177, 122, 54, 74, 46, 67, 23, 38, 50, 64, 173, 107, 173, 144, 4, 8, 252, 226, 102, 193, 248, 241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, 88, 80, 70, 140, 10, 0, 0, 255, 2, 0, 0, 0, 0, 0, 0, 136, 10, 0, 0, 0, 0, 0, 0] }", + "runtime_stack": { + "format": "Datadog Runtime Callback 1.0", + "frames": [ + { + "function": "string_at", + "file": "/home/bits/.pyenv/versions/3.11.13/lib/python3.11/ctypes/__init__.py", + "line": 519 + }, + { + "function": "func16", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 724 + }, + { + "function": "func15", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 721 + }, + { + "function": "func14", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 718 + }, + { + "function": "func13", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 715 + }, + { + "function": "func12", + "file": "tests/internal/crashtracker/test_crashtracker.py", + 
"line": 712 + }, + { + "function": "func11", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 709 + }, + { + "function": "func10", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 706 + }, + { + "function": "func9", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 703 + }, + { + "function": "func8", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 700 + }, + { + "function": "func7", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 697 + }, + { + "function": "func6", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 694 + }, + { + "function": "func5", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 691 + }, + { + "function": "func4", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 688 + }, + { + "function": "func3", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 685 + }, + { + "function": "func2", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 682 + }, + { + "function": "func1", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 679 + }, + { + "function": "", + "file": "tests/internal/crashtracker/test_crashtracker.py", + "line": 734 + } + ], + "runtime_type": "python" + } +} diff --git a/hatch.toml b/hatch.toml index 5908ea3c476..62d1575c6c2 100644 --- a/hatch.toml +++ b/hatch.toml @@ -56,6 +56,7 @@ checks = [ "security", "test", "suitespec-check", + "error-log-check", "sg", "sg-test", ] @@ -77,6 +78,9 @@ riot = [ suitespec-check = [ "python scripts/check_suitespec_coverage.py" ] +error-log-check = [ + "python scripts/check_constant_log_message.py" +] sg = [ "ast-grep scan {args:.}", ] diff --git a/lib-injection/sources/denied_executable_modules.txt b/lib-injection/sources/denied_executable_modules.txt new file mode 100644 index 00000000000..fce2382ab4e --- /dev/null +++ b/lib-injection/sources/denied_executable_modules.txt @@ -0,0 +1,8 @@ +# Python modules run from the interpreter that should be denied +# These are module names (without -m prefix) that will be checked +# when Python interpreters are executed with the -m flag +py_compile + +# Additional modules can be added here in the future +# For example: +# some_other_problematic_module \ No newline at end of file diff --git a/lib-injection/sources/denied_executables.txt b/lib-injection/sources/denied_executables.txt index 67aef135e03..aca2cd8dbc1 100644 --- a/lib-injection/sources/denied_executables.txt +++ b/lib-injection/sources/denied_executables.txt @@ -1204,4 +1204,4 @@ usr/libexec/grepconf.sh # Python tools uwsgi # crashtracker receiver -_dd_crashtracker_receiver +_dd_crashtracker_receiver \ No newline at end of file diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index 27b43afd802..862224d58fd 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -60,11 +60,13 @@ def parse_version(version): RESULT_REASON = "unknown" RESULT_CLASS = "unknown" EXECUTABLES_DENY_LIST = set() +EXECUTABLE_MODULES_DENY_LIST = set() REQUIREMENTS_FILE_LOCATIONS = ( os.path.abspath(os.path.join(SCRIPT_DIR, "../datadog-lib/requirements.csv")), os.path.abspath(os.path.join(SCRIPT_DIR, "requirements.csv")), ) EXECUTABLE_DENY_LOCATION = os.path.abspath(os.path.join(SCRIPT_DIR, "denied_executables.txt")) +EXECUTABLE_MODULES_DENY_LOCATION = os.path.abspath(os.path.join(SCRIPT_DIR, "denied_executable_modules.txt")) 
SITE_PKGS_MARKER = "site-packages-ddtrace-py" BOOTSTRAP_MARKER = "bootstrap" @@ -147,6 +149,24 @@ def build_denied_executables(): return denied_executables +def build_denied_executable_modules(): + denied_modules = set() + _log("Checking denied-executable-modules list", level="debug") + try: + if os.path.exists(EXECUTABLE_MODULES_DENY_LOCATION): + with open(EXECUTABLE_MODULES_DENY_LOCATION, "r") as denyfile: + _log("Found modules deny-list file", level="debug") + for line in denyfile.readlines(): + cleaned = line.strip("\n").strip() + # Skip empty lines and comments + if cleaned and not cleaned.startswith("#"): + denied_modules.add(cleaned) + _log("Built denied-executable-modules list of %s entries" % (len(denied_modules),), level="debug") + except Exception as e: + _log("Failed to build denied-executable-modules list: %s" % e, level="debug") + return denied_modules + + def create_count_metric(metric, tags=None): if tags is None: tags = [] @@ -262,12 +282,30 @@ def get_first_incompatible_sysarg(): _log("Checking sys.args: len(sys.argv): %s" % (len(sys.argv),), level="debug") if len(sys.argv) <= 1: return + + # Check the main executable first argument = sys.argv[0] _log("Is argument %s in deny-list?" % (argument,), level="debug") if argument in EXECUTABLES_DENY_LIST or os.path.basename(argument) in EXECUTABLES_DENY_LIST: _log("argument %s is in deny-list" % (argument,), level="debug") return argument + # Check for "-m module" patterns, but only for Python interpreters + if len(sys.argv) >= 3: + executable_basename = os.path.basename(argument) + if executable_basename.startswith("python"): + try: + m_index = sys.argv.index("-m") + if m_index + 1 < len(sys.argv): + module_name = sys.argv[m_index + 1] + if module_name in EXECUTABLE_MODULES_DENY_LIST: + _log("Module %s is in deny-list" % (module_name,), level="debug") + return "-m %s" % module_name + except ValueError: + # "-m" not found in sys.argv, continue normally + pass + return None + def _inject(): global DDTRACE_VERSION @@ -276,6 +314,7 @@ def _inject(): global PYTHON_RUNTIME global DDTRACE_REQUIREMENTS global EXECUTABLES_DENY_LIST + global EXECUTABLE_MODULES_DENY_LIST global TELEMETRY_DATA global RESULT global RESULT_REASON @@ -287,6 +326,7 @@ def _inject(): INSTALLED_PACKAGES = build_installed_pkgs() DDTRACE_REQUIREMENTS = build_requirements(PYTHON_VERSION) EXECUTABLES_DENY_LIST = build_denied_executables() + EXECUTABLE_MODULES_DENY_LIST = build_denied_executable_modules() dependency_incomp = False runtime_incomp = False spec = None diff --git a/releasenotes/notes/add-apm-and-llmobs-support-for-google-adk-8a6bb3842b464da4.yaml b/releasenotes/notes/add-apm-and-llmobs-support-for-google-adk-8a6bb3842b464da4.yaml new file mode 100644 index 00000000000..5b10d4ee6ee --- /dev/null +++ b/releasenotes/notes/add-apm-and-llmobs-support-for-google-adk-8a6bb3842b464da4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + google-adk: Adds APM tracing and LLM Observability support for the Google ADK library (google-adk), covering agent runs, tool calls, and code execution.
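For reviewers trying the new integration, a minimal usage sketch follows. Hedged: the ``patch()`` keyword and the ``ml_app`` value are assumptions; the keyword simply mirrors the ``ddtrace/contrib/internal/google_adk`` module name, and ``LLMObs.enable()`` is the existing LLM Observability entry point::

    from ddtrace import patch
    from ddtrace.llmobs import LLMObs

    patch(google_adk=True)  # assumed keyword, mirroring the contrib module name
    LLMObs.enable(ml_app="adk-demo")  # hypothetical app name

    # From here, google-adk agent runs, tool calls, and code execution
    # should produce APM spans and LLM Observability records.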
diff --git a/releasenotes/notes/add-django-tracing-minimal-d995af59e59e748e.yaml b/releasenotes/notes/add-django-tracing-minimal-d995af59e59e748e.yaml new file mode 100644 index 00000000000..b90b8e1c900 --- /dev/null +++ b/releasenotes/notes/add-django-tracing-minimal-d995af59e59e748e.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + django: This introduces the ``DD_DJANGO_TRACING_MINIMAL`` environment variable for performance-sensitive applications. + When enabled, this disables Django ORM, cache, and template instrumentation while keeping middleware instrumentation enabled. + This significantly reduces overhead by removing Django-specific spans while preserving visibility into the underlying + database drivers, cache clients, and other integrations. For example, with this enabled, Django ORM query spans are + disabled but database driver spans (e.g., psycopg, MySQLdb) will still be created. To enable minimal tracing, + set ``DD_DJANGO_TRACING_MINIMAL=true``. \ No newline at end of file diff --git a/releasenotes/notes/add-partition-tag-b423877806ab7271.yaml b/releasenotes/notes/add-partition-tag-b423877806ab7271.yaml new file mode 100644 index 00000000000..274273cc659 --- /dev/null +++ b/releasenotes/notes/add-partition-tag-b423877806ab7271.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + AWS: adds the ``aws.partition`` tag to AWS traces based on the region for the boto, botocore, and aiobotocore integrations. + diff --git a/releasenotes/notes/asyncio_fix-d279a20c05a2bf24.yaml b/releasenotes/notes/asyncio_fix-d279a20c05a2bf24.yaml new file mode 100644 index 00000000000..7c970ed258e --- /dev/null +++ b/releasenotes/notes/asyncio_fix-d279a20c05a2bf24.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + AAP: This fix resolves an issue where stream endpoints with daphne/django were unresponsive due to an asyncio error. diff --git a/releasenotes/notes/chore-debugger-agent-check-uploader-5d644d20cf9b4af5.yaml b/releasenotes/notes/chore-debugger-agent-check-uploader-5d644d20cf9b4af5.yaml new file mode 100644 index 00000000000..81dfbd07623 --- /dev/null +++ b/releasenotes/notes/chore-debugger-agent-check-uploader-5d644d20cf9b4af5.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + dynamic instrumentation/exception replay/code origin for spans: added + support for the latest Datadog agent intake for snapshots. This requires a + minimum agent version of 7.49.0. diff --git a/releasenotes/notes/ci_visibility_fix_itr_count-7f4c5da42df18aea.yaml b/releasenotes/notes/ci_visibility_fix_itr_count-7f4c5da42df18aea.yaml new file mode 100644 index 00000000000..547caa23fb7 --- /dev/null +++ b/releasenotes/notes/ci_visibility_fix_itr_count-7f4c5da42df18aea.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + CI Visibility: This fix solves an issue where the ITR skip count metric was aggregating skipped tests even when the skipping level was set to suite. The metric now counts skipped suites or skipped tests, depending on the ITR skipping level. diff --git a/releasenotes/notes/feat-azure-servicebus-batch-distributed-tracing-83d382edd4927104.yaml b/releasenotes/notes/feat-azure-servicebus-batch-distributed-tracing-83d382edd4927104.yaml new file mode 100644 index 00000000000..1aa1ae56e23 --- /dev/null +++ b/releasenotes/notes/feat-azure-servicebus-batch-distributed-tracing-83d382edd4927104.yaml @@ -0,0 +1,5 @@ +features: + - | + azure_servicebus: Add distributed tracing support for sending batches with Azure Service Bus producers. + - | + azure_functions: Use span links to connect Service Bus trigger consumers to the producers that send the messages.
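The Django minimal-tracing note above is configuration-only; a short sketch of how it could be enabled in-process (a sketch, assuming the variable is read when ddtrace bootstraps, so it must be set before that import)::

    import os

    # Must be set before ddtrace configures the Django integration
    os.environ["DD_DJANGO_TRACING_MINIMAL"] = "true"

    import ddtrace.auto  # noqa: E402,F401  # bootstrap tracing with minimal Django spans

In deployed services the variable would more typically be set in the environment of the ``ddtrace-run`` process instead.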
diff --git a/releasenotes/notes/feat-resource-renaming-e5911d3975c220e3.yaml b/releasenotes/notes/feat-resource-renaming-e5911d3975c220e3.yaml new file mode 100644 index 00000000000..6763c553f3e --- /dev/null +++ b/releasenotes/notes/feat-resource-renaming-e5911d3975c220e3.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + tracing: Added support for resource renaming, an experimental feature that lets the Datadog platform adjust the resource field on web request spans when the endpoint cannot be correctly deduced. Enable the feature by setting ``DD_TRACE_RESOURCE_RENAMING_ENABLED="true"``. diff --git a/releasenotes/notes/fix-di-celery-exception-capturing-93e54eb0214ece66.yaml b/releasenotes/notes/fix-di-celery-exception-capturing-93e54eb0214ece66.yaml new file mode 100644 index 00000000000..5b3ce95c55d --- /dev/null +++ b/releasenotes/notes/fix-di-celery-exception-capturing-93e54eb0214ece66.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + exception replay: prevent Celery from crashing when a task raises a custom + exception with mandatory arguments. diff --git a/releasenotes/notes/fix-di-copy-pending-probes-be21850231275165.yaml b/releasenotes/notes/fix-di-copy-pending-probes-be21850231275165.yaml new file mode 100644 index 00000000000..8d2ee95d202 --- /dev/null +++ b/releasenotes/notes/fix-di-copy-pending-probes-be21850231275165.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + dynamic instrumentation: fix an issue that prevented multiple probes on the + same location from being instrumented. diff --git a/releasenotes/notes/fix-span-encoding-bytes-5c285cf9e55db509.yaml b/releasenotes/notes/fix-span-encoding-bytes-5c285cf9e55db509.yaml new file mode 100644 index 00000000000..e8df8b088ce --- /dev/null +++ b/releasenotes/notes/fix-span-encoding-bytes-5c285cf9e55db509.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + tracing: Fixes encoding of bytes objects as span attributes by truncating the byte string rather than raising an error via ``PyErr_Format``. diff --git a/releasenotes/notes/flask_endpoint_discovery_fix-7f98200c2fa342c4.yaml b/releasenotes/notes/flask_endpoint_discovery_fix-7f98200c2fa342c4.yaml new file mode 100644 index 00000000000..304d3fe4a45 --- /dev/null +++ b/releasenotes/notes/flask_endpoint_discovery_fix-7f98200c2fa342c4.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + AAP: This fix resolves an issue where the endpoint discovery feature could cause a crash at startup for Flask applications. diff --git a/releasenotes/notes/iast-feat-untrusted-serialization-6e702b92672c9442.yaml b/releasenotes/notes/iast-feat-untrusted-serialization-6e702b92672c9442.yaml new file mode 100644 index 00000000000..e4622ecfb39 --- /dev/null +++ b/releasenotes/notes/iast-feat-untrusted-serialization-6e702b92672c9442.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Code Security (IAST): Adds Untrusted Serialization detection, which will be displayed on your Datadog Vulnerability + Explorer dashboard. + See the `Application Vulnerability Management `_ documentation for more information about this feature. diff --git a/releasenotes/notes/libinjection-denytlist-modules-a5e0407ed6b8166a.yaml b/releasenotes/notes/libinjection-denytlist-modules-a5e0407ed6b8166a.yaml new file mode 100644 index 00000000000..ad5965ca1fd --- /dev/null +++ b/releasenotes/notes/libinjection-denytlist-modules-a5e0407ed6b8166a.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + libinjection: allow Python modules executed with ``-m`` to be matched against entries in the denylist.
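The libinjection fix above corresponds to the ``sitecustomize.py`` change earlier in this diff; a standalone sketch of the same check, useful for reasoning about edge cases (simplified, outside the injector)::

    import os
    import sys

    DENIED_MODULES = {"py_compile"}  # mirrors denied_executable_modules.txt

    def first_denied_module(argv):
        # Only applies when a Python interpreter is invoked with `-m <module>`
        if len(argv) >= 3 and os.path.basename(argv[0]).startswith("python"):
            try:
                m_index = argv.index("-m")
            except ValueError:
                return None  # no -m flag present
            if m_index + 1 < len(argv) and argv[m_index + 1] in DENIED_MODULES:
                return "-m %s" % argv[m_index + 1]
        return None

    assert first_denied_module(["python3", "-m", "py_compile", "app.py"]) == "-m py_compile"
    assert first_denied_module(["python3", "app.py"]) is None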
diff --git a/releasenotes/notes/llmobs-apm-tracing-8cd612f8a3af4960.yaml b/releasenotes/notes/llmobs-apm-tracing-8cd612f8a3af4960.yaml new file mode 100644 index 00000000000..bc3abb2f79b --- /dev/null +++ b/releasenotes/notes/llmobs-apm-tracing-8cd612f8a3af4960.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + LLM Observability: ensures APM tracing is disabled when ``DD_APM_TRACING_ENABLED=0`` is set while using LLM Observability. \ No newline at end of file diff --git a/releasenotes/notes/profiling-echion-upgrade-04aa2bb0bdf4882d.yaml b/releasenotes/notes/profiling-echion-upgrade-04aa2bb0bdf4882d.yaml new file mode 100644 index 00000000000..775c7ffcf0c --- /dev/null +++ b/releasenotes/notes/profiling-echion-upgrade-04aa2bb0bdf4882d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + profiling: Upgrades echion to resolve segmentation faults that can happen + on services with many ``asyncio.Task`` objects. diff --git a/releasenotes/notes/profiling-uwsgi-atexit-978289df89d67ccb.yaml b/releasenotes/notes/profiling-uwsgi-atexit-978289df89d67ccb.yaml new file mode 100644 index 00000000000..93a91b4e1f2 --- /dev/null +++ b/releasenotes/notes/profiling-uwsgi-atexit-978289df89d67ccb.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + profiling: Profiling won't load if ``--skip-atexit`` is not set when + ``--lazy`` or ``--lazy-apps`` is set on uWSGI < 2.0.30. This prevents + crashes involving the profiler's native extension modules. See + https://github.com/unbit/uwsgi/pull/2726 for details. + diff --git a/releasenotes/notes/sourcecode-check-env-vars-cab3d9b6311c2695.yaml b/releasenotes/notes/sourcecode-check-env-vars-cab3d9b6311c2695.yaml new file mode 100644 index 00000000000..9f96e3dae8a --- /dev/null +++ b/releasenotes/notes/sourcecode-check-env-vars-cab3d9b6311c2695.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + source code integration: check that ``DD_GIT_COMMIT_SHA`` and ``DD_GIT_REPOSITORY_URL`` are defined before + using the git command. diff --git a/releasenotes/notes/update-prompt-annotation-0fa90edf6829fe1d.yaml b/releasenotes/notes/update-prompt-annotation-0fa90edf6829fe1d.yaml new file mode 100644 index 00000000000..16619b3fce6 --- /dev/null +++ b/releasenotes/notes/update-prompt-annotation-0fa90edf6829fe1d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + LLM Observability: Extends the prompt structure to add ``tags`` and ``chat_template``. + Adds a new ``Prompt`` TypedDict class to be used in annotation and ``annotation_context``.
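A hedged sketch of what the extended prompt annotation could look like (the keys other than ``tags`` and ``chat_template``, and the exact shape of ``chat_template``, are assumptions; ``LLMObs.annotation_context`` is the existing annotation API)::

    from ddtrace.llmobs import LLMObs

    prompt = {
        "id": "qa-prompt",    # hypothetical prompt identifier
        "version": "1.0",     # hypothetical
        "chat_template": [    # assumed shape: role/content pairs
            {"role": "system", "content": "Answer using {context}."},
        ],
        "tags": {"team": "search"},  # new field from this change
    }

    with LLMObs.annotation_context(prompt=prompt):
        ...  # LLM spans started here carry the prompt metadata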
\ No newline at end of file diff --git a/riotfile.py b/riotfile.py index c410d9dbc98..f1a5e05a189 100644 --- a/riotfile.py +++ b/riotfile.py @@ -154,8 +154,9 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="appsec_integrations_packages", pys=select_pys(), - command="python -m pytest -vvv -s -n 8 --no-cov --no-ddtrace tests/appsec/integrations/packages_tests/", + command="python -m pytest -v tests/appsec/integrations/packages_tests/", pkgs={ + "gevent": latest, "pytest-xdist": latest, "pytest-asyncio": latest, "requests": latest, @@ -218,7 +219,11 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT command="pytest -vvv {cmdargs} tests/appsec/integrations/django_tests/", pkgs={ "requests": latest, + "gunicorn": latest, + "gevent": latest, "pylibmc": latest, + "PyYAML": latest, + "dill": latest, "bcrypt": "==4.2.1", "pytest-django[testing]": "==3.10.0", }, @@ -319,8 +324,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="appsec_iast_default", - # TODO(avara1986): remove "-vvv --no-ddtrace --no-cov" when CI visibility errors were fixed in #14581 - command="pytest -vvv --no-ddtrace --no-cov {cmdargs} tests/appsec/iast/", + command="pytest -v {cmdargs} tests/appsec/iast/", pys=select_pys(), pkgs={ "requests": latest, @@ -515,7 +519,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="lib_injection", - command="pytest {cmdargs} tests/lib_injection/test_guardrails.py", + command="pytest {cmdargs} tests/lib_injection/", venvs=[ Venv( pys=select_pys(), @@ -2196,7 +2200,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.13"), pkgs={ "pytest-asyncio": ["==0.23.7"], }, @@ -2221,11 +2225,17 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, venvs=[ Venv( - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.12"), pkgs={ "pytest-asyncio": ["==0.23.7"], }, ), + Venv( + pys=select_pys(min_version="3.13"), + pkgs={ + "pytest-asyncio": [">=1.0.0"], + }, + ), ], ), Venv( @@ -2624,11 +2634,23 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="asyncio", command="pytest {cmdargs} tests/contrib/asyncio", - pys=select_pys(), pkgs={ "pytest-randomly": latest, - "pytest-asyncio": "==0.21.1", }, + venvs=[ + Venv( + pys=select_pys(max_version="3.12"), + pkgs={ + "pytest-asyncio": "==0.21.1", + }, + ), + Venv( + pys=select_pys(min_version="3.13"), + pkgs={ + "pytest-asyncio": ">=1.0.0", + }, + ), + ], ), Venv( name="openai", @@ -2976,6 +2998,17 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "google-ai-generativelanguage": [latest], }, ), + Venv( + name="google_adk", + command="pytest {cmdargs} tests/contrib/google_adk", + pys=select_pys(min_version="3.9", max_version="3.13"), + pkgs={ + "pytest-asyncio": latest, + "google-adk": ["~=1.0.0", latest], + "vcrpy": latest, + "deprecated": latest, + }, + ), Venv( name="google_genai", command="pytest {cmdargs} tests/contrib/google_genai", @@ -3102,13 +3135,22 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "requests": latest, }, ), + Venv( + name="azure_functions:servicebus", + command="pytest {cmdargs} tests/contrib/azure_functions_servicebus", + pys=select_pys(min_version="3.8", 
max_version="3.11"), + pkgs={ + "azure.functions": ["~=1.10.1", latest], + "azure.servicebus": latest, + }, + ), Venv( name="azure_servicebus", command="pytest {cmdargs} tests/contrib/azure_servicebus", pys=select_pys(min_version="3.8", max_version="3.13"), pkgs={ "azure.servicebus": ["~=7.14.0", latest], - "pytest-asyncio": "==0.24.0", + "pytest-asyncio": "==0.23.7", }, ), Venv( @@ -3307,6 +3349,11 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "pytest-randomly": latest, }, venvs=[ + Venv( + command="python -m pytest {cmdargs} tests/profiling_v2/test_uwsgi.py", + pys=select_pys(), + pkgs={"uwsgi": "<2.0.30"}, + ), # Python 3.8 + 3.9 Venv( pys=["3.8", "3.9"], @@ -3568,7 +3615,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="appsec_threats_flask", - command="pytest tests/appsec/contrib_appsec/test_flask.py {cmdargs}", + command="pytest -vv tests/appsec/contrib_appsec/test_flask.py {cmdargs}", pkgs={ "pytest": latest, "pytest-cov": latest, diff --git a/scripts/check_constant_log_message.py b/scripts/check_constant_log_message.py new file mode 100644 index 00000000000..5f12a86bd00 --- /dev/null +++ b/scripts/check_constant_log_message.py @@ -0,0 +1,123 @@ +""" +Check that log.error() and add_error_log calls use constant string literals as first argument. +This script scans all Python files in ddtrace/ and reports violations. +Exceptions can be specified in the EXCEPTIONS set using: +- "filepath:line" to exclude a specific line in a file +""" + +import ast +import pathlib +import sys +from typing import List +from typing import Tuple + + +# Line-specific exceptions to exclude from checking +# Format: "filepath:line" to exclude a specific line in a file +EXCEPTIONS = { + # only constant message can be log.error() + "ddtrace/internal/telemetry/logging.py:18", + # log.exception calls use constant messages + "ddtrace/contrib/internal/aws_lambda/patch.py:36", + # log.error in _probe/registry.py ends up with a log.debug() + "ddtrace/debugging/_probe/registry.py:137", + "ddtrace/debugging/_probe/registry.py:146", + # we added a constant check for the wrapping method of add_error_log + "ddtrace/appsec/_iast/_metrics.py:53", + # we added a constant check for the wrapping method of iast_error + "ddtrace/appsec/_iast/_logs.py:41", + "ddtrace/appsec/_iast/_logs.py:45", + # the non constant part is an object type + "ddtrace/appsec/_iast/_taint_tracking/_taint_objects_base.py:75", +} + + +class LogMessageChecker(ast.NodeVisitor): + def __init__(self, filepath: str): + self.filepath = filepath + self.errors: List[Tuple[int, int]] = [] + + def _has_send_to_telemetry_false(self, node: ast.Call) -> bool: + """Check if the call has extra={'send_to_telemetry': False}.""" + for keyword in node.keywords: + if keyword.arg == "extra" and isinstance(keyword.value, ast.Dict): + for key, value in zip(keyword.value.keys, keyword.value.values): + if ( + isinstance(key, ast.Constant) + and key.value == "send_to_telemetry" + and isinstance(value, ast.Constant) + and value.value is False + ): + return True + return False + + def visit_Call(self, node: ast.Call) -> None: + """Check if this is a log.error(), add_error_log, or iast_error call with non-constant first arg.""" + fn = node.func + + # Check for add_error_log calls + is_add_integration_error = isinstance(fn, ast.Attribute) and fn.attr == "add_error_log" + # Check for log.error() calls (simple check for .error() on any variable) + is_log_error = isinstance(fn, ast.Attribute) and 
(fn.attr == "error" or fn.attr == "exception") + # Check for iast_error calls + is_iast_log = isinstance(fn, ast.Name) and ( + fn.id == "iast_error" + or fn.id == "iast_instrumentation_ast_patching_errorr_log" + or fn.id == "iast_propagation_error_log" + ) + is_target = is_add_integration_error or is_log_error or is_iast_log + + if is_target and node.args: + msg = node.args[0] + is_constant_string = isinstance(msg, ast.Constant) and isinstance(msg.value, str) + + # Skip constant string check if send_to_telemetry is False for log.error/exception calls + if not is_constant_string and is_log_error and self._has_send_to_telemetry_false(node): + pass + elif not is_constant_string and not self._is_line_exception(node.lineno): + self.errors.append((node.lineno, node.col_offset)) + + self.generic_visit(node) + + def _is_line_exception(self, line_no: int) -> bool: + """Check if this specific line is in the exceptions list.""" + return f"{str(self.filepath)}:{line_no}" in EXCEPTIONS + + +def check_file(filepath: pathlib.Path) -> List[Tuple[int, int]]: + try: + source = filepath.read_text(encoding="utf-8") + tree = ast.parse(source, filename=str(filepath)) + checker = LogMessageChecker(str(filepath)) + checker.visit(tree) + return checker.errors + except (OSError, UnicodeDecodeError) as e: + print(f"Error reading {filepath}: {e}", file=sys.stderr) + return [] + except SyntaxError as e: + print(f"Syntax error in {filepath}:{e.lineno}:{e.offset}: {e.msg}", file=sys.stderr) + return [] + + +def main() -> int: + contrib_path = pathlib.Path("ddtrace") + python_files = list(contrib_path.rglob("*.py")) + + total_errors = 0 + + for filepath in python_files: + errors = check_file(filepath) + for line_no, col_no in errors: + print(f"{filepath}:{line_no}:{col_no}: " "LOG001 first argument to logging call must be a constant string") + total_errors += 1 + + if total_errors > 0: + print(f"\nFound {total_errors} violation(s)", file=sys.stderr) + return 1 + + print("All logging calls use constant strings ✓") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/gen_gitlab_config.py b/scripts/gen_gitlab_config.py index 0947f206a73..53335c20a79 100644 --- a/scripts/gen_gitlab_config.py +++ b/scripts/gen_gitlab_config.py @@ -265,6 +265,11 @@ def check(name: str, command: str, paths: t.Set[str]) -> None: command="hatch run lint:suitespec-check", paths={"*"}, ) + check( + name="Check integration error logs", + command="hatch run lint:error-log-check", + paths={"ddtrace/contrib/**/*.py"}, + ) if not checks: return diff --git a/scripts/run-tests b/scripts/run-tests index cd3be7c638f..d8a281fc712 100755 --- a/scripts/run-tests +++ b/scripts/run-tests @@ -1,7 +1,9 @@ #!/usr/bin/env uv run --script +# -*- mode: python -*- # /// script # requires-python = ">=3.8" # dependencies = [ +# "riot>=0.20.1", # "ruamel.yaml>=0.17.21", # ] # /// @@ -21,8 +23,7 @@ import re import subprocess import sys from pathlib import Path -from typing import Dict, List, Set, Optional, Tuple, NamedTuple -import tempfile +from typing import Dict, List, Set, NamedTuple # Add project root and tests to Python path to import suitespec and riotfile ROOT = Path(__file__).parents[1] diff --git a/setup.py b/setup.py index 1fbf6ff72ea..d73aee01c2d 100644 --- a/setup.py +++ b/setup.py @@ -192,6 +192,13 @@ def is_64_bit_python(): return sys.maxsize > (1 << 32) +rust_features = [] +if CURRENT_OS in ("Linux", "Darwin") and is_64_bit_python(): + rust_features.append("crashtracker") + if sys.version_info[:2] < (3, 14): + 
rust_features.append("profiling") + + class PatchedDistribution(Distribution): def __init__(self, attrs=None): super().__init__(attrs) @@ -211,9 +218,7 @@ def __init__(self, attrs=None): py_limited_api="auto", binding=Binding.PyO3, debug=COMPILE_MODE.lower() == "debug", - features=( - ["crashtracker", "profiling"] if CURRENT_OS in ("Linux", "Darwin") and is_64_bit_python() else [] - ), + features=rust_features, ) ] @@ -961,32 +966,33 @@ def get_exts_for(name): ) if CURRENT_OS in ("Linux", "Darwin") and is_64_bit_python(): - ext_modules.append( - CMakeExtension( - "ddtrace.internal.datadog.profiling.ddup._ddup", - source_dir=DDUP_DIR, - extra_source_dirs=[ - DDUP_DIR / ".." / "cmake", - DDUP_DIR / ".." / "dd_wrapper", - ], - optional=False, - dependencies=[ - DDUP_DIR.parent / "libdd_wrapper", - ], + if sys.version_info < (3, 14): + ext_modules.append( + CMakeExtension( + "ddtrace.internal.datadog.profiling.ddup._ddup", + source_dir=DDUP_DIR, + extra_source_dirs=[ + DDUP_DIR / ".." / "cmake", + DDUP_DIR / ".." / "dd_wrapper", + ], + optional=False, + dependencies=[ + DDUP_DIR.parent / "libdd_wrapper", + ], + ) ) - ) - ext_modules.append( - CMakeExtension( - "ddtrace.internal.datadog.profiling.stack_v2._stack_v2", - source_dir=STACK_V2_DIR, - extra_source_dirs=[ - STACK_V2_DIR / ".." / "cmake", - STACK_V2_DIR / ".." / "dd_wrapper", - ], - optional=False, - ), - ) + ext_modules.append( + CMakeExtension( + "ddtrace.internal.datadog.profiling.stack_v2._stack_v2", + source_dir=STACK_V2_DIR, + extra_source_dirs=[ + STACK_V2_DIR / ".." / "cmake", + STACK_V2_DIR / ".." / "dd_wrapper", + ], + optional=False, + ), + ) else: diff --git a/src/native/Cargo.lock b/src/native/Cargo.lock index d2055ea2f06..83c8af453c9 100644 --- a/src/native/Cargo.lock +++ b/src/native/Cargo.lock @@ -220,7 +220,7 @@ dependencies = [ [[package]] name = "build_common" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "cbindgen", "serde", @@ -287,7 +287,7 @@ dependencies = [ [[package]] name = "cc_utils" version = "0.1.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "cc", @@ -489,7 +489,7 @@ dependencies = [ [[package]] name = "data-pipeline" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "arc-swap", @@ -519,7 +519,7 @@ dependencies = [ [[package]] name = "datadog-alloc" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "allocator-api2", "libc", @@ -529,7 +529,7 @@ dependencies = [ [[package]] name = "datadog-crashtracker" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = 
"git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "backtrace", @@ -562,7 +562,7 @@ dependencies = [ [[package]] name = "datadog-ddsketch" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "prost", ] @@ -570,7 +570,7 @@ dependencies = [ [[package]] name = "datadog-library-config" version = "0.0.2" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "memfd", @@ -581,10 +581,22 @@ dependencies = [ "serde_yaml", ] +[[package]] +name = "datadog-log" +version = "21.0.0" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" +dependencies = [ + "chrono", + "ddcommon-ffi", + "tracing", + "tracing-appender", + "tracing-subscriber", +] + [[package]] name = "datadog-profiling" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "bitmaps", @@ -614,7 +626,7 @@ dependencies = [ [[package]] name = "datadog-profiling-ffi" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "build_common", @@ -633,7 +645,7 @@ dependencies = [ [[package]] name = "datadog-profiling-protobuf" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "prost", ] @@ -641,7 +653,7 @@ dependencies = [ [[package]] name = "datadog-trace-normalization" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "datadog-trace-protobuf", @@ -650,7 +662,7 @@ dependencies = [ [[package]] name = "datadog-trace-protobuf" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "prost", "serde", @@ -660,7 +672,7 @@ dependencies = [ [[package]] name = "datadog-trace-utils" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", 
"bytes", @@ -685,7 +697,7 @@ dependencies = [ [[package]] name = "ddcommon" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "cc", @@ -718,7 +730,7 @@ dependencies = [ [[package]] name = "ddcommon-ffi" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "build_common", @@ -732,7 +744,7 @@ dependencies = [ [[package]] name = "ddtelemetry" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "base64", @@ -759,14 +771,18 @@ version = "0.1.0" dependencies = [ "anyhow", "build_common", + "cc", "data-pipeline", "datadog-crashtracker", "datadog-ddsketch", "datadog-library-config", + "datadog-log", "datadog-profiling-ffi", "ddcommon", "pyo3", "pyo3-build-config", + "pyo3-ffi", + "tracing", ] [[package]] @@ -800,7 +816,7 @@ dependencies = [ [[package]] name = "dogstatsd-client" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "anyhow", "cadence", @@ -1332,6 +1348,15 @@ dependencies = [ "twox-hash", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "memchr" version = "2.7.5" @@ -1757,8 +1782,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -1769,9 +1803,15 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -2063,6 +2103,15 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -2211,6 +2260,15 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + [[package]] name = "time" version = "0.3.41" @@ -2245,7 +2303,7 @@ dependencies = [ [[package]] name = "tinybytes" version = "21.0.0" -source = "git+https://github.com/DataDog/libdatadog?rev=v21.0.0#4e1d7bb865885b48b4671fabc6643f0b5386f832" +source = "git+https://github.com/DataDog/libdatadog?rev=730d1ae2ca002e6861b94d62275844e0e70c4c29#730d1ae2ca002e6861b94d62275844e0e70c4c29" dependencies = [ "serde", ] @@ -2359,6 +2417,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-core" version = "0.1.34" @@ -2366,6 +2436,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "thread_local", + "tracing", + "tracing-core", + "tracing-serde", ] [[package]] @@ -2444,6 +2543,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "version_check" version = "0.9.5" diff --git a/src/native/Cargo.toml b/src/native/Cargo.toml index 4cbf3fabbfc..e09a9a621ce 100644 --- a/src/native/Cargo.toml +++ b/src/native/Cargo.toml @@ -16,19 +16,22 @@ profiling = ["dep:datadog-profiling-ffi"] [dependencies] anyhow = { version = "1.0", optional = true } -datadog-crashtracker = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0", optional = true } -datadog-ddsketch = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0" } -datadog-library-config = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0" } -data-pipeline = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0" } -datadog-profiling-ffi = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0", optional = true, features = [ +datadog-crashtracker = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29", optional = true } +datadog-ddsketch = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29" } +datadog-log = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29"} +datadog-library-config = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29" } +data-pipeline = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29" } 
+datadog-profiling-ffi = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29", optional = true, features = [ "cbindgen", ] } -ddcommon = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0" } +ddcommon = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29" } pyo3 = { version = "0.25", features = ["extension-module", "anyhow"] } - +pyo3-ffi = "0.25" +tracing = { version = "0.1", default-features = false } [build-dependencies] pyo3-build-config = "0.25" -build_common = { git = "https://github.com/DataDog/libdatadog", rev = "v21.0.0", features = [ +cc = "1.0" +build_common = { git = "https://github.com/DataDog/libdatadog", rev = "730d1ae2ca002e6861b94d62275844e0e70c4c29", features = [ "cbindgen", ] } diff --git a/src/native/build.rs b/src/native/build.rs index 91f43089a84..58da2d5f483 100644 --- a/src/native/build.rs +++ b/src/native/build.rs @@ -5,4 +5,69 @@ fn main() { if cfg!(target_os = "macos") { pyo3_build_config::add_extension_module_link_args(); } + + // Compile the C wrapper for CPython internal APIs + // This file defines Py_BUILD_CORE and provides access to internal functions + + // Get Python include directory using the cross-compilation info + let include_dir = match std::env::var("PYO3_CROSS_INCLUDE_DIR") { + Ok(dir) => std::path::PathBuf::from(dir), + Err(_) => { + // Fallback to using Python's sysconfig + let output = std::process::Command::new("python") + .args([ + "-c", + "import sysconfig; print(sysconfig.get_path('include'))", + ]) + .output() + .expect("Failed to run python to get include directory"); + std::path::PathBuf::from(String::from_utf8(output.stdout).unwrap().trim()) + } + }; + + // Add internal headers path for CPython internal APIs + let internal_headers_dir = include_dir.join("internal"); + + cc::Build::new() + .file("cpython_internal.c") + .include(&include_dir) + .include(&internal_headers_dir) // Add internal headers directory + .define("Py_BUILD_CORE", "1") + .compile("cpython_internal"); + + // Tell rustc to link the compiled C library + println!("cargo:rustc-link-lib=static=cpython_internal"); + + // Force linking to libpython to access internal symbols + // PyO3 normally avoids linking to libpython on Unix, but we need it for internal APIs + if !cfg!(target_os = "macos") { + // Get Python version and library info + let output = std::process::Command::new("python3") + .args(["-c", "import sysconfig; version = sysconfig.get_config_var('VERSION'); ldlibrary = sysconfig.get_config_var('LDLIBRARY'); libdir = sysconfig.get_config_var('LIBDIR'); print(f'{version}:{ldlibrary}:{libdir}')"]) + .output() + .expect("Failed to get Python library info"); + + let version_info = String::from_utf8(output.stdout).unwrap(); + let parts: Vec<&str> = version_info.trim().split(':').collect(); + + if parts.len() == 3 { + let version = parts[0]; + let ldlibrary = parts[1]; + let libdir = parts[2]; + + // Add library directory to search path + println!("cargo:rustc-link-search=native={}", libdir); + + // Extract library name from LDLIBRARY (e.g., "libpython3.11.so" -> "python3.11") + if let Some(lib_name) = ldlibrary + .strip_prefix("lib") + .and_then(|s| s.strip_suffix(".so")) + { + println!("cargo:rustc-link-lib={}", lib_name); + } else { + // Fallback to version-based naming + println!("cargo:rustc-link-lib=python{}", version); + } + } + } } diff --git a/src/native/cpython_internal.c b/src/native/cpython_internal.c new file mode 100644 index 00000000000..f1dd0929adb --- 
/dev/null +++ b/src/native/cpython_internal.c @@ -0,0 +1,17 @@ +// CPython internal API wrapper +// This file defines Py_BUILD_CORE to access internal CPython functions +// and provides a safe C interface for Rust FFI + +#define Py_BUILD_CORE 1 +#include <Python.h> +#include <internal/pycore_traceback.h> + +const char *crashtracker_dump_traceback_threads(int fd, + PyInterpreterState *interp, + PyThreadState *current_tstate) { + return _Py_DumpTracebackThreads(fd, interp, current_tstate); +} + +PyThreadState *crashtracker_get_current_tstate(void) { + return PyGILState_GetThisThreadState(); +} diff --git a/src/native/cpython_internal.h b/src/native/cpython_internal.h new file mode 100644 index 00000000000..5d4a4927828 --- /dev/null +++ b/src/native/cpython_internal.h @@ -0,0 +1,26 @@ +// CPython internal API wrapper header +// This provides C function declarations for accessing CPython internal APIs + +#ifndef CPYTHON_INTERNAL_H +#define CPYTHON_INTERNAL_H + +#include <Python.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// Wrapper function to call _Py_DumpTracebackThreads +// Returns error message on failure, NULL on success +const char *crashtracker_dump_traceback_threads(int fd, + PyInterpreterState *interp, + PyThreadState *current_tstate); + +// Wrapper to get the current thread state safely during crashes +PyThreadState *crashtracker_get_current_tstate(void); + +#ifdef __cplusplus +} +#endif + +#endif // CPYTHON_INTERNAL_H \ No newline at end of file diff --git a/src/native/crashtracker.rs b/src/native/crashtracker.rs index f20c6906b3a..23689667a49 100644 --- a/src/native/crashtracker.rs +++ b/src/native/crashtracker.rs @@ -1,15 +1,39 @@ use anyhow; use std::collections::HashMap; +use std::ffi::{c_char, c_int, c_void}; +use std::ptr; use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::Once; use std::time::Duration; +// Removed unused imports for debug logging use datadog_crashtracker::{ - CrashtrackerConfiguration, CrashtrackerReceiverConfig, Metadata, StacktraceCollection, + get_registered_runtime_type_ptr, is_runtime_callback_registered, + register_runtime_stack_callback, CallbackError, CallbackType, CrashtrackerConfiguration, + CrashtrackerReceiverConfig, Metadata, RuntimeStackFrame, RuntimeType, StacktraceCollection, }; use ddcommon::Endpoint; use pyo3::prelude::*; +extern "C" { + fn crashtracker_dump_traceback_threads( + fd: c_int, + interp: *mut pyo3_ffi::PyInterpreterState, + current_tstate: *mut pyo3_ffi::PyThreadState, + ) -> *const c_char; + + fn crashtracker_get_current_tstate() -> *mut pyo3_ffi::PyThreadState; + + fn pipe(pipefd: *mut [c_int; 2]) -> c_int; + fn read(fd: c_int, buf: *mut c_void, count: usize) -> isize; + fn close(fd: c_int) -> c_int; + fn fcntl(fd: c_int, cmd: c_int, arg: c_int) -> c_int; +} + +// Constants for fcntl +const F_SETFL: c_int = 4; +const O_NONBLOCK: c_int = 0o4000; + pub trait RustWrapper { type Inner; const INNER_TYPE_NAME: &'static str; @@ -178,7 +202,7 @@ impl CrashtrackerMetadataPy { } impl RustWrapper for CrashtrackerMetadataPy { - type Inner = Metadata; + type Inner = datadog_crashtracker::Metadata; const INNER_TYPE_NAME: &'static str = "Metadata"; fn take_inner(&mut self) -> Option<Self::Inner> { @@ -288,3 +312,425 @@ pub fn crashtracker_status() -> anyhow::Result { pub fn crashtracker_receiver() -> anyhow::Result<()> { datadog_crashtracker::receiver_entry_point_stdin() } + +/// Result type for runtime callback operations +#[pyclass( + eq, + eq_int, + name = "CallbackResult", + module = "datadog.internal._native" +)] +#[derive(Debug, PartialEq, Eq)] +pub enum CallbackResult { + Ok,
NullCallback, + UnknownError, +} + +impl From for CallbackResult { + fn from(error: CallbackError) -> Self { + match error { + CallbackError::NullCallback => CallbackResult::NullCallback, + } + } +} + +/// Runtime-specific stack frame representation for FFI +/// +/// This struct is used to pass runtime stack frame information from language +/// runtimes to the crashtracker during crash handling. +#[pyclass(name = "RuntimeStackFrame", module = "datadog.internal._native")] +#[derive(Debug, Clone)] +pub struct RuntimeStackFramePy { + pub function_name: Option, + pub file_name: Option, + pub line_number: u32, + pub column_number: u32, + pub class_name: Option, + pub module_name: Option, +} + +#[pymethods] +impl RuntimeStackFramePy { + #[new] + fn new( + function_name: Option, + file_name: Option, + line_number: u32, + column_number: u32, + class_name: Option, + module_name: Option, + ) -> Self { + Self { + function_name, + file_name, + line_number, + column_number, + class_name, + module_name, + } + } + + #[getter] + fn get_function_name(&self) -> Option { + self.function_name.clone() + } + + #[getter] + fn get_file_name(&self) -> Option { + self.file_name.clone() + } + + #[getter] + fn get_line_number(&self) -> u32 { + self.line_number + } + + #[getter] + fn get_column_number(&self) -> u32 { + self.column_number + } + + #[getter] + fn get_class_name(&self) -> Option { + self.class_name.clone() + } + + #[getter] + fn get_module_name(&self) -> Option { + self.module_name.clone() + } +} + +// Constants for signal-safe operation +const MAX_FRAMES: usize = 64; +const MAX_STRING_LEN: usize = 256; +const MAX_TRACEBACK_SIZE: usize = 64 * 1024; // 64KB buffer for traceback text + +// Stack-allocated buffer for signal-safe string handling +struct StackBuffer { + data: [u8; MAX_STRING_LEN], + len: usize, +} + +impl StackBuffer { + const fn new() -> Self { + Self { + data: [0u8; MAX_STRING_LEN], + len: 0, + } + } + + fn as_ptr(&self) -> *const c_char { + self.data.as_ptr() as *const c_char + } + + fn set_from_str(&mut self, s: &str) { + let bytes = s.as_bytes(); + let copy_len = bytes.len().min(MAX_STRING_LEN - 1); + self.data[..copy_len].copy_from_slice(&bytes[..copy_len]); + self.data[copy_len] = 0; + self.len = copy_len; + } +} + +// Parse a single traceback line into frame information +// ' File "/path/to/file.py", line 42, in function_name' +fn parse_traceback_line( + line: &str, + function_buf: &mut StackBuffer, + file_buf: &mut StackBuffer, +) -> u32 { + let trimmed = line.trim(); + + // Look for the pattern: File "filename", line number, in function_name + if let Some(file_start) = trimmed.find('"') { + if let Some(file_end) = trimmed[file_start + 1..].find('"') { + let file_path = &trimmed[file_start + 1..file_start + 1 + file_end]; + file_buf.set_from_str(file_path); + + let after_file = &trimmed[file_start + file_end + 2..]; + if let Some(line_start) = after_file.find("line ") { + let line_part = &after_file[line_start + 5..]; + + // Try to find comma first ("line 42, in func") + let line_num = if let Some(line_end) = line_part.find(',') { + let line_str = line_part[..line_end].trim(); + line_str.parse::().unwrap_or(0) + } else { + // No comma, try space ("line 42 in func") + if let Some(space_pos) = line_part.find(' ') { + let line_str = line_part[..space_pos].trim(); + line_str.parse::().unwrap_or(0) + } else { + // Just numbers until end + let line_str = line_part.trim(); + line_str.parse::().unwrap_or(0) + } + }; + + // Look for function name + if let Some(in_pos) = after_file.find(" in ") 
{ + let func_name = after_file[in_pos + 4..].trim(); + function_buf.set_from_str(func_name); + } else { + function_buf.set_from_str(""); + } + + return line_num; + } + } + } + + // Fallback parsing + function_buf.set_from_str(""); + file_buf.set_from_str(""); + 0 +} + +// Parse traceback text and emit frames +unsafe fn parse_and_emit_traceback( + traceback_text: &str, + emit_frame: unsafe extern "C" fn(*mut c_void, *const RuntimeStackFrame), + writer_ctx: *mut c_void, +) { + let lines: Vec<&str> = traceback_text.lines().collect(); + let mut frame_count = 0; + + for line in lines { + if frame_count >= MAX_FRAMES { + break; + } + + // Look for lines that start with " File " - these are stack frame lines + if line.trim_start().starts_with("File ") { + let mut function_buf = StackBuffer::new(); + let mut file_buf = StackBuffer::new(); + + let line_number = parse_traceback_line(line, &mut function_buf, &mut file_buf); + + let c_frame = RuntimeStackFrame { + function_name: function_buf.as_ptr(), + file_name: file_buf.as_ptr(), + line_number, + column_number: 0, + class_name: ptr::null(), + module_name: ptr::null(), + }; + + emit_frame(writer_ctx, &c_frame); + frame_count += 1; + } + } +} + +unsafe fn dump_python_traceback_via_cpython_api( + emit_frame: unsafe extern "C" fn(*mut c_void, *const RuntimeStackFrame), + writer_ctx: *mut c_void, +) { + let mut pipefd: [c_int; 2] = [0, 0]; + if pipe(&mut pipefd as *mut [c_int; 2]) != 0 { + emit_fallback_frame(emit_frame, writer_ctx, ""); + return; + } + + let read_fd = pipefd[0]; + let write_fd = pipefd[1]; + + // Make the read end non-blocking + fcntl(read_fd, F_SETFL, O_NONBLOCK); + + // Get the current thread state safely - same approach as CPython's faulthandler + // SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL are synchronous signals and + // are thus delivered to the thread that caused the fault. 
+    let current_tstate = crashtracker_get_current_tstate();
+
+    // Call the CPython internal API via our C wrapper
+    // Pass NULL for interpreter state since _Py_DumpTracebackThreads handles it internally
+    let error_msg = crashtracker_dump_traceback_threads(write_fd, ptr::null_mut(), current_tstate);
+
+    close(write_fd);
+
+    if !error_msg.is_null() {
+        close(read_fd);
+        let error_str = std::ffi::CStr::from_ptr(error_msg);
+        if let Ok(error_string) = error_str.to_str() {
+            emit_fallback_frame(emit_frame, writer_ctx, error_string);
+        } else {
+            emit_fallback_frame(emit_frame, writer_ctx, "");
+        }
+        return;
+    }
+
+    let mut buffer = vec![0u8; MAX_TRACEBACK_SIZE];
+    let bytes_read = read(
+        read_fd,
+        buffer.as_mut_ptr() as *mut c_void,
+        MAX_TRACEBACK_SIZE,
+    );
+
+    close(read_fd);
+
+    if bytes_read > 0 {
+        buffer.truncate(bytes_read as usize);
+        if let Ok(traceback_text) = std::str::from_utf8(&buffer) {
+            parse_and_emit_traceback(traceback_text, emit_frame, writer_ctx);
+            return;
+        }
+    }
+
+    // If we get here, something went wrong with reading the output
+    emit_fallback_frame(emit_frame, writer_ctx, "");
+}
+
+// Helper function to emit a fallback frame with error information
+unsafe fn emit_fallback_frame(
+    emit_frame: unsafe extern "C" fn(*mut c_void, *const RuntimeStackFrame),
+    writer_ctx: *mut c_void,
+    error_msg: &str,
+) {
+    let mut function_buf = StackBuffer::new();
+    let mut file_buf = StackBuffer::new();
+    function_buf.set_from_str(error_msg);
+    file_buf.set_from_str("");
+
+    let fallback_frame = RuntimeStackFrame {
+        function_name: function_buf.as_ptr(),
+        file_name: file_buf.as_ptr(),
+        line_number: 0,
+        column_number: 0,
+        class_name: ptr::null(),
+        module_name: ptr::null(),
+    };
+
+    emit_frame(writer_ctx, &fallback_frame);
+}
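Both traceback dumps here funnel CPython's internal writer through a pipe and read the result back. The same round trip can be sketched with the public `faulthandler` API, which also accepts a raw file descriptor (illustrative Python, assumes POSIX; not part of this patch):

```python
import faulthandler
import os


def capture_traceback(max_size: int = 64 * 1024) -> str:
    """Dump the current thread's traceback through a pipe, like the Rust code
    does with _Py_DumpTracebackThreads (faulthandler is the public analogue)."""
    read_fd, write_fd = os.pipe()
    os.set_blocking(read_fd, False)  # mirror the O_NONBLOCK fcntl on the read end
    try:
        faulthandler.dump_traceback(write_fd, all_threads=False)
    finally:
        os.close(write_fd)
    try:
        data = os.read(read_fd, max_size)
    except BlockingIOError:
        data = b""  # nothing was written; the Rust code emits a fallback here
    finally:
        os.close(read_fd)
    return data.decode("utf-8", errors="replace")


print(capture_traceback())
```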
+/// Dump Python traceback as a complete string
+///
+/// This function captures the Python traceback via CPython's internal API
+/// and emits it as a single string instead of parsing into individual frames.
+/// This is more efficient and preserves the original Python formatting.
+unsafe fn dump_python_traceback_as_string(
+    emit_stacktrace_string: unsafe extern "C" fn(*mut c_void, *const c_char),
+    writer_ctx: *mut c_void,
+) {
+    // Create a pipe to capture CPython internal traceback dump
+    let mut pipefd: [c_int; 2] = [0, 0];
+    if pipe(&mut pipefd as *mut [c_int; 2]) != 0 {
+        emit_stacktrace_string(
+            writer_ctx,
+            "\0".as_ptr() as *const c_char,
+        );
+        return;
+    }
+
+    let read_fd = pipefd[0];
+    let write_fd = pipefd[1];
+
+    // Make the read end non-blocking
+    fcntl(read_fd, F_SETFL, O_NONBLOCK);
+
+    // Get the current thread state safely - same approach as CPython's faulthandler
+    // SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL are synchronous signals and
+    // are thus delivered to the thread that caused the fault.
+    let current_tstate = crashtracker_get_current_tstate();
+
+    // Call the CPython internal API via our C wrapper
+    // Pass NULL for interpreter state - let _Py_DumpTracebackThreads handle it internally
+    let error_msg = crashtracker_dump_traceback_threads(write_fd, ptr::null_mut(), current_tstate);
+
+    close(write_fd);
+
+    // Check for errors from _Py_DumpTracebackThreads
+    if !error_msg.is_null() {
+        close(read_fd);
+        // Note: We can't format the error message because we're in a signal context
+        // Just emit a generic error message
+        emit_stacktrace_string(
+            writer_ctx,
+            "\0".as_ptr() as *const c_char,
+        );
+        return;
+    }
+
+    // Read the traceback output
+    let mut buffer = vec![0u8; MAX_TRACEBACK_SIZE];
+    let bytes_read = read(
+        read_fd,
+        buffer.as_mut_ptr() as *mut c_void,
+        MAX_TRACEBACK_SIZE,
+    );
+
+    close(read_fd);
+
+    if bytes_read > 0 {
+        buffer.truncate(bytes_read as usize);
+        if let Ok(traceback_text) = std::str::from_utf8(&buffer) {
+            emit_stacktrace_string(writer_ctx, traceback_text.as_ptr() as *const c_char);
+            return;
+        }
+    }
+
+    emit_stacktrace_string(
+        writer_ctx,
+        "\0".as_ptr() as *const c_char,
+    );
+}
+
+unsafe extern "C" fn native_runtime_stack_callback(
+    emit_frame: unsafe extern "C" fn(*mut c_void, *const RuntimeStackFrame),
+    _emit_stacktrace_string: unsafe extern "C" fn(*mut c_void, *const c_char),
+    writer_ctx: *mut c_void,
+) {
+    // dump_python_traceback_as_string(emit_stacktrace_string, writer_ctx);
+    dump_python_traceback_via_cpython_api(emit_frame, writer_ctx);
+}
+
+/// Register the native runtime stack collection callback
+///
+/// This function registers a native callback that directly collects Python runtime
+/// stack traces without requiring Python callback functions. It uses frame-by-frame
+/// collection for detailed stack information.
+///
+/// # Returns
+/// - `CallbackResult::Ok` if registration succeeds (replaces any existing callback)
+#[pyfunction(name = "crashtracker_register_native_runtime_callback")]
+pub fn crashtracker_register_native_runtime_callback() -> CallbackResult {
+    match register_runtime_stack_callback(
+        native_runtime_stack_callback,
+        RuntimeType::Python,
+        CallbackType::Frame,
+    ) {
+        Ok(()) => CallbackResult::Ok,
+        Err(e) => e.into(),
+    }
+}
+
+/// Check if a runtime callback is currently registered
+///
+/// # Returns
+/// - `True` if a callback is registered
+/// - `False` if no callback is registered
+#[pyfunction(name = "crashtracker_is_runtime_callback_registered")]
+pub fn crashtracker_is_runtime_callback_registered() -> bool {
+    is_runtime_callback_registered()
+}
+
+/// Get the runtime type of the currently registered callback
+///
+/// # Returns
+/// - The runtime type string if a callback is registered
+/// - `None` if no callback is registered
+#[pyfunction(name = "crashtracker_get_registered_runtime_type")]
+pub fn crashtracker_get_registered_runtime_type() -> Option<String> {
+    unsafe {
+        let ptr = get_registered_runtime_type_ptr();
+        if ptr.is_null() {
+            None
+        } else {
+            let c_str = std::ffi::CStr::from_ptr(ptr);
+            c_str.to_str().ok().map(|s| s.to_string())
+        }
+    }
+}
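Together, the three `#[pyfunction]`s above give Python a small control surface over the native callback. A hedged usage sketch (the import path is an assumption based on the `module = "datadog.internal._native"` declared on the pyclass earlier in this file):

```python
# Hypothetical usage; the import path mirrors the module string on RuntimeStackFrame.
from datadog.internal import _native

result = _native.crashtracker_register_native_runtime_callback()
assert _native.crashtracker_is_runtime_callback_registered()
# Returns the registered runtime type string (whatever the native side reports
# for RuntimeType::Python), or None when nothing is registered.
print(_native.crashtracker_get_registered_runtime_type())
```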
diff --git a/src/native/data_pipeline/mod.rs b/src/native/data_pipeline/mod.rs
index 2c5f47ee29a..415383d58a9 100644
--- a/src/native/data_pipeline/mod.rs
+++ b/src/native/data_pipeline/mod.rs
@@ -156,6 +156,11 @@ impl TraceExporterBuilderPy {
         Ok(slf.into())
     }
 
+    fn enable_health_metrics(mut slf: PyRefMut<'_, Self>) -> PyResult<Py<Self>> {
+        slf.try_as_mut()?.enable_health_metrics();
+        Ok(slf.into())
+    }
+
     /// Consumes the wrapped builder.
     ///
     /// The builder shouldn't be reused
diff --git a/src/native/lib.rs b/src/native/lib.rs
index a84ebf2c46a..917cf173e38 100644
--- a/src/native/lib.rs
+++ b/src/native/lib.rs
@@ -5,9 +5,9 @@ pub use datadog_profiling_ffi::*;
 mod data_pipeline;
 mod ddsketch;
 mod library_config;
+mod log;
 
 use pyo3::prelude::*;
-use pyo3::wrap_pyfunction;
 
 /// Dummy function to check if imported lib is generated on windows builds.
 #[no_mangle]
@@ -25,14 +25,33 @@ fn _native(m: &Bound<'_, PyModule>) -> PyResult<()> {
         m.add_class::()?;
         m.add_class::()?;
         m.add_class::()?;
+        m.add_class::()?;
+        m.add_class::()?;
         m.add_function(wrap_pyfunction!(crashtracker::crashtracker_init, m)?)?;
         m.add_function(wrap_pyfunction!(crashtracker::crashtracker_on_fork, m)?)?;
         m.add_function(wrap_pyfunction!(crashtracker::crashtracker_status, m)?)?;
         m.add_function(wrap_pyfunction!(crashtracker::crashtracker_receiver, m)?)?;
+        m.add_function(wrap_pyfunction!(
+            crashtracker::crashtracker_register_native_runtime_callback,
+            m
+        )?)?;
+        m.add_function(wrap_pyfunction!(
+            crashtracker::crashtracker_is_runtime_callback_registered,
+            m
+        )?)?;
+        m.add_function(wrap_pyfunction!(
+            crashtracker::crashtracker_get_registered_runtime_type,
+            m
+        )?)?;
     }
     m.add_class::()?;
     m.add_class::()?;
     m.add_wrapped(wrap_pyfunction!(library_config::store_metadata))?;
     data_pipeline::register_data_pipeline(m)?;
+
+    // Add logger submodule
+    let logger_module = pyo3::wrap_pymodule!(log::logger);
+    m.add_wrapped(logger_module)?;
+
     Ok(())
 }
diff --git a/src/native/library_config.rs b/src/native/library_config.rs
index 16f2b76b45a..ce6b39d7a69 100644
--- a/src/native/library_config.rs
+++ b/src/native/library_config.rs
@@ -118,6 +118,8 @@ pub fn store_metadata(data: &PyTracerMetadata) -> PyResult
diff --git a/src/native/log.rs b/src/native/log.rs
new file mode 100644
--- /dev/null
+++ b/src/native/log.rs
+    fn configure(kwds: Option<&Bound<'_, PyDict>>) -> PyResult<()> {
+        let output;
+
+        if let Some(kwargs) = kwds {
+            output = kwargs
+                .get_item("output")?
+                .ok_or_else(|| PyValueError::new_err("Missing output argument"))?
+                .extract()?;
+        } else {
+            output = "stdout".to_string();
+        }
+
+        match output.as_str() {
+            "stdout" => logger_configure_std(StdConfig {
+                target: StdTarget::Out,
+            })
+            .map_err(|e| PyValueError::new_err(e.to_string())),
+            "stderr" => logger_configure_std(StdConfig {
+                target: StdTarget::Err,
+            })
+            .map_err(|e| PyValueError::new_err(e.to_string())),
+            "file" => {
+                let kwargs =
+                    kwds.ok_or_else(|| PyValueError::new_err("Missing arguments for file"))?;
+
+                let path: String = kwargs
+                    .get_item("path")?
+                    .ok_or_else(|| {
+                        PyValueError::new_err("Missing required argument for file: path")
+                    })?
+                    .extract()?;
+
+                let max_files: u64 = kwargs
+                    .get_item("max_files")?
+                    .map(|v| v.extract())
+                    .transpose()?
+                    .unwrap_or(0);
+
+                let max_size_bytes: u64 = kwargs
+                    .get_item("max_size_bytes")?
+                    .map(|v| v.extract())
+                    .transpose()?
+ .unwrap_or(0); + + let cfg = FileConfig { + path, + max_files, + max_size_bytes, + }; + logger_configure_file(cfg).map_err(|e| PyValueError::new_err(e.to_string())) + } + other => Err(PyValueError::new_err(format!("Invalid output: {other}"))), + } + } + + /// Disable logging output by type: "file", "stdout", "stderr" + #[pyfunction] + fn disable(output: &str) -> PyResult<()> { + match output { + "file" => logger_disable_file().map_err(|e| PyValueError::new_err(e.to_string())), + "stdout" | "stderr" => { + logger_disable_std().map_err(|e| PyValueError::new_err(e.to_string())) + } + other => Err(PyValueError::new_err(format!("Invalid output: {other}"))), + } + } + + /// Set log level (trace, debug, info, warn, error) + #[pyfunction] + fn set_log_level(level: &str) -> PyResult<()> { + let rust_level = match level.to_lowercase().as_str() { + "trace" => LogEventLevel::Trace, + "debug" => LogEventLevel::Debug, + "info" => LogEventLevel::Info, + "warn" => LogEventLevel::Warn, + "error" => LogEventLevel::Error, + other => return Err(PyValueError::new_err(format!("Invalid log level: {other}"))), + }; + logger_set_log_level(rust_level).map_err(|e| PyValueError::new_err(e.to_string())) + } + + /// Logs a message + #[pyfunction] + fn log(level: &str, message: &str) -> PyResult<()> { + match level.to_lowercase().as_str() { + "trace" => trace!("{}", message), + "debug" => debug!("{}", message), + "info" => info!("{}", message), + "warn" => warn!("{}", message), + "error" => error!("{}", message), + other => return Err(PyValueError::new_err(format!("Invalid log level: {other}"))), + } + Ok(()) + } +} diff --git a/supported_versions_output.json b/supported_versions_output.json index 38febdedae5..6230e8db51b 100644 --- a/supported_versions_output.json +++ b/supported_versions_output.json @@ -10,7 +10,7 @@ "dependency": "aiohttp", "integration": "aiohttp", "minimum_tracer_supported": "3.7.4.post0", - "max_tracer_supported": "3.12.11", + "max_tracer_supported": "3.12.15", "auto-instrumented": true }, { @@ -68,7 +68,7 @@ "dependency": "pytest-asyncio", "integration": "asyncio", "minimum_tracer_supported": "0.21.1", - "max_tracer_supported": "0.21.1", + "max_tracer_supported": "1.2.0", "pinned": "true", "auto-instrumented": true }, @@ -315,6 +315,13 @@ "max_tracer_supported": "24.11.1", "auto-instrumented": true }, + { + "dependency": "google-adk", + "integration": "google_adk", + "minimum_tracer_supported": "1.0.0", + "max_tracer_supported": "1.14.1", + "auto-instrumented": true + }, { "dependency": "google-genai", "integration": "google_genai", @@ -432,7 +439,7 @@ "dependency": "mariadb", "integration": "mariadb", "minimum_tracer_supported": "1.0.11", - "max_tracer_supported": "1.1.12", + "max_tracer_supported": "1.1.13", "auto-instrumented": true }, { diff --git a/supported_versions_table.csv b/supported_versions_table.csv index cbcc671143f..96c13be8b85 100644 --- a/supported_versions_table.csv +++ b/supported_versions_table.csv @@ -1,6 +1,6 @@ dependency,integration,minimum_tracer_supported,max_tracer_supported,auto-instrumented aiobotocore,aiobotocore,1.0.7,2.16.0,False -aiohttp,aiohttp,3.7.4.post0,3.12.11,True +aiohttp,aiohttp,3.7.4.post0,3.12.15,True aiohttp-jinja2,aiohttp_jinja2,1.5.1,1.6,True aiohttp_jinja2,aiohttp_jinja2,1.5.1,1.6,True aiomysql,aiomysql,0.1.1,0.2.0,True @@ -8,7 +8,7 @@ aiopg,aiopg *,0.16.0,1.4.0,True algoliasearch,algoliasearch *,2.5.0,2.6.3,True anthropic,anthropic,0.28.1,0.52.2,True aredis,aredis,1.1.8,1.1.8,True -pytest-asyncio,asyncio *,0.21.1,0.21.1,True 
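Looking back at the logger submodule registered in `src/native/lib.rs` above: from Python it would be driven roughly like this (hypothetical sketch; the compiled module's import path is an assumption, while the keyword names come from `configure` in `src/native/log.rs`):

```python
# Hypothetical usage of the native logger submodule; import path assumed from the
# pyclass module string "datadog.internal._native" used elsewhere in this patch.
from datadog.internal._native import logger

logger.set_log_level("debug")      # accepts trace / debug / info / warn / error
logger.configure(output="stdout")  # or output="stderr"; defaults to stdout
logger.configure(
    output="file",
    path="/tmp/ddtrace.log",
    max_files=5,                   # 0 means "no limit" per the Rust defaults
    max_size_bytes=10 * 1024 * 1024,
)
logger.log("info", "native logger configured")
logger.disable("file")             # "file", "stdout" or "stderr"
```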
+pytest-asyncio,asyncio *,0.21.1,1.2.0,True asyncpg,asyncpg,0.22.0,0.30.0,True avro,avro,1.12.0,1.12.0,True datadog-lambda,aws_lambda,6.105.0,6.105.0,True @@ -43,6 +43,7 @@ flask-cache,flask_cache,0.13.1,0.13.1,False flask-caching,flask_cache,1.10.1,2.3.0,False freezegun,freezegun *,1.3.1,1.5.2,False gevent,gevent,20.12.1,24.11.1,True +google-adk,google_adk,1.0.0,1.14.1,True google-genai,google_genai,1.21.1,1.21.1,True google-generativeai,google_generativeai,0.7.2,0.8.3,True graphql-core,graphql,3.1.7,3.2.6,True @@ -59,7 +60,7 @@ litellm,litellm *,1.65.4,1.65.4,True logbook,logbook,1.0.0,1.8.1,True loguru,loguru,0.4.1,0.7.2,True mako,mako,1.0.14,1.3.8,True -mariadb,mariadb,1.0.11,1.1.12,True +mariadb,mariadb,1.0.11,1.1.13,True mcp,mcp,1.10.1,1.11.0,True molten,molten,1.0.2,1.0.2,True mongoengine,mongoengine,0.23.1,0.29.1,True diff --git a/tests/appsec/app.py b/tests/appsec/app.py index f7ba53e076f..eb7c977defa 100644 --- a/tests/appsec/app.py +++ b/tests/appsec/app.py @@ -1,10 +1,10 @@ -"""This Flask application is imported on tests.appsec.appsec_utils.gunicorn_server""" +"""This Flask application is imported on tests.appsec.appsec_utils.gunicorn_flask_server""" import ddtrace.auto # noqa: F401 # isort: skip import copy import os import re import shlex -import subprocess # nosec +import subprocess from flask import Flask from flask import Response @@ -13,7 +13,6 @@ import urllib3 from wrapt import FunctionWrapper -import ddtrace from ddtrace import tracer from ddtrace.appsec._iast import ddtrace_iast_flask_patch from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled @@ -98,9 +97,6 @@ import tests.appsec.integrations.flask_tests.module_with_import_errors as module_with_import_errors -# Patch urllib3 since they are not patched automatically -ddtrace.patch_all(urllib3=True) # type: ignore - app = Flask(__name__) app.register_blueprint(pkg_aiohttp) app.register_blueprint(pkg_aiosignal) @@ -980,10 +976,17 @@ def iast_ast_patching_non_re_search(): return resp -@app.route("/common-modules-patch-read", methods=["GET"]) -def test_flask_common_modules_patch_read(): - copy_open = copy.deepcopy(open) - return Response(f"OK: {isinstance(copy_open, FunctionWrapper)}") +@app.route("/common-modules-patch", methods=["GET"]) +def test_flask_common_modules_patch(): + function = request.args.get("function") + copy_function = "" + if function == "open": + copy_function = copy.deepcopy(open) + elif function == "os_system": + copy_function = os.system + elif function == "subprocess_popen": + copy_function = subprocess.Popen.__init__ + return Response(f"OK: {isinstance(copy_function, FunctionWrapper)}") @app.route("/returnheaders", methods=["GET"]) @@ -997,14 +1000,78 @@ def return_headers(*args, **kwargs): @app.route("/vulnerablerequestdownstream", methods=["GET"]) def vulnerable_request_downstream(): _weak_hash_vulnerability() + port = str(request.args.get("port", "8050")) # Propagate the received headers to the downstream service http_poolmanager = urllib3.PoolManager(num_pools=1) # Sending a GET request and getting back response as HTTPResponse object. - response = http_poolmanager.request("GET", "http://localhost:8050/returnheaders") + response = http_poolmanager.request("GET", f"http://localhost:{port}/returnheaders") http_poolmanager.clear() return Response(response.data) +@app.route("/gevent-greenlet", methods=["GET"]) +def gevent_greenlet(): + """Spawn and join a gevent Greenlet to ensure no deadlocks with IAST/gevent. 
+
+    This endpoint is used by tests parametrized with Gunicorn/gevent configurations
+    to validate stability under gevent monkey-patching.
+    """
+    try:
+        import gevent
+        from gevent import Greenlet
+
+        def _noop():
+            return True
+
+        g = Greenlet(_noop)
+        g.start()
+        gevent.joinall([g])
+        ok = g.value is True
+    except Exception:
+        # If gevent is not available, still return OK to keep the test minimal
+        ok = True
+    return Response(f"OK:{ok}")
+
+
+@app.route("/socketpair", methods=["GET"])
+def socketpair_roundtrip():
+    """Exercise socket.socketpair send/recv lifecycle and return OK status."""
+    try:
+        import socket
+
+        s1, s2 = socket.socketpair()
+        try:
+            msg = b"ping"
+            s1.sendall(msg)
+            data = s2.recv(16)
+            ok = data == msg
+        finally:
+            s1.close()
+            s2.close()
+    except Exception:
+        ok = False
+    return Response(f"OK:{ok}")
+
+
+@app.route("/subprocess-popen", methods=["GET"])
+def subprocess_popen_ok():
+    """Run a trivial subprocess to ensure process lifecycle behaves under gevent."""
+    ok = True
+    try:
+        subp = (
+            subprocess.Popen(args=["/bin/echo", "ok"])
+            if os.path.exists("/bin/echo")
+            else subprocess.Popen(args=["echo", "ok"])
+        )
+        subp.communicate()
+        rc = subp.wait()
+        ok = rc == 0
+    except Exception:
+        ok = False
+    return Response(f"OK:{ok}")
+
+
 if __name__ == "__main__":
     env_port = os.getenv("FLASK_RUN_PORT", 8000)
     debug = asbool(os.getenv("FLASK_DEBUG", "false"))
diff --git a/tests/appsec/appsec_utils.py b/tests/appsec/appsec_utils.py
index 2bdbd8c045b..90fb6897a62 100644
--- a/tests/appsec/appsec_utils.py
+++ b/tests/appsec/appsec_utils.py
@@ -4,6 +4,7 @@
 import signal
 import subprocess
 import sys
+import typing as _t
 
 from requests.exceptions import ConnectionError  # noqa: A004
 
@@ -19,7 +20,7 @@
 
 
 @contextmanager
-def gunicorn_server(
+def gunicorn_flask_server(
     use_ddtrace_cmd=True,
     appsec_enabled="true",
     iast_enabled="false",
@@ -90,6 +91,60 @@ def flask_server(
     )
 
 
+@contextmanager
+def gunicorn_django_server(
+    use_ddtrace_cmd: bool = True,
+    appsec_enabled: str = "true",
+    iast_enabled: str = "false",
+    remote_configuration_enabled: str = "true",
+    tracer_enabled: str = "true",
+    apm_tracing_enabled: str = "true",
+    token: _t.Optional[str] = None,
+    port: int = 8000,
+    workers: str = "1",
+    use_threads: bool = False,
+    use_gevent: bool = False,
+    assert_debug: bool = False,
+    env: _t.Optional[dict] = None,
+):
+    """Run the Django test application under Gunicorn.
+
+    Uses the WSGI application at
+    ``tests.appsec.integrations.django_tests.django_app.wsgi:application``.
+    Mirrors options supported by gunicorn_flask_server.
+ """ + cmd = ["gunicorn", "-w", workers, "--log-level", "debug"] + if use_ddtrace_cmd: + cmd = ["python", "-m", "ddtrace.commands.ddtrace_run"] + cmd + if use_threads: + cmd += ["--threads", "1"] + if use_gevent: + cmd += ["-k", "gevent"] + cmd += [ + "-b", + f"0.0.0.0:{port}", + "tests.appsec.integrations.django_tests.django_app.wsgi:application", + ] + # Ensure Django settings are set for WSGI + extra_env = { + "DJANGO_SETTINGS_MODULE": "tests.appsec.integrations.django_tests.django_app.settings", + } + if env: + extra_env.update(env) + yield from appsec_application_server( + cmd, + appsec_enabled=appsec_enabled, + iast_enabled=iast_enabled, + apm_tracing_enabled=apm_tracing_enabled, + remote_configuration_enabled=remote_configuration_enabled, + tracer_enabled=tracer_enabled, + token=token, + env=extra_env, + port=port, + assert_debug=assert_debug, + ) + + @contextmanager def django_server( python_cmd="python", @@ -103,6 +158,8 @@ def django_server( env=None, assert_debug=False, manual_propagation_debug=False, + *args, + **kwargs, ): """ Context manager that runs a Django test server in a subprocess. @@ -197,6 +254,16 @@ def appsec_application_server( assert_debug=False, manual_propagation_debug=False, ): + """Start an application server subprocess for AppSec/IAST tests. + + This helper optionally applies CPU/memory limits to the spawned subprocess when the following + environment variables are set (Linux/Unix only): + - TEST_SUBPROC_MEM_MB: integer megabytes to cap address space (RLIMIT_AS) + - TEST_SUBPROC_CPU_AFFINITY: comma-separated CPU indices for sched_setaffinity (Linux) + - TEST_SUBPROC_NICE: integer niceness value to apply via os.nice() + + This is opt-in and introduces no behavior change unless the variables are provided. + """ env = _build_env(env, file_path=FILE_PATH) env["DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS"] = "0.5" env["DD_REMOTE_CONFIGURATION_ENABLED"] = remote_configuration_enabled @@ -234,6 +301,12 @@ def appsec_application_server( subprocess_kwargs["stderr"] = subprocess.PIPE subprocess_kwargs["text"] = True + # Only set preexec_fn on POSIX. It's ignored/unsupported on Windows. + if os.name == "posix": + preexec = _make_preexec() + if preexec is not None: + subprocess_kwargs["preexec_fn"] = preexec # type: ignore[assignment] + server_process = subprocess.Popen(cmd, **subprocess_kwargs) try: client = Client("http://0.0.0.0:%s" % port) @@ -281,3 +354,48 @@ def appsec_application_server( assert "Return from " in process_output assert "Return value is tainted" in process_output assert "Tainted arguments:" in process_output + + +def _make_preexec() -> _t.Optional[_t.Callable[[], None]]: + """Create a preexec_fn that applies resource limits if configured. + + Returns None if no limits were requested. + """ + mem_mb = os.environ.get("TEST_SUBPROC_MEM_MB") + cpu_aff = os.environ.get("TEST_SUBPROC_CPU_AFFINITY") + nice_val = os.environ.get("TEST_SUBPROC_NICE") + if not any((mem_mb, cpu_aff, nice_val)): + return None + + # Import inside to keep portability on Windows. 
+ try: + import resource # type: ignore[attr-defined] + except Exception: # pragma: no cover + resource = None # type: ignore[assignment] + + def _preexec(): # pragma: no cover - exercised in integration tests + # Set process group leader (already done via start_new_session) + # Apply niceness first to reduce priority + if nice_val is not None: + try: + os.nice(int(nice_val)) + except Exception: + pass + # CPU affinity (Linux only) + if cpu_aff: + try: + cpus = {int(x) for x in cpu_aff.split(",") if x.strip() != ""} + if hasattr(os, "sched_setaffinity") and cpus: + os.sched_setaffinity(0, cpus) # type: ignore[attr-defined] + except Exception: + pass + # Memory limit via RLIMIT_AS (virtual memory) + if mem_mb and resource is not None: + try: + limit_bytes = int(mem_mb) * 1024 * 1024 + resource.setrlimit(resource.RLIMIT_AS, (limit_bytes, limit_bytes)) + except Exception: + # Fall back silently if not supported + pass + + return _preexec diff --git a/tests/appsec/architectures/mini.py b/tests/appsec/architectures/mini.py index f0981167a55..bb10e976ed4 100644 --- a/tests/appsec/architectures/mini.py +++ b/tests/appsec/architectures/mini.py @@ -11,33 +11,24 @@ from flask import request # noqa: E402 import requests # noqa: E402 F401 +import ddtrace.internal.telemetry.writer # noqa: E402 from ddtrace.settings.asm import config as asm_config # noqa: E402 from ddtrace.version import get_version # noqa: E402 app = Flask(__name__) _TELEMETRY_DEPENDENCIES = [] +update_imported_dependencies = ddtrace.internal.telemetry.writer.update_imported_dependencies -# intercept telemetry events -from ddtrace.internal.telemetry.writer import TelemetryWriter # noqa: E402 - -_flush_events = TelemetryWriter._flush_events_queue - - -def _flush_events_wrapper(self): +def wrap_update_imported_dependencies(imported_dependencies, newly_imported_deps): global _TELEMETRY_DEPENDENCIES - res = _flush_events(self) - if res: - dependencies = [v.get("payload", {}).get("dependencies", {}) for v in res] - dependencies = [d for d in dependencies if d] - for lst in dependencies: - _TELEMETRY_DEPENDENCIES.extend(lst) - print(f"flushed events {dependencies}", flush=True) - return res + dependencies = update_imported_dependencies(imported_dependencies, newly_imported_deps) + _TELEMETRY_DEPENDENCIES.extend(dependencies) + return dependencies -TelemetryWriter._flush_events_queue = _flush_events_wrapper +ddtrace.internal.telemetry.writer.update_imported_dependencies = wrap_update_imported_dependencies @app.route("/") diff --git a/tests/appsec/contrib_appsec/fastapi_app/app.py b/tests/appsec/contrib_appsec/fastapi_app/app.py index 03cef3fc491..6cc2cf74b08 100644 --- a/tests/appsec/contrib_appsec/fastapi_app/app.py +++ b/tests/appsec/contrib_appsec/fastapi_app/app.py @@ -3,8 +3,10 @@ import os import sqlite3 import subprocess +from typing import AsyncGenerator from typing import Optional +from fastapi import Depends from fastapi import FastAPI from fastapi import Request from fastapi.responses import HTMLResponse @@ -40,6 +42,17 @@ class User(BaseModel): def get_app(): app = FastAPI() + async def get_db() -> AsyncGenerator[sqlite3.Connection, None]: + db = sqlite3.connect(":memory:") + db.execute("CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)") + db.execute("INSERT INTO users (id, name) VALUES ('1_secret_id', 'Alice')") + db.execute("INSERT INTO users (id, name) VALUES ('2_secret_id', 'Bob')") + db.execute("INSERT INTO users (id, name) VALUES ('3_secret_id', 'Christophe')") + try: + yield db + finally: + db.close() + 
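The `get_db` dependency just added follows FastAPI's generator-dependency pattern: the code before `yield` runs when the request arrives, and the `finally` block runs after the response, so every request gets a fresh connection that is always closed. A self-contained sketch of the same pattern (hypothetical route, not part of this patch):

```python
import sqlite3
from typing import Generator

from fastapi import Depends, FastAPI

app = FastAPI()


def get_db() -> Generator[sqlite3.Connection, None, None]:
    db = sqlite3.connect(":memory:")  # opened per request
    db.execute("CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)")
    db.execute("INSERT INTO users VALUES ('1', 'Alice')")
    try:
        yield db
    finally:
        db.close()  # always closed after the response is sent


@app.get("/users/{user_id}")
def read_user(user_id: str, db: sqlite3.Connection = Depends(get_db)):
    # Parameterized query here; the rasp endpoint in this patch is intentionally injectable.
    row = db.execute("SELECT * FROM users WHERE id = ?", (user_id,)).fetchone()
    return {"row": row}
```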
@app.middleware("http") async def passthrough_middleware(request: Request, call_next): """Middleware to test BlockingException nesting in ExceptionGroups (or BaseExceptionGroups) @@ -144,12 +157,7 @@ async def stream(): @app.get("/rasp/{endpoint:str}/") @app.post("/rasp/{endpoint:str}/") @app.options("/rasp/{endpoint:str}/") - async def rasp(endpoint: str, request: Request): - DB = sqlite3.connect(":memory:") - DB.execute("CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)") - DB.execute("INSERT INTO users (id, name) VALUES ('1_secret_id', 'Alice')") - DB.execute("INSERT INTO users (id, name) VALUES ('2_secret_id', 'Bob')") - DB.execute("INSERT INTO users (id, name) VALUES ('3_secret_id', 'Christophe')") + async def rasp(endpoint: str, request: Request, db: sqlite3.Connection = Depends(get_db)): query_params = request.query_params if endpoint == "lfi": res = ["lfi endpoint"] @@ -199,7 +207,7 @@ async def rasp(endpoint: str, request: Request): user_id = query_params[param] try: if param.startswith("user_id"): - cursor = DB.execute(f"SELECT * FROM users WHERE id = {user_id}") + cursor = db.execute(f"SELECT * FROM users WHERE id = {user_id}") res.append(f"Url: {list(cursor)}") except Exception as e: res.append(f"Error: {e}") diff --git a/tests/appsec/contrib_appsec/flask_app/app.py b/tests/appsec/contrib_appsec/flask_app/app.py index 5658454e684..9ac1f333039 100644 --- a/tests/appsec/contrib_appsec/flask_app/app.py +++ b/tests/appsec/contrib_appsec/flask_app/app.py @@ -308,6 +308,11 @@ def login(user_id: str, login: str) -> None: return "login failure", 401 +@app.route("/buggy_endpoint/", methods=None) +def buggy_endpoint(): + return "" + + @app.before_request def service_renaming(): if request.headers.get("x-rename-service", "false") == "true": diff --git a/tests/appsec/contrib_appsec/utils.py b/tests/appsec/contrib_appsec/utils.py index 895bc429f9d..32eed25b077 100644 --- a/tests/appsec/contrib_appsec/utils.py +++ b/tests/appsec/contrib_appsec/utils.py @@ -14,7 +14,6 @@ from ddtrace._trace.pin import Pin from ddtrace.appsec import _asm_request_context from ddtrace.appsec import _constants as asm_constants -from ddtrace.appsec._utils import get_security from ddtrace.appsec._utils import get_triggers from ddtrace.internal import constants from ddtrace.settings.asm import config as asm_config @@ -226,7 +225,7 @@ def parse(path: str) -> str: assert isinstance(ep.path, str) assert ep.resource_name assert ep.operation_name - if ep.method not in ("GET", "*", "POST"): + if ep.method not in ("GET", "*", "POST") or ep.path.startswith("/static"): continue path = parse(ep.path) found.add(path.rstrip("/")) @@ -235,7 +234,10 @@ def parse(path: str) -> str: if ep.method == "POST" else interface.client.get(path) ) - assert self.status(response) in (200, 401), f"ep.path failed: {ep.path} -> {path}" + assert self.status(response) in ( + 200, + 401, + ), f"ep.path failed: [{self.status(response)}] {ep.path} -> {path}" resource = "GET" + ep.resource_name[1:] if ep.resource_name.startswith("* ") else ep.resource_name assert find_resource(resource) assert must_found <= found @@ -1956,27 +1958,6 @@ def test_trace_tagging( sampling_decision = get_entry_span_tag(constants.SAMPLING_DECISION_TRACE_TAG_KEY) assert span_sampling_priority < 2 or sampling_decision != f"-{constants.SamplingMechanism.APPSEC}" - @pytest.mark.parametrize("rename_service", [True, False]) - @pytest.mark.parametrize("metastruct", [True, False]) - def test_iast(self, interface, root_span, get_tag, metastruct, rename_service): - from ddtrace.ext 
import http
-
-        with override_global_config(dict(_use_metastruct_for_iast=metastruct, _iast_use_root_span=True)):
-            url = "/rasp/command_injection/?cmds=."
-            self.update_tracer(interface)
-            response = interface.client.get(url, headers={"x-rename-service": str(rename_service).lower()})
-            assert self.status(response) == 200
-            assert get_tag(http.STATUS_CODE) == "200"
-            assert self.body(response).startswith("command_injection endpoint")
-            stack_traces = self.get_stack_trace(root_span, "vulnerability")
-            if asm_config._iast_enabled:
-                assert get_security(root_span()) is not None
-                # checking for iast stack traces
-                assert stack_traces
-            else:
-                assert get_security(root_span()) is None
-                assert stack_traces == []
-
     @pytest.mark.parametrize("endpoint", ["urlopen_request", "urlopen_string"])
     def test_api10(self, endpoint, interface, get_tag):
         """test api10 on downstream request headers on rasp endpoint"""
diff --git a/tests/appsec/iast/_ast/test_ast_patching.py b/tests/appsec/iast/_ast/test_ast_patching.py
index 4fd59940a9c..ce786290160 100644
--- a/tests/appsec/iast/_ast/test_ast_patching.py
+++ b/tests/appsec/iast/_ast/test_ast_patching.py
@@ -412,7 +412,7 @@ def test_should_not_iast_patch_if_stdlib(module_name):
 
 def test_module_path_none(caplog):
-    with caplog.at_level(logging.DEBUG), mock.patch("ddtrace.internal.module.Path.resolve", side_effect=AttributeError):
+    with caplog.at_level(logging.DEBUG), mock.patch("ddtrace.appsec._iast._ast.ast_patching.origin", return_value=None):
         assert ("", None) == astpatch_module(__import__("tests.appsec.iast.fixtures.ast.str.class_str", fromlist=["*"]))
         assert (
             "iast::instrumentation::ast_patching::compiling::"
diff --git a/tests/appsec/iast/conftest.py b/tests/appsec/iast/conftest.py
index f270f3e5193..7e5d57ba296 100644
--- a/tests/appsec/iast/conftest.py
+++ b/tests/appsec/iast/conftest.py
@@ -14,9 +14,8 @@
 from ddtrace.appsec._iast._taint_tracking._context import debug_context_array_free_slots_number
 from ddtrace.appsec._iast._taint_tracking._context import debug_context_array_size
 from ddtrace.appsec._iast.taint_sinks.code_injection import patch as code_injection_patch
-from ddtrace.appsec._iast.taint_sinks.command_injection import patch as cmdi_patch
-from ddtrace.appsec._iast.taint_sinks.command_injection import unpatch as cmdi_unpatch
 from ddtrace.appsec._iast.taint_sinks.header_injection import patch as header_injection_patch
+from ddtrace.appsec._iast.taint_sinks.untrusted_serialization import patch as untrusted_serialization_patch
 from ddtrace.appsec._iast.taint_sinks.weak_cipher import patch as weak_cipher_patch
 from ddtrace.appsec._iast.taint_sinks.weak_hash import patch as weak_hash_patch
 from ddtrace.appsec._iast.taint_sinks.weak_hash import unpatch_iast as weak_hash_unpatch
@@ -66,8 +65,8 @@ class MockSpan:
     _start_iast_context_and_oce(span)
     weak_hash_patch()
     weak_cipher_patch()
+    untrusted_serialization_patch()
     json_patch()
-    cmdi_patch()
     header_injection_patch()
     code_injection_patch()
     patch_common_modules()
@@ -75,7 +74,6 @@ class MockSpan:
         yield
     finally:
         unpatch_common_modules()
-        cmdi_unpatch()
         weak_hash_unpatch()
         _testing_unpatch_iast()
         _end_iast_context_and_oce(span)
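The two fixture modules added next rely on a `# label <name>` convention: each sink call is preceded by a label comment, and the tests resolve that label to the expected file/line of the reported vulnerability via `get_line_and_hash`. A toy version of the lookup (our sketch of the idea, not the real helper, which also derives the vulnerability hash):

```python
# Toy illustration of the label convention used by the fixture files below.
def find_label_line(path: str, label: str) -> int:
    with open(path) as fixture:
        for lineno, line in enumerate(fixture, start=1):
            if f"# label {label}" in line:
                # In these fixtures the sink call sits on the line after its label.
                return lineno + 1
    raise LookupError(f"label {label!r} not found in {path}")
```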
diff --git a/tests/appsec/iast/fixtures/taint_sinks/command_injection.py b/tests/appsec/iast/fixtures/taint_sinks/command_injection.py
new file mode 100644
index 00000000000..59dca198a7c
--- /dev/null
+++ b/tests/appsec/iast/fixtures/taint_sinks/command_injection.py
@@ -0,0 +1,50 @@
+"""
+CAVEAT: the line number is important to some IAST tests, so be careful when modifying this file and update the tests
+if you make changes
+"""
+import os
+import subprocess
+
+
+def pt_os_system(cmd, param):
+    # label pt_os_system
+    os.system(cmd + param)
+
+
+def pt_subprocess_popen(cmd):
+    # label pt_subprocess_popen
+    subp = subprocess.Popen(args=cmd)
+    subp.communicate()
+    subp.wait()
+
+
+def pt_subprocess_popen_shell(cmd):
+    # label pt_subprocess_popen_shell
+    subp = subprocess.Popen(cmd, shell=True)
+    subp.communicate()
+    subp.wait()
+
+
+def pt_subprocess_run(cmd):
+    # label pt_subprocess_run
+    return subprocess.run(cmd)
+
+
+def pt_spawnl(mode, command, *ags):
+    # label pt_spawnl
+    return os.spawnl(mode, command, *ags)
+
+
+def pt_spawnlp(mode, command, *ags):
+    # label pt_spawnlp
+    return os.spawnlp(mode, command, *ags)
+
+
+def pt_spawnv(mode, command, *ags):
+    # label pt_spawnv
+    return os.spawnv(mode, command, *ags)
+
+
+def pt_spawnvp(mode, command, *ags):
+    # label pt_spawnvp
+    return os.spawnvp(mode, command, *ags)
diff --git a/tests/appsec/iast/fixtures/taint_sinks/ssrf.py b/tests/appsec/iast/fixtures/taint_sinks/ssrf.py
new file mode 100644
index 00000000000..b0775bfaade
--- /dev/null
+++ b/tests/appsec/iast/fixtures/taint_sinks/ssrf.py
@@ -0,0 +1,81 @@
+"""
+CAVEAT: the line number is important to some IAST tests, so be careful when modifying this file and update the tests
+if you make changes
+
+This module provides thin wrappers around various HTTP client calls to exercise SSRF sinks.
+"""
+
+
+def pt_requests_get(url):
+    """Trigger a requests.get call with the provided URL."""
+    import requests
+    from requests.exceptions import ConnectionError  # noqa: A004
+
+    try:
+        # label pt_requests_get
+        return requests.get(url)
+    except ConnectionError:
+        pass
+
+
+def pt_urllib3_poolmanager(url):
+    """Trigger a urllib3.request call with the provided URL."""
+    import urllib3
+    from urllib3.exceptions import MaxRetryError
+
+    try:
+        http_poolmanager = urllib3.PoolManager(num_pools=1)
+        # label pt_urllib3_poolmanager
+        response = http_poolmanager.request("GET", url)
+        http_poolmanager.clear()
+        return response.data
+    except MaxRetryError:
+        pass
+
+
+def pt_urllib3_request(url):
+    """Trigger a urllib3.request call with the provided URL."""
+
+    import urllib3
+    from urllib3.exceptions import MaxRetryError
+
+    try:
+        # label pt_urllib3_request
+        return urllib3.request(method="GET", url=url)
+    except MaxRetryError:
+        pass
+
+
+def pt_httplib_request(url):
+    """Trigger an http.client request using the provided URL as path."""
+
+    import http.client
+
+    conn = http.client.HTTPConnection("127.0.0.1")
+    try:
+        # label pt_httplib_request
+        conn.request("GET", url)
+        return conn.getresponse()
+    except ConnectionError:
+        pass
+
+
+def pt_webbrowser_open(url):
+    """Trigger a webbrowser.open call with the provided URL."""
+
+    import webbrowser
+
+    # label pt_webbrowser_open
+    return webbrowser.open(url)
+
+
+def pt_urllib_request(url):
+    """Trigger a urllib.request.urlopen call with the provided URL."""
+    from urllib.error import URLError
+    import urllib.request
+
+    try:
+        # label pt_urllib_request
+        return urllib.request.urlopen(url)
+    except URLError:
+        pass
diff --git a/tests/appsec/iast/iast_utils.py b/tests/appsec/iast/iast_utils.py
index 61e63fe3417..952fe9000a8 100644
--- a/tests/appsec/iast/iast_utils.py
+++ b/tests/appsec/iast/iast_utils.py
@@ -30,8 +30,7 @@
 
 # Check if the log contains "iast::" to raise an error if that's the case BUT, if the logs contains
-# "iast::instrumentation::" or "iast::instrumentation::"
-# are valid
+# "iast::instrumentation::", "iast::propagation::context::" or "iast::propagation::sink_point" they are valid
 IAST_VALID_LOG = re.compile(r"^iast::(?!instrumentation::|propagation::context::|propagation::sink_point).*$")
 
 
@@ -122,8 +121,10 @@ def iast_hypothesis_test(func):
 
 
 def _get_iast_data():
+    data = {}
     span_report = get_iast_reporter()
-    data = span_report.build_and_scrub_value_parts()
+    if span_report:
+        data = span_report.build_and_scrub_value_parts()
     return data
diff --git a/tests/appsec/iast/taint_sinks/test_command_injection.py b/tests/appsec/iast/taint_sinks/test_command_injection.py
index eefebaa1e17..e5b7e55fe1b 100644
--- a/tests/appsec/iast/taint_sinks/test_command_injection.py
+++ b/tests/appsec/iast/taint_sinks/test_command_injection.py
@@ -10,19 +10,18 @@
 from ddtrace.appsec._iast._taint_tracking import OriginType
 from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject
 from ddtrace.appsec._iast._taint_tracking._taint_objects_base import is_pyobject_tainted
-from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect
 from ddtrace.appsec._iast.constants import VULN_CMDI
 from ddtrace.appsec._iast.secure_marks import cmdi_sanitizer
 from ddtrace.appsec._iast.taint_sinks.command_injection import _iast_report_cmdi
-from ddtrace.appsec._iast.taint_sinks.command_injection import patch
 from tests.appsec.iast.iast_utils import _end_iast_context_and_oce
 from tests.appsec.iast.iast_utils import _get_iast_data
+from tests.appsec.iast.iast_utils import _iast_patched_module
 from tests.appsec.iast.iast_utils import _start_iast_context_and_oce
 from tests.appsec.iast.iast_utils import get_line_and_hash
 from tests.appsec.iast.taint_sinks._taint_sinks_utils import NON_TEXT_TYPES_TEST_DATA
 
 
-FIXTURES_PATH = "tests/appsec/iast/taint_sinks/test_command_injection.py"
+FIXTURES_PATH = "tests/appsec/iast/fixtures/taint_sinks/command_injection.py"
 
 _PARAMS = ["/bin/ls", "-l"]
 
@@ -62,127 +61,101 @@ def _assert_vulnerability(label, value_parts=None, source_name="", check_value=F
 
 
 def test_ossystem(iast_context_defaults):
-    source_name = "test_ossystem"
+    source_name = "pt_os_system"
     _BAD_DIR = taint_pyobject(
         pyobject=_BAD_DIR_DEFAULT,
         source_name=source_name,
         source_value=_BAD_DIR_DEFAULT,
     )
     assert is_pyobject_tainted(_BAD_DIR)
-    # label test_ossystem
-    os.system(add_aspect("dir -l ", _BAD_DIR))
-    _assert_vulnerability("test_ossystem", source_name=source_name)
+    mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection")
+    mod.pt_os_system("dir -l ", _BAD_DIR)
+    _assert_vulnerability("pt_os_system", source_name=source_name)
 
 
-def test_communicate(iast_context_defaults):
-    source_name = "test_communicate"
+def test_subprocess_popen(iast_context_defaults):
+    source_name = "pt_subprocess_popen"
     _BAD_DIR = taint_pyobject(
         pyobject=_BAD_DIR_DEFAULT,
         source_name=source_name,
         source_value=_BAD_DIR_DEFAULT,
         source_origin=OriginType.PARAMETER,
     )
-    # label test_communicate
-    subp = subprocess.Popen(args=["dir", "-l", _BAD_DIR])
-    subp.communicate()
-    subp.wait()
-    _assert_vulnerability("test_communicate", source_name=source_name)
+    mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection")
+    mod.pt_subprocess_popen(["dir", "-l", _BAD_DIR])
+    _assert_vulnerability("pt_subprocess_popen", source_name=source_name)
 
 
 def test_run(iast_context_defaults):
-    source_name = "test_run"
+    source_name = "pt_subprocess_run"
     _BAD_DIR = taint_pyobject(
         pyobject=_BAD_DIR_DEFAULT,
         source_name=source_name,
         source_value=_BAD_DIR_DEFAULT,
         source_origin=OriginType.PARAMETER,
     )
-    # label test_run
-    subprocess.run(["dir", "-l",
_BAD_DIR]) - _assert_vulnerability("test_run", source_name=source_name) - - -def test_popen_wait(iast_context_defaults): - source_name = "test_popen_wait" - _BAD_DIR = taint_pyobject( - pyobject=_BAD_DIR_DEFAULT, - source_name=source_name, - source_value=_BAD_DIR_DEFAULT, - source_origin=OriginType.PARAMETER, - ) - # label test_popen_wait - subp = subprocess.Popen(args=["dir", "-l", _BAD_DIR]) - subp.wait() - - _assert_vulnerability("test_popen_wait", source_name=source_name) + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + mod.pt_subprocess_run(["dir", "-l", _BAD_DIR]) + _assert_vulnerability("pt_subprocess_run", source_name=source_name) def test_popen_wait_shell_true(iast_context_defaults): - source_name = "test_popen_wait_shell_true" + source_name = "pt_subprocess_popen_shell" _BAD_DIR = taint_pyobject( pyobject=_BAD_DIR_DEFAULT, source_name=source_name, source_value=_BAD_DIR_DEFAULT, source_origin=OriginType.PARAMETER, ) - # label test_popen_wait_shell_true - subp = subprocess.Popen(args=["dir", "-l", _BAD_DIR], shell=True) - subp.wait() + # label pt_subprocess_popen_shell + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + mod.pt_subprocess_popen_shell(["dir", "-l", _BAD_DIR]) - _assert_vulnerability("test_popen_wait_shell_true", source_name=source_name) + _assert_vulnerability("pt_subprocess_popen_shell", source_name=source_name) @pytest.mark.skipif(sys.platform not in ["linux", "darwin"], reason="Only for Unix") @pytest.mark.parametrize( "function,mode,arguments,tag", [ - (os.spawnl, os.P_WAIT, _PARAMS, "test_osspawn_variants1"), - (os.spawnl, os.P_NOWAIT, _PARAMS, "test_osspawn_variants1"), - (os.spawnlp, os.P_WAIT, _PARAMS, "test_osspawn_variants1"), - (os.spawnlp, os.P_NOWAIT, _PARAMS, "test_osspawn_variants1"), - (os.spawnv, os.P_WAIT, _PARAMS, "test_osspawn_variants2"), - (os.spawnv, os.P_NOWAIT, _PARAMS, "test_osspawn_variants2"), - (os.spawnvp, os.P_WAIT, _PARAMS, "test_osspawn_variants2"), - (os.spawnvp, os.P_NOWAIT, _PARAMS, "test_osspawn_variants2"), + ("spawnl", os.P_WAIT, _PARAMS, "test_osspawn_variants1"), + ("spawnl", os.P_NOWAIT, _PARAMS, "test_osspawn_variants1"), + ("spawnlp", os.P_WAIT, _PARAMS, "test_osspawn_variants1"), + ("spawnlp", os.P_NOWAIT, _PARAMS, "test_osspawn_variants1"), + ("spawnv", os.P_WAIT, _PARAMS, "test_osspawn_variants2"), + ("spawnv", os.P_NOWAIT, _PARAMS, "test_osspawn_variants2"), + ("spawnvp", os.P_WAIT, _PARAMS, "test_osspawn_variants2"), + ("spawnvp", os.P_NOWAIT, _PARAMS, "test_osspawn_variants2"), ], ) def test_osspawn_variants(iast_context_defaults, function, mode, arguments, tag): - source_name = "test_osspawn_variants" + func_name = "pt_" + function _BAD_DIR = taint_pyobject( pyobject=_BAD_DIR_DEFAULT, - source_name=source_name, + source_name=func_name, source_value=_BAD_DIR_DEFAULT, source_origin=OriginType.PARAMETER, ) copied_args = copy(arguments) copied_args.append(_BAD_DIR) - - if "_" in function.__name__: - # wrapt changes function names when debugging - cleaned_name = function.__name__.split("_")[-1] + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + if "spawnv" in func_name: + getattr(mod, func_name)(mode, copied_args[0], copied_args[1:]) else: - cleaned_name = function.__name__ - - if "spawnv" in cleaned_name: - # label test_osspawn_variants2 - function(mode, copied_args[0], copied_args[1:]) - label = "test_osspawn_variants2" - else: - # label test_osspawn_variants1 - function(mode, copied_args[0], 
*copied_args[1:]) - label = "test_osspawn_variants1" + getattr(mod, func_name)(mode, copied_args[0], *copied_args[1:]) _assert_vulnerability( - label, + func_name, value_parts=[{"value": "/bin/ls -l "}, {"source": 0, "value": _BAD_DIR}], - source_name=source_name, + source_name=func_name, check_value=True, - function="test_osspawn_variants", + function=func_name, ) @pytest.mark.skipif(sys.platform not in ["linux", "darwin"], reason="Only for Unix") def test_multiple_cmdi(iast_context_defaults): + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") _BAD_DIR = taint_pyobject( pyobject=_BAD_DIR_DEFAULT, source_name="test_run", @@ -195,8 +168,8 @@ def test_multiple_cmdi(iast_context_defaults): source_value="qwerty/", source_origin=OriginType.PARAMETER, ) - subprocess.run(["dir", "-l", _BAD_DIR]) - subprocess.run(["dir", "-l", dir_2]) + mod.pt_subprocess_run(["dir", "-l", _BAD_DIR]) + mod.pt_subprocess_run(["dir", "-l", dir_2]) data = _get_iast_data() @@ -205,16 +178,21 @@ def test_multiple_cmdi(iast_context_defaults): @pytest.mark.skipif(sys.platform not in ["linux", "darwin"], reason="Only for Unix") def test_string_cmdi(iast_context_defaults): - cmd = taint_pyobject( + source_name = "pt_subprocess_popen" + tainted_cmd = taint_pyobject( pyobject="dir -l .", - source_name="test_run", + source_name=source_name, source_value="dir -l .", source_origin=OriginType.PARAMETER, ) - subprocess.run(cmd, shell=True, check=True) + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + with pytest.raises(FileNotFoundError): + subprocess.Popen(tainted_cmd) - data = _get_iast_data() + with pytest.raises(FileNotFoundError): + mod.pt_subprocess_popen(tainted_cmd) + data = _get_iast_data() assert len(list(data["vulnerabilities"])) == 1 @@ -233,7 +211,10 @@ def test_string_cmdi_secure_mark(iast_context_defaults): # Apply the sanitizer result = cmdi_sanitizer(cmd_function, None, [cmd], {}) - subprocess.run(result, shell=True, check=True) + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + + with pytest.raises(FileNotFoundError): + mod.pt_subprocess_popen(result) # Verify the result is marked as secure span_report = get_iast_reporter() @@ -241,8 +222,9 @@ def test_string_cmdi_secure_mark(iast_context_defaults): def test_cmdi_deduplication(iast_context_deduplication_enabled): - patch() _end_iast_context_and_oce() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.command_injection") + for num_vuln_expected in [1, 0, 0]: _start_iast_context_and_oce() _BAD_DIR = "forbidden_dir/" @@ -255,7 +237,8 @@ def test_cmdi_deduplication(iast_context_deduplication_enabled): assert is_pyobject_tainted(_BAD_DIR) for _ in range(0, 5): # label test_ossystem - os.system(add_aspect("dir -l ", _BAD_DIR)) + + mod.pt_os_system("dir -l ", _BAD_DIR) span_report = get_iast_reporter() @@ -280,7 +263,7 @@ def test_cmdi_non_text_types_no_vulnerability(non_text_obj, obj_type, iast_conte ) # Call the command injection reporting function directly - _iast_report_cmdi(tainted_obj) + _iast_report_cmdi("func_name", tainted_obj) # Assert no vulnerability was reported span_report = get_iast_reporter() @@ -304,7 +287,7 @@ def test_cmdi_list_with_non_text_types_no_vulnerability(iast_context_defaults): tainted_list.append(tainted_item) # Call the command injection reporting function - _iast_report_cmdi(tainted_list) + _iast_report_cmdi("func_name", tainted_list) # Assert no vulnerability was reported span_report = 
get_iast_reporter() diff --git a/tests/appsec/iast/taint_sinks/test_ssrf.py b/tests/appsec/iast/taint_sinks/test_ssrf.py index 035f25b50e2..a163e4c7776 100644 --- a/tests/appsec/iast/taint_sinks/test_ssrf.py +++ b/tests/appsec/iast/taint_sinks/test_ssrf.py @@ -8,25 +8,15 @@ from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.appsec._iast.taint_sinks.ssrf import _iast_report_ssrf -from ddtrace.contrib.internal.httplib.patch import patch as httplib_patch -from ddtrace.contrib.internal.httplib.patch import unpatch as httplib_unpatch -from ddtrace.contrib.internal.requests.patch import patch as requests_patch -from ddtrace.contrib.internal.requests.patch import unpatch as requests_unpatch -from ddtrace.contrib.internal.urllib.patch import patch as urllib_patch -from ddtrace.contrib.internal.urllib.patch import unpatch as urllib_unpatch -from ddtrace.contrib.internal.urllib3.patch import patch as urllib3_patch -from ddtrace.contrib.internal.urllib3.patch import unpatch as urllib3_unpatch -from ddtrace.contrib.internal.webbrowser.patch import patch as webbrowser_patch -from ddtrace.contrib.internal.webbrowser.patch import unpatch as webbrowser_unpatch from tests.appsec.iast.iast_utils import _end_iast_context_and_oce from tests.appsec.iast.iast_utils import _get_iast_data +from tests.appsec.iast.iast_utils import _iast_patched_module from tests.appsec.iast.iast_utils import _start_iast_context_and_oce from tests.appsec.iast.iast_utils import get_line_and_hash from tests.appsec.iast.taint_sinks._taint_sinks_utils import NON_TEXT_TYPES_TEST_DATA -from tests.utils import override_global_config -FIXTURES_PATH = "tests/appsec/iast/taint_sinks/test_ssrf.py" +FIXTURES_PATH = "tests/appsec/iast/fixtures/taint_sinks/ssrf.py" def _get_tainted_url(): @@ -65,97 +55,57 @@ def _check_report(tainted_path, label): assert vulnerability["hash"] == hash_value -def test_ssrf_requests(tracer, iast_context_defaults): - with override_global_config(dict(_iast_enabled=True)): - requests_patch() - try: - import requests - from requests.exceptions import ConnectionError # noqa: A004 +def test_ssrf_requests(iast_context_defaults): + tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") - tainted_url, tainted_path = _get_tainted_url() - try: - # label test_ssrf_requests - requests.get(tainted_url) - except ConnectionError: - pass + mod.pt_requests_get(tainted_url) - _check_report(tainted_path, "test_ssrf_requests") - finally: - requests_unpatch() + _check_report(tainted_path, "pt_requests_get") -def test_ssrf_urllib3(tracer, iast_context_defaults): - with override_global_config(dict(_iast_enabled=True)): - urllib3_patch() - try: - import urllib3 +def test_ssrf_urllib3(iast_context_defaults): + tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") - tainted_url, tainted_path = _get_tainted_url() - try: - # label test_ssrf_urllib3 - urllib3.request(method="GET", url=tainted_url) - except urllib3.exceptions.HTTPError: - pass + mod.pt_urllib3_request(tainted_url) - _check_report(tainted_path, "test_ssrf_urllib3") - finally: - urllib3_unpatch() + _check_report(tainted_path, "pt_urllib3_request") -def test_ssrf_httplib(tracer, iast_context_defaults): - with override_global_config(dict(_iast_enabled=True)): - httplib_patch() - try: - import http.client +def test_ssrf_urllib3_poolmanager(iast_context_defaults): + 
tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") - tainted_url, tainted_path = _get_tainted_url() - try: - conn = http.client.HTTPConnection("127.0.0.1") - # label test_ssrf_httplib - conn.request("GET", tainted_url) - conn.getresponse() - except ConnectionError: - pass + mod.pt_urllib3_poolmanager(tainted_url) - _check_report(tainted_path, "test_ssrf_httplib") - finally: - httplib_unpatch() + _check_report(tainted_path, "pt_urllib3_poolmanager") -def test_ssrf_webbrowser(tracer, iast_context_defaults): - with override_global_config(dict(_iast_enabled=True)): - webbrowser_patch() - try: - import webbrowser +def test_ssrf_httplib(iast_context_defaults): + tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") - tainted_url, tainted_path = _get_tainted_url() - try: - # label test_ssrf_webbrowser - webbrowser.open(tainted_url) - except ConnectionError: - pass + mod.pt_httplib_request(tainted_url) - _check_report(tainted_path, "test_ssrf_webbrowser") - finally: - webbrowser_unpatch() + _check_report(tainted_path, "pt_httplib_request") -def test_urllib_request(tracer, iast_context_defaults): - with override_global_config(dict(_iast_enabled=True)): - urllib_patch() - try: - import urllib.request +def test_ssrf_webbrowser(iast_context_defaults): + tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + mod.pt_webbrowser_open(tainted_url) - tainted_url, tainted_path = _get_tainted_url() - try: - # label test_urllib_request - urllib.request.urlopen(tainted_url) - except urllib.error.URLError: - pass + _check_report(tainted_path, "pt_webbrowser_open") - _check_report(tainted_path, "test_urllib_request") - finally: - urllib_unpatch() + +def test_urllib_request(iast_context_defaults): + tainted_url, tainted_path = _get_tainted_url() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + + mod.pt_urllib_request(tainted_url) + + _check_report(tainted_path, "pt_urllib_request") def _check_no_report_if_deduplicated(num_vuln_expected): @@ -169,111 +119,71 @@ def _check_no_report_if_deduplicated(num_vuln_expected): def test_ssrf_requests_deduplication(iast_context_deduplication_enabled): - requests_patch() - try: - import requests - from requests.exceptions import ConnectionError # noqa: A004 - - for num_vuln_expected in [1, 0, 0]: - _start_iast_context_and_oce() - tainted_url, tainted_path = _get_tainted_url() - for _ in range(0, 5): - try: - # label test_ssrf_requests_deduplication - requests.get(tainted_url) - except ConnectionError: - pass - - _check_no_report_if_deduplicated(num_vuln_expected) - _end_iast_context_and_oce() - finally: - requests_unpatch() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + + for num_vuln_expected in [1, 0, 0]: + _start_iast_context_and_oce() + tainted_url, tainted_path = _get_tainted_url() + for _ in range(0, 5): + mod.pt_requests_get(tainted_url) + + _check_no_report_if_deduplicated(num_vuln_expected) + _end_iast_context_and_oce() def test_ssrf_urllib3_deduplication(iast_context_deduplication_enabled): - urllib3_patch() - try: - for num_vuln_expected in [1, 0, 0]: - _start_iast_context_and_oce() - import urllib3 - - tainted_url, tainted_path = _get_tainted_url() - for _ in range(0, 5): - try: - # label test_ssrf_urllib3_deduplication - urllib3.request(method="GET", url=tainted_url) - except 
urllib3.exceptions.HTTPError: - pass - - _check_no_report_if_deduplicated(num_vuln_expected) - _end_iast_context_and_oce() - finally: - requests_unpatch() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + for num_vuln_expected in [1, 0, 0]: + _start_iast_context_and_oce() + tainted_url, tainted_path = _get_tainted_url() + for _ in range(0, 5): + mod.pt_urllib3_request(tainted_url) + + _check_no_report_if_deduplicated(num_vuln_expected) + _end_iast_context_and_oce() def test_ssrf_httplib_deduplication(iast_context_deduplication_enabled): - httplib_patch() - try: - import http.client - - for num_vuln_expected in [1, 0, 0]: - _start_iast_context_and_oce() - tainted_url, tainted_path = _get_tainted_url() - for _ in range(0, 5): - try: - conn = http.client.HTTPConnection("127.0.0.1") - # label test_ssrf_httplib_deduplication - conn.request("GET", tainted_url) - conn.getresponse() - except ConnectionError: - pass - - _check_no_report_if_deduplicated(num_vuln_expected) - _end_iast_context_and_oce() - finally: - httplib_unpatch() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + + for num_vuln_expected in [1, 0, 0]: + _start_iast_context_and_oce() + tainted_url, tainted_path = _get_tainted_url() + for _ in range(0, 5): + mod.pt_httplib_request(tainted_url) + + _check_no_report_if_deduplicated(num_vuln_expected) + _end_iast_context_and_oce() def test_ssrf_webbrowser_deduplication(iast_context_deduplication_enabled): - webbrowser_patch() - try: - import webbrowser - - for num_vuln_expected in [1, 0, 0]: - _start_iast_context_and_oce() - tainted_url, tainted_path = _get_tainted_url() - for _ in range(0, 5): - try: - # label test_ssrf_webbrowser_deduplication - webbrowser.open(tainted_url) - except ConnectionError: - pass - - _check_no_report_if_deduplicated(num_vuln_expected) - _end_iast_context_and_oce() - finally: - webbrowser_unpatch() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + + for num_vuln_expected in [1, 0, 0]: + _start_iast_context_and_oce() + tainted_url, tainted_path = _get_tainted_url() + for _ in range(0, 5): + try: + # label test_ssrf_webbrowser_deduplication + mod.pt_webbrowser_open(tainted_url) + except ConnectionError: + pass + + _check_no_report_if_deduplicated(num_vuln_expected) + _end_iast_context_and_oce() def test_ssrf_urllib_deduplication(iast_context_deduplication_enabled): - urllib_patch() - try: - import urllib.request - - for num_vuln_expected in [1, 0, 0]: - _start_iast_context_and_oce() - tainted_url, tainted_path = _get_tainted_url() - for _ in range(0, 5): - try: - # label test_urllib_request_deduplication - urllib.request.urlopen(tainted_url) - except urllib.error.URLError: - pass - - _check_no_report_if_deduplicated(num_vuln_expected) - _end_iast_context_and_oce() - finally: - urllib_unpatch() + mod = _iast_patched_module("tests.appsec.iast.fixtures.taint_sinks.ssrf") + + for num_vuln_expected in [1, 0, 0]: + _start_iast_context_and_oce() + tainted_url, tainted_path = _get_tainted_url() + for _ in range(0, 5): + mod.pt_urllib_request(tainted_url) + + _check_no_report_if_deduplicated(num_vuln_expected) + _end_iast_context_and_oce() @pytest.mark.parametrize("non_text_obj,obj_type", NON_TEXT_TYPES_TEST_DATA) @@ -297,11 +207,11 @@ def mock_request_func(url): from ddtrace.appsec._iast.taint_sinks.ssrf import _FUNC_TO_URL_ARGUMENT original_mapping = _FUNC_TO_URL_ARGUMENT.copy() - _FUNC_TO_URL_ARGUMENT["tests.appsec.iast.taint_sinks.test_ssrf.mock_request_func"] = (0, "url") + 
_FUNC_TO_URL_ARGUMENT["my_testing_cool_module"] = (0, "url") try: # Call the SSRF reporting function - _iast_report_ssrf(mock_request_func, tainted_obj) + _iast_report_ssrf("my_cool_function", "my_testing_cool_module", mock_request_func, tainted_obj) finally: # Restore original mapping _FUNC_TO_URL_ARGUMENT.clear() diff --git a/tests/appsec/iast/taint_sinks/test_text_type_validation.py b/tests/appsec/iast/taint_sinks/test_text_type_validation.py index ec5f638cdc4..94bfb26e01b 100644 --- a/tests/appsec/iast/taint_sinks/test_text_type_validation.py +++ b/tests/appsec/iast/taint_sinks/test_text_type_validation.py @@ -39,7 +39,7 @@ def test_text_types_still_trigger_vulnerabilities(iast_context_defaults): ) # Test command injection - should report vulnerability - _iast_report_cmdi(tainted_obj) + _iast_report_cmdi("open", tainted_obj) span_report = get_iast_reporter() assert span_report is not None, f"No vulnerability reported for {text_type.__name__}" assert len(span_report.vulnerabilities) > 0, f"No vulnerabilities found for {text_type.__name__}" diff --git a/tests/appsec/iast/taint_sinks/test_untrusted_serialization.py b/tests/appsec/iast/taint_sinks/test_untrusted_serialization.py new file mode 100644 index 00000000000..6ee68b23a73 --- /dev/null +++ b/tests/appsec/iast/taint_sinks/test_untrusted_serialization.py @@ -0,0 +1,233 @@ +import io + +import pytest + +from ddtrace.appsec._iast._taint_tracking import OriginType +from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject +from ddtrace.appsec._iast.constants import VULN_UNTRUSTED_SERIALIZATION +from ddtrace.appsec._iast.taint_sinks import untrusted_serialization as untrusted_mod +from tests.appsec.iast.iast_utils import _get_iast_data + + +@pytest.fixture(autouse=True) +def _ensure_patch(): + """Ensure the untrusted serialization sinks are patched before each test.""" + untrusted_mod.patch() + yield + + +def _assert_one_untrusted_vuln(): + data = _get_iast_data() + vulnerabilities = data.get("vulnerabilities", []) + assert len(vulnerabilities) == 1 + vulnerability = vulnerabilities[0] + print(vulnerability) + assert vulnerability["type"] == VULN_UNTRUSTED_SERIALIZATION + + +def _assert_no_untrusted_vuln(): + data = _get_iast_data() + assert len(data.get("vulnerabilities", [])) == 0 + + +def test_untrusted_serialization_yaml_unsafe_load(iast_context_defaults): + yaml = pytest.importorskip("yaml") + + payload = "key: value" + tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH) + + yaml.unsafe_load(tainted) + + _assert_one_untrusted_vuln() + + +def test_untrusted_serialization_yaml_load(iast_context_defaults): + yaml = pytest.importorskip("yaml") + + payload = "key: value" + tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH) + + # yaml.load may require a Loader; calling unsafe_load in previous test already ensures coverage. + # Here we still call load directly to exercise the wrapper. 
+
+    yaml.load(tainted, Loader=getattr(yaml, "UnsafeLoader", None))
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization_pickle_loads(iast_context_defaults):
+    import pickle
+
+    # Example object
+    data = [1, 2, 3, {"x": 10, "y": 20}]
+
+    # Serialize to bytes
+    payload = pickle.dumps(data)
+
+    tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH)
+
+    pickle.loads(tainted)
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization__pickle_loads(iast_context_defaults):
+    _pickle = pytest.importorskip("_pickle")
+
+    payload = b"\x80\x04N."
+    tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH)
+
+    _pickle.loads(tainted)
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization_dill_loads(iast_context_defaults):
+    dill = pytest.importorskip("dill")
+
+    payload = b"\x80\x04N."
+    tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH)
+
+    dill.loads(tainted)
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization_pickle_load(iast_context_defaults):
+    import pickle
+
+    data = {"a": 1}
+    payload = pickle.dumps(data)
+    bio = io.BytesIO(payload)  # the stream object itself carries no taint
+
+    # pickle.load is wrapped too, but it receives an untainted file-like object,
+    # so the sink executes without reporting a vulnerability.
+    pickle.load(bio)
+
+    _assert_no_untrusted_vuln()
+
+
+def test_untrusted_serialization_pickle_unpickler_load(iast_context_defaults):
+    import pickle
+
+    data = ["x", 2]
+    payload = pickle.dumps(data)
+    bio = io.BytesIO(payload)
+    unpickler = pickle.Unpickler(bio)
+
+    # The method load() is wrapped via "pickle\n_Unpickler.load" mapping in the module
+    unpickler.load()
+
+    _assert_no_untrusted_vuln()
+
+
+def test_untrusted_serialization__pickle_load(iast_context_defaults):
+    _pickle = pytest.importorskip("_pickle")
+    import pickle
+
+    payload = pickle.dumps({"k": "v"})
+    bio = io.BytesIO(payload)
+
+    _pickle.load(bio)
+
+    _assert_no_untrusted_vuln()
+
+
+def test_untrusted_serialization__pickle_unpickler_load(iast_context_defaults):
+    _pickle = pytest.importorskip("_pickle")
+    import pickle
+
+    payload = pickle.dumps(123)
+    bio = io.BytesIO(payload)
+    unpickler = _pickle.Unpickler(bio)
+
+    unpickler.load()
+
+    _assert_no_untrusted_vuln()
+
+
+def test_untrusted_serialization_dill_load(iast_context_defaults):
+    dill = pytest.importorskip("dill")
+    import pickle
+
+    payload = pickle.dumps({"z": 9})
+    bio = io.BytesIO(payload)
+
+    dill.load(bio)
+
+    _assert_no_untrusted_vuln()
+
+
+def test_untrusted_serialization_yaml_load_all(iast_context_defaults):
+    yaml = pytest.importorskip("yaml")
+
+    payload = "---\na: 1\n---\nb: 2\n"
+    tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH)
+
+    list(yaml.load_all(tainted, Loader=getattr(yaml, "UnsafeLoader", None)))
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization_yaml_unsafe_load_all(iast_context_defaults):
+    yaml = pytest.importorskip("yaml")
+
+    payload = "---\na: 1\n---\nb: 2\n"
+    tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH)
+
+    list(yaml.unsafe_load_all(tainted))
+
+    _assert_one_untrusted_vuln()
+
+
+def test_untrusted_serialization_yaml_full_load(iast_context_defaults):
+    yaml = pytest.importorskip("yaml")
+
payload = "a: 1" + tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH) + + yaml.full_load(tainted) + + _assert_one_untrusted_vuln() + + +def test_untrusted_serialization_yaml_full_load_all(iast_context_defaults): + yaml = pytest.importorskip("yaml") + + payload = "---\na: 1\n---\nb: 2\n" + tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH) + + list(yaml.full_load_all(tainted)) + + _assert_one_untrusted_vuln() + + +def test_untrusted_serialization_pickle__loads_internal(iast_context_defaults): + import pickle + + if not hasattr(pickle, "_loads"): + pytest.skip("pickle._loads not available") + + data = {"int": 1} + payload = pickle.dumps(data) + tainted = taint_pyobject(payload, source_name="path", source_value=payload, source_origin=OriginType.PATH) + + pickle._loads(tainted) + + _assert_one_untrusted_vuln() + + +def test_untrusted_serialization_pickle__load_internal(iast_context_defaults): + import io as _io + import pickle + + if not hasattr(pickle, "_load"): + pytest.skip("pickle._load not available") + + payload = pickle.dumps([1, 2, 3]) + bio = _io.BytesIO(payload) + + pickle._load(bio) + + _assert_no_untrusted_vuln() diff --git a/tests/appsec/iast/taint_sinks/test_untrusted_serialization_redacted.py b/tests/appsec/iast/taint_sinks/test_untrusted_serialization_redacted.py new file mode 100644 index 00000000000..296f167f84f --- /dev/null +++ b/tests/appsec/iast/taint_sinks/test_untrusted_serialization_redacted.py @@ -0,0 +1,44 @@ +import pytest + +from ddtrace.appsec._iast._taint_tracking import origin_to_str +from ddtrace.appsec._iast._taint_tracking import str_to_origin +from ddtrace.appsec._iast.constants import VULN_UNTRUSTED_SERIALIZATION +from ddtrace.appsec._iast.taint_sinks.untrusted_serialization import UntrustedSerialization +from tests.appsec.iast.iast_utils import _get_iast_data +from tests.appsec.iast.taint_sinks._taint_sinks_utils import _taint_pyobject_multiranges +from tests.appsec.iast.taint_sinks._taint_sinks_utils import get_parametrize + + +@pytest.mark.parametrize( + "evidence_input, sources_expected, vulnerabilities_expected,element", + list(get_parametrize(VULN_UNTRUSTED_SERIALIZATION)), +) +def test_unstrusted_serialization_redaction_suite( + evidence_input, sources_expected, vulnerabilities_expected, iast_context_defaults, element +): + tainted_object = evidence_input_value = evidence_input.get("value", "") + if evidence_input_value: + tainted_object = _taint_pyobject_multiranges( + evidence_input_value, + [ + ( + input_ranges["iinfo"]["parameterName"], + input_ranges["iinfo"]["parameterValue"], + str_to_origin(input_ranges["iinfo"]["type"]), + input_ranges["start"], + input_ranges["end"] - input_ranges["start"], + ) + for input_ranges in evidence_input.get("ranges", {}) + ], + ) + + UntrustedSerialization.report(tainted_object) + + data = _get_iast_data() + vulnerability = list(data["vulnerabilities"])[0] + source = list(data["sources"])[0] + source["origin"] = origin_to_str(source["origin"]) + + assert vulnerability["type"] == VULN_UNTRUSTED_SERIALIZATION + assert vulnerability["evidence"] == vulnerabilities_expected["evidence"] + assert source == sources_expected diff --git a/tests/appsec/iast/taint_tracking/test_native_taint_range.py b/tests/appsec/iast/taint_tracking/test_native_taint_range.py index 12cf3656e35..4af8d439f4f 100644 --- a/tests/appsec/iast/taint_tracking/test_native_taint_range.py +++ 
b/tests/appsec/iast/taint_tracking/test_native_taint_range.py @@ -591,7 +591,9 @@ def test_contexts_in_threads(caplog, telemetry_writer): if IAST_VALID_LOG.search(message): pytest.fail(message) - list_metrics_logs = list(telemetry_writer._logs) + list_metrics_logs = [ + log for log in telemetry_writer._logs if not log["message"].startswith("failed to send, dropping") + ] assert len(list_metrics_logs) == 0 @@ -621,7 +623,9 @@ async def test_context_race_conditions_async(caplog, telemetry_writer): assert results.count(True) == 7 log_messages = [record.message for record in caplog.get_records("call")] assert len([message for message in log_messages if IAST_VALID_LOG.search(message)]) == 0 - list_metrics_logs = list(telemetry_writer._logs) + list_metrics_logs = [ + log for log in telemetry_writer._logs if not log["message"].startswith("failed to send, dropping") + ] assert len(list_metrics_logs) == 0 diff --git a/tests/appsec/iast/test_telemetry.py b/tests/appsec/iast/test_telemetry.py index 76ff7b84f30..f36ee063e5d 100644 --- a/tests/appsec/iast/test_telemetry.py +++ b/tests/appsec/iast/test_telemetry.py @@ -13,13 +13,18 @@ from ddtrace.appsec._iast._taint_tracking import OriginType from ddtrace.appsec._iast._taint_tracking import origin_to_str from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject -from ddtrace.appsec._iast.constants import VULN_CMDI from ddtrace.appsec._iast.constants import VULN_CODE_INJECTION from ddtrace.appsec._iast.constants import VULN_HEADER_INJECTION +from ddtrace.appsec._iast.constants import VULN_INSECURE_HASHING_TYPE +from ddtrace.appsec._iast.constants import VULN_UNTRUSTED_SERIALIZATION +from ddtrace.appsec._iast.constants import VULN_UNVALIDATED_REDIRECT +from ddtrace.appsec._iast.constants import VULN_XSS from ddtrace.appsec._iast.taint_sinks.code_injection import patch as code_injection_patch -from ddtrace.appsec._iast.taint_sinks.command_injection import patch as cmdi_patch from ddtrace.appsec._iast.taint_sinks.header_injection import patch as header_injection_patch +from ddtrace.appsec._iast.taint_sinks.untrusted_serialization import patch as untrusted_serialization_patch +from ddtrace.appsec._iast.taint_sinks.unvalidated_redirect import patch as unvalidated_redirect_patch from ddtrace.appsec._iast.taint_sinks.weak_hash import patch as weak_hash_patch +from ddtrace.appsec._iast.taint_sinks.xss import patch as xss_patch from ddtrace.ext import SpanTypes from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.internal.telemetry.constants import TELEMETRY_TYPE_GENERATE_METRICS @@ -35,7 +40,7 @@ def _assert_instrumented_sink(telemetry_writer, vuln_type): assert len(generate_metrics) == 1, "Expected 1 generate_metrics" assert [metric["metric"] for metric in generate_metrics] == ["instrumented.sink"] assert [metric["tags"] for metric in generate_metrics] == [[f"vulnerability_type:{vuln_type.lower()}"]] - assert [metric["points"][0][1] for metric in generate_metrics] == [1] + assert [metric["points"][0][1] for metric in generate_metrics][0] >= 1 assert [metric["type"] for metric in generate_metrics] == ["count"] @@ -108,33 +113,25 @@ def test_metric_executed_sink( assert span.get_metric(IAST_SPAN_TAGS.TELEMETRY_REQUEST_TAINTED) is None -def test_metric_instrumented_cmdi(no_request_sampling, telemetry_writer): - with override_global_config( - dict(_iast_enabled=True, _iast_is_testing=True, _iast_telemetry_report_lvl=TELEMETRY_INFORMATION_NAME) - ): - cmdi_patch() - - _assert_instrumented_sink(telemetry_writer, 
VULN_CMDI)
-
-
-def test_metric_instrumented_header_injection(no_request_sampling, telemetry_writer):
-    # We need to unpatch first because ddtrace.appsec._iast._patch_modules loads at runtime this patch function
-    with override_global_config(
-        dict(_iast_enabled=True, _iast_is_testing=True, _iast_telemetry_report_lvl=TELEMETRY_INFORMATION_NAME)
-    ):
-        header_injection_patch()
-
-    _assert_instrumented_sink(telemetry_writer, VULN_HEADER_INJECTION)
-
-
-def test_metric_instrumented_code_injection(no_request_sampling, telemetry_writer):
+@pytest.mark.parametrize(
+    "patch_func, vuln",
+    [
+        (header_injection_patch, VULN_HEADER_INJECTION),
+        (code_injection_patch, VULN_CODE_INJECTION),
+        (untrusted_serialization_patch, VULN_UNTRUSTED_SERIALIZATION),
+        (unvalidated_redirect_patch, VULN_UNVALIDATED_REDIRECT),
+        (xss_patch, VULN_XSS),
+        (weak_hash_patch, VULN_INSECURE_HASHING_TYPE),
+    ],
+)
+def test_metric_instrumented_vulnerability(no_request_sampling, telemetry_writer, patch_func, vuln):
     # We need to unpatch first because ddtrace.appsec._iast._patch_modules loads at runtime this patch function
     with override_global_config(
         dict(_iast_enabled=True, _iast_is_testing=True, _iast_telemetry_report_lvl=TELEMETRY_INFORMATION_NAME)
     ):
-        code_injection_patch()
+        patch_func()
 
-    _assert_instrumented_sink(telemetry_writer, VULN_CODE_INJECTION)
+    _assert_instrumented_sink(telemetry_writer, vuln)
 
 
 def test_metric_instrumented_propagation(no_request_sampling, telemetry_writer):
@@ -187,7 +184,7 @@ def test_log_metric(telemetry_writer):
     list_metrics_logs = list(telemetry_writer._logs)
     assert len(list_metrics_logs) == 1
     assert list_metrics_logs[0]["message"] == "test_format_key_error_and_no_log_metric raises"
-    assert str(list_metrics_logs[0]["stack_trace"]).startswith(' File "/')
+    assert "stack_trace" not in list_metrics_logs[0].keys()
 
 
 def test_log_metric_debug_disabled(telemetry_writer):
@@ -206,7 +203,7 @@ def test_log_metric_debug_deduplication(telemetry_writer):
     list_metrics_logs = list(telemetry_writer._logs)
     assert len(list_metrics_logs) == 1
     assert list_metrics_logs[0]["message"] == "test_log_metric_debug_deduplication raises 2"
-    assert "stack_trace" in list_metrics_logs[0].keys()
+    assert "stack_trace" not in list_metrics_logs[0].keys()
 
 
 def test_log_metric_debug_disabled_deduplication(telemetry_writer):
@@ -228,7 +225,7 @@ def test_log_metric_debug_deduplication_different_messages(telemetry_writer):
     assert list_metrics_logs[0]["message"].startswith(
         "test_log_metric_debug_deduplication_different_messages raises"
     )
-    assert "stack_trace" in list_metrics_logs[0].keys()
+    assert "stack_trace" not in list_metrics_logs[0].keys()
 
 
 def test_log_metric_debug_disabled_deduplication_different_messages(telemetry_writer):
diff --git a/tests/appsec/iast_tdd_propagation/flask_orm_app.py b/tests/appsec/iast_tdd_propagation/flask_orm_app.py
index f68b220aa9c..2d5ba89330d 100644
--- a/tests/appsec/iast_tdd_propagation/flask_orm_app.py
+++ b/tests/appsec/iast_tdd_propagation/flask_orm_app.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-"""This Flask application is imported on tests.appsec.appsec_utils.gunicorn_server"""
+"""This Flask application is imported by tests.appsec.appsec_utils.gunicorn_flask_server"""
 import importlib
 import os
diff --git a/tests/appsec/integrations/django_tests/django_app/urls.py b/tests/appsec/integrations/django_tests/django_app/urls.py
index 4b0d1948f30..8dc672cd620 100644
--- a/tests/appsec/integrations/django_tests/django_app/urls.py
+++ 
b/tests/appsec/integrations/django_tests/django_app/urls.py @@ -23,8 +23,10 @@ def shutdown(request): handler(r"^$", views.index), handler(r"^shutdown$", shutdown), handler(r"^iast-enabled/$", views.iast_enabled), + handler(r"^vulnerablerequestdownstream/$", views.vulnerable_request_downstream), # This must precede composed-view. handler("appsec/response-header/$", views.magic_header_key, name="response-header"), + handler("appsec/returnheaders/$", views.return_headers, name="return_headers"), handler("appsec/body/$", views.body_view, name="body_view"), handler("appsec/view_with_exception/$", views.view_with_exception, name="view_with_exception"), handler("appsec/weak-hash/$", views.weak_hash_view, name="weak_hash"), @@ -134,6 +136,21 @@ def shutdown(request): handler("appsec/propagation/ospathjoin/$", views.ospathjoin_propagation), handler("appsec/iast_sampling/$", views.iast_sampling), handler("appsec/iast_sampling_2/$", views.iast_sampling_2), + handler("iast/untrusted/yaml/$", views.untrusted_serialization_yaml_view, name="untrusted_serialization_yaml"), + handler( + "iast/untrusted/yaml/load/$", + views.untrusted_serialization_yaml_load_view, + name="untrusted_serialization_yaml_load", + ), + handler( + "iast/untrusted/yaml/safe_load/$", + views.untrusted_serialization_yaml_safe_load_view, + name="untrusted_serialization_yaml_safe_load", + ), + handler( + "iast/untrusted/pickle/$", views.untrusted_serialization_pickle_view, name="untrusted_serialization_pickle" + ), + handler("iast/untrusted/dill/$", views.untrusted_serialization_dill_view, name="untrusted_serialization_dill"), path( "appsec/iast_sampling_by_route_method//", views.iast_sampling_by_route_method, diff --git a/tests/appsec/integrations/django_tests/django_app/views.py b/tests/appsec/integrations/django_tests/django_app/views.py index 3e1e8f94064..f5fc6f2e032 100644 --- a/tests/appsec/integrations/django_tests/django_app/views.py +++ b/tests/appsec/integrations/django_tests/django_app/views.py @@ -8,6 +8,7 @@ import os from pathlib import Path from pathlib import PosixPath +import pickle import shlex import subprocess import time @@ -25,6 +26,8 @@ from django.views.decorators.csrf import csrf_exempt import requests from requests.exceptions import ConnectionError # noqa: A004 +import urllib3 +import yaml from ddtrace.appsec import _asm_request_context from ddtrace.appsec._iast._iast_request_context_base import is_iast_request_enabled @@ -107,6 +110,25 @@ def weak_hash_view(request): return HttpResponse("OK", status=200) +def untrusted_serialization_yaml_load_view(request): + """Endpoint to exercise UNTRUSTED_SERIALIZATION via yaml.load. + + Uses UnsafeLoader when available to match unsafe execution behavior. 
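    Illustration of why unsafe loaders are the sink here (hypothetical payload,
    not used by the test suite): they construct arbitrary Python objects from
    tagged nodes, so a tainted document can execute code during parsing:

        import yaml

        doc = "!!python/object/apply:os.getcwd []"
        yaml.unsafe_load(doc)  # imports os and calls os.getcwd() while parsing
        yaml.safe_load(doc)    # raises yaml.constructor.ConstructorError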
+ """ + user_input = request.GET.get("input", "") + # label untrusted_serialization_yaml_load + yaml.load(user_input, Loader=getattr(yaml, "UnsafeLoader", None)) + return HttpResponse("OK", status=200) + + +def untrusted_serialization_yaml_safe_load_view(request): + """Endpoint using yaml.safe_load; should not report untrusted serialization.""" + user_input = request.GET.get("input", "") + # label untrusted_serialization_yaml_safe_load + yaml.safe_load(user_input) + return HttpResponse("OK", status=200) + + def block_callable_view(request): _asm_request_context.block_request() return HttpResponse("OK", status=200) @@ -399,7 +421,94 @@ def command_injection_subprocess(request): # label iast_command_injection_subprocess subp = subprocess.Popen(args=[cmd, "-la", filename], shell=True) subp.communicate() - subp.wait() + return HttpResponse("OK", status=200) + + +def return_headers(request): + """Return all incoming request headers as JSON. + + Uses request.headers where available (Django >= 2.2), otherwise falls back to META. + """ + headers = {} + if hasattr(request, "headers"): + for key, value in request.headers.items(): + headers[key] = value + else: + # Django < 2.2 compatibility: reconstruct headers from META + for key, value in request.META.items(): + if key.startswith("HTTP_"): + name = key[5:].replace("_", "-").title() + headers[name] = value + elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"): + name = key.replace("_", "-").title() + headers[name] = value + return JsonResponse(headers) + + +def vulnerable_request_downstream(request): + """Trigger a weak-hash vulnerability, then call downstream return-headers endpoint. + + Mirrors Flask's /vulnerablerequestdownstream behavior to validate header propagation + and IAST instrumentation under Django. + """ + # Trigger weak hash for IAST + m = hashlib.md5() + m.update(b"Nobody inspects") + m.update(b" the spammish repetition") + _ = m.digest() + + port = request.GET.get("port", "8050") + http_poolmanager = urllib3.PoolManager(num_pools=1) + # Sending a GET request and getting back response as HTTPResponse object. + response = http_poolmanager.request("GET", f"http://localhost:{port}/appsec/returnheaders") + http_poolmanager.clear() + + return HttpResponse(response.data, status=200, content_type="application/json") + + +def untrusted_serialization_yaml_view(request): + """Endpoint to exercise UNTRUSTED_SERIALIZATION via YAML loaders. + + Uses a tainted query parameter and yaml.unsafe_load to trigger the sink. + """ + user_input = request.GET.get("input", "") + # label untrusted_serialization_yaml_view + yaml.unsafe_load(user_input) + return HttpResponse("OK", status=200) + + +def untrusted_serialization_pickle_view(request): + """Endpoint to exercise pickle.loads with user input. + + Note: We convert the string to bytes. Current IAST may not propagate taint + through encode, so Django integration test is a smoke test (no vuln expected). + """ + user_input = request.GET.get("input", "") + data = user_input.encode("utf-8", "ignore") + try: + # label untrusted_serialization_pickle + pickle.loads(data) + except Exception: + pass + return HttpResponse("OK", status=200) + + +def untrusted_serialization_dill_view(request): + """Endpoint to exercise dill.loads with user input. + + Dill is optional; if not installed, we handle gracefully. As with pickle, + encode may drop taint, so treat as smoke test in integration. 
+ """ + import dill # type: ignore + + user_input = request.GET.get("input", "") + data = user_input.encode("utf-8", "ignore") + + try: + # label untrusted_serialization_dill + dill.loads(data) + except Exception: + pass return HttpResponse("OK", status=200) diff --git a/tests/appsec/integrations/django_tests/django_app/wsgi.py b/tests/appsec/integrations/django_tests/django_app/wsgi.py new file mode 100644 index 00000000000..a838bc7cfcc --- /dev/null +++ b/tests/appsec/integrations/django_tests/django_app/wsgi.py @@ -0,0 +1,14 @@ +"""WSGI entry point for the Django test application. + +This enables running the Django test app under Gunicorn using the +``tests.appsec.integrations.django_tests.django_app.wsgi:application`` target. +""" +import ddtrace.auto # noqa: F401 # isort: skip +import os + +from django.core.wsgi import get_wsgi_application + + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.appsec.integrations.django_tests.django_app.settings") + +application = get_wsgi_application() diff --git a/tests/appsec/integrations/django_tests/test_iast_django.py b/tests/appsec/integrations/django_tests/test_iast_django.py index d877272cf17..39580863142 100644 --- a/tests/appsec/integrations/django_tests/test_iast_django.py +++ b/tests/appsec/integrations/django_tests/test_iast_django.py @@ -268,6 +268,131 @@ def test_django_sqli_http_request_parameter(client, iast_span, tracer): assert loaded["vulnerabilities"][0]["hash"] == hash_value +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_untrusted_serialization_yaml(client, iast_span, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + iast_span, + tracer, + url="/iast/untrusted/yaml/?input=a: 1", + ) + + vuln_type = "UNTRUSTED_SERIALIZATION" + + assert response.status_code == 200 + + loaded = load_iast_report(root_span) + + line, hash_value = get_line_and_hash("untrusted_serialization_yaml_view", vuln_type, filename=TEST_FILE) + + assert loaded["sources"] == [ + { + "name": "input", + "origin": "http.request.parameter", + "value": "a: 1", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_untrusted_serialization_yaml_load(client, iast_span, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + iast_span, + tracer, + url="/iast/untrusted/yaml/load/?input=a: 1", + ) + + vuln_type = "UNTRUSTED_SERIALIZATION" + + assert response.status_code == 200 + + loaded = load_iast_report(root_span) + + line, hash_value = get_line_and_hash("untrusted_serialization_yaml_load", vuln_type, filename=TEST_FILE) + + assert loaded["sources"] == [ + { + "name": "input", + "origin": "http.request.parameter", + "value": "a: 1", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_untrusted_serialization_yaml_safe_load(client, iast_span, tracer): + root_span, response = _aux_appsec_get_root_span( + 
client, + iast_span, + tracer, + url="/iast/untrusted/yaml/safe_load/?input=a: 1", + ) + + assert response.status_code == 200 + + loaded = load_iast_report(root_span) + # safe_load should not be reported as untrusted serialization + assert loaded is None + + +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_untrusted_serialization_pickle_smoke(client, iast_span, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + iast_span, + tracer, + url="/iast/untrusted/pickle/?input=AQID", + ) + + vuln_type = "UNTRUSTED_SERIALIZATION" + + assert response.status_code == 200 + loaded = load_iast_report(root_span) + + line, hash_value = get_line_and_hash("untrusted_serialization_pickle", vuln_type, filename=TEST_FILE) + assert loaded["sources"] == [] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_untrusted_serialization_dill_smoke(client, iast_span, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + iast_span, + tracer, + url="/iast/untrusted/dill/?input=AQID", + ) + + vuln_type = "UNTRUSTED_SERIALIZATION" + + assert response.status_code == 200 + + loaded = load_iast_report(root_span) + + line, hash_value = get_line_and_hash("untrusted_serialization_dill", vuln_type, filename=TEST_FILE) + assert loaded["sources"] == [] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_request_parameter_name_get_and_stacktrace(client, iast_span, tracer): diff --git a/tests/appsec/integrations/django_tests/test_iast_django_testagent.py b/tests/appsec/integrations/django_tests/test_iast_django_testagent.py index 1acc85dc0f4..0ca20f9b5b3 100644 --- a/tests/appsec/integrations/django_tests/test_iast_django_testagent.py +++ b/tests/appsec/integrations/django_tests/test_iast_django_testagent.py @@ -1,9 +1,15 @@ import concurrent.futures +import pytest + from ddtrace.appsec._iast.constants import VULN_CMDI from ddtrace.appsec._iast.constants import VULN_HEADER_INJECTION +from ddtrace.appsec._iast.constants import VULN_INSECURE_HASHING_TYPE +from ddtrace.appsec._iast.constants import VULN_SSRF +from ddtrace.appsec._iast.constants import VULN_UNTRUSTED_SERIALIZATION from ddtrace.internal.logger import get_logger from tests.appsec.appsec_utils import django_server +from tests.appsec.appsec_utils import gunicorn_django_server from tests.appsec.iast.iast_utils import load_iast_report from tests.appsec.integrations.utils_testagent import _get_span from tests.appsec.integrations.utils_testagent import clear_session @@ -60,6 +66,209 @@ def test_iast_cmdi(): assert vulnerability["hash"] +def test_iast_untrusted_serialization_yaml(): + token = "test_iast_untrusted_serialization_yaml" + _ = start_trace(token) + with django_server( + iast_enabled="true", + token=token, + env={ + "DD_TRACE_DEBUG": "true", + "_DD_IAST_DEBUG": "true", + 
"_DD_IAST_PATCH_MODULES": ( + "benchmarks.," "tests.appsec.," "tests.appsec.integrations.django_tests.django_app.views." + ), + }, + ) as context: + _, django_client, pid = context + + response = django_client.get("/iast/untrusted/yaml/?input=a: 1") + + assert response.status_code == 200 + + response_tracer = _get_span(token) + spans_with_iast = [] + vulnerabilities = [] + for trace in response_tracer: + for span in trace: + if span.get("metrics", {}).get("_dd.iast.enabled") == 1.0: + spans_with_iast.append(span) + iast_data = load_iast_report(span) + if iast_data: + vulnerabilities.append(iast_data.get("vulnerabilities")) + clear_session(token) + + assert len(spans_with_iast) == 2, f"Invalid number of spans ({len(spans_with_iast)}):\n{spans_with_iast}" + assert len(vulnerabilities) == 1, f"Invalid number of vulnerabilities ({len(vulnerabilities)}):\n{vulnerabilities}" + assert len(vulnerabilities[0]) == 1 + + vulnerability = vulnerabilities[0][0] + assert vulnerability["type"] == VULN_UNTRUSTED_SERIALIZATION + assert vulnerability["location"]["spanId"] + assert vulnerability["location"]["stackId"] + assert vulnerability["hash"] + + +@pytest.mark.parametrize( + "server, config", + ( + ( + gunicorn_django_server, + { + "workers": "3", + "use_threads": False, + "use_gevent": False, + "env": { + "DD_APM_TRACING_ENABLED": "false", + }, + }, + ), + ( + gunicorn_django_server, + { + "workers": "3", + "use_threads": True, + "use_gevent": False, + "env": { + "DD_APM_TRACING_ENABLED": "false", + }, + }, + ), + ( + gunicorn_django_server, + { + "workers": "3", + "use_threads": True, + "use_gevent": True, + "env": { + "DD_APM_TRACING_ENABLED": "false", + }, + }, + ), + ( + gunicorn_django_server, + { + "workers": "1", + "use_threads": True, + "use_gevent": True, + "env": { + "DD_APM_TRACING_ENABLED": "false", + "_DD_IAST_PROPAGATION_ENABLED": "false", + }, + }, + ), + ( + gunicorn_django_server, + { + "workers": "1", + "use_threads": True, + "use_gevent": True, + "env": { + "DD_APM_TRACING_ENABLED": "false", + }, + }, + ), + ( + gunicorn_django_server, + { + "workers": "1", + "use_threads": True, + "use_gevent": True, + "env": {"_DD_IAST_PROPAGATION_ENABLED": "false"}, + }, + ), + (django_server, {"env": {"DD_APM_TRACING_ENABLED": "false"}}), + ), +) +def test_iast_vulnerable_request_downstream_django(server, config): + """Mirror Flask downstream propagation test for Django server. + + Sends a request with Datadog headers to the Django endpoint which triggers a weak-hash + vulnerability and then calls a downstream endpoint to echo headers. Asserts that headers are + properly propagated and that an IAST WEAK_HASH vulnerability is reported. 
+ """ + token = "test_iast_vulnerable_request_downstream_django" + _ = start_trace(token) + env = { + "DD_APM_TRACING_ENABLED": "false", + "DD_TRACE_URLLIB3_ENABLED": "true", + } + # Merge base env with parametrized env overrides + cfg_env = dict(config.get("env", {})) + cfg_env.update(env) + config = dict(config) + config["env"] = cfg_env + # TODO(APPSEC-59081): without use_ddtrace_cmd=False it raises a `NameError: name 'sys' is not defined` + # File "/proj/dd-trace-py/ddtrace/internal/module.py", line 301, in _exec_module + # self.loader.exec_module(module) + # File "", line 883, in exec_module + # File "", line 241, in _call_with_frames_removed + # File "/proj/dd-trace-py/tests/appsec/integrations/django_tests/django_app/wsgi.py", line 13, in + # application = get_wsgi_application() + # File "/proj/venv310/site-packages/django/core/wsgi.py", line 12, in get_wsgi_application + # django.setup(set_prefix=False) + # File "/proj/venv310/site-packages/django/__init__.py", line 24, in setup + # apps.populate(settings.INSTALLED_APPS) + # File "/proj/dd-trace-py/ddtrace/contrib/internal/trace_utils.py", line 315, in wrapper + # return func(mod, pin, wrapped, instance, args, kwargs) + # File "/proj/dd-trace-py/ddtrace/contrib/internal/django/patch.py", line 124, in traced_populate + # ret = func(*args, **kwargs) + # File "/proj/venv310/site-packages/django/apps/registry.py", line 91, in populate + # app_config = AppConfig.create(entry) + # File "/proj/venv310/site-packages/django/apps/config.py", line 121, in create + # if module_has_submodule(app_module, APPS_MODULE_NAME): + # File "/proj/venv310/site-packages/django/utils/module_loading.py", line 85, in module_has_submodule + # return importlib_find(full_module_name, package_path) is not None + # File "/python310importlib/util.py", line 103, in find_spec + # return _find_spec(fullname, parent_path) + # File "/python310importlib/_bootstrap.py", line 923, in _find_spec + # meta_path = sys.meta_path + # NameError: name 'sys' is not defined + with server(use_ddtrace_cmd=False, iast_enabled="true", token=token, port=8050, **config) as context: + _, django_client, pid = context + + trace_id = 1212121212121212121 + parent_id = 34343434 + response = django_client.get( + "/vulnerablerequestdownstream/?port=8050", + headers={ + "x-datadog-trace-id": str(trace_id), + "x-datadog-parent-id": str(parent_id), + "x-datadog-sampling-priority": "-1", + "x-datadog-origin": "rum", + "x-datadog-tags": "_dd.p.other=1", + }, + ) + + assert response.status_code == 200 + downstream_headers = response.json() + assert downstream_headers["X-Datadog-Origin"] == "rum" + assert downstream_headers["X-Datadog-Parent-Id"] != str(parent_id) + assert downstream_headers["X-Datadog-Sampling-Priority"] == "2" + + response_tracer = _get_span(token) + spans = [] + spans_with_iast = [] + vulnerabilities = [] + for trace in response_tracer: + for span in trace: + if span.get("metrics", {}).get("_dd.iast.enabled") == 1.0: + spans_with_iast.append(span) + iast_data = load_iast_report(span) + if iast_data: + vulnerabilities.append(iast_data.get("vulnerabilities")) + spans.append(span) + clear_session(token) + + assert len(spans) >= 8, f"Incorrect number of spans ({len(spans)}):\n{spans}" + assert len(spans_with_iast) >= 2, f"Invalid number of spans with IAST ({len(spans_with_iast)}):\n{spans_with_iast}" + assert len(vulnerabilities) >= 1, f"Invalid number of vulnerabilities ({len(vulnerabilities)}):\n{vulnerabilities}" + assert len(vulnerabilities[0]) >= 1 + for vulnerability in 
vulnerabilities[0]: + assert vulnerability["type"] in {VULN_INSECURE_HASHING_TYPE, VULN_SSRF} + assert vulnerability["hash"] + + def test_iast_concurrent_requests_limit_django(): """Ensure only DD_IAST_MAX_CONCURRENT_REQUESTS requests have IAST enabled concurrently in Django app. diff --git a/tests/appsec/integrations/fastapi_tests/app.py b/tests/appsec/integrations/fastapi_tests/app.py index e9aa4d17c63..945617509ee 100644 --- a/tests/appsec/integrations/fastapi_tests/app.py +++ b/tests/appsec/integrations/fastapi_tests/app.py @@ -1,9 +1,13 @@ import asyncio +import hashlib import subprocess from fastapi import FastAPI from fastapi import Form +from fastapi import Request +from fastapi.responses import JSONResponse from fastapi.responses import Response +import urllib3 import uvicorn from ddtrace import tracer @@ -77,6 +81,29 @@ async def cmdi_form(command: str = Form(...)): subp.wait() return Response(content="OK") + @app.get("/returnheaders") + def return_headers(request: Request): + headers = {} + for key, value in request.headers.items(): + headers[key] = value + return JSONResponse(headers) + + @app.get("/vulnerablerequestdownstream") + def vulnerable_request_downstream(port: int = 8050): + """Trigger a weak-hash vulnerability, then call downstream return-headers endpoint. + + Mirrors Flask/Django behavior to validate header propagation and IAST instrumentation. + """ + # Trigger weak hash for IAST + m = hashlib.md5() + m.update(b"Nobody inspects") + m.update(b" the spammish repetition") + _ = m.digest() + http_ = urllib3.PoolManager() + # Sending a GET request and getting back response as HTTPResponse object. + response = http_.request("GET", f"http://0.0.0.0:{port}/returnheaders") + return response.data + return app diff --git a/tests/appsec/integrations/fastapi_tests/test_iast_fastapi_testagent.py b/tests/appsec/integrations/fastapi_tests/test_iast_fastapi_testagent.py index 41e48fb5174..1e8ec03f183 100644 --- a/tests/appsec/integrations/fastapi_tests/test_iast_fastapi_testagent.py +++ b/tests/appsec/integrations/fastapi_tests/test_iast_fastapi_testagent.py @@ -1,9 +1,12 @@ import concurrent.futures +import json import pytest from requests.exceptions import ConnectionError # noqa: A004 from ddtrace.appsec._iast.constants import VULN_CMDI +from ddtrace.appsec._iast.constants import VULN_INSECURE_HASHING_TYPE +from ddtrace.appsec._iast.constants import VULN_SSRF from tests.appsec.appsec_utils import uvicorn_server from tests.appsec.iast.iast_utils import load_iast_report from tests.appsec.integrations.utils_testagent import _get_span @@ -203,3 +206,66 @@ def worker(): false_count = results.count(False) assert true_count == max_concurrent assert false_count == rejected_requests + + +def test_iast_vulnerable_request_downstream_fastapi(): + """Mirror downstream propagation test for FastAPI server. + + Sends a request with Datadog headers to the FastAPI endpoint which triggers a weak-hash + vulnerability and then calls a downstream endpoint to echo headers. Asserts that headers are + properly propagated and that an IAST WEAK_HASH vulnerability is reported. 
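    A note on the double decode performed below (an inference from the FastAPI
    endpoint returning raw ``response.data`` bytes through the default
    JSONResponse, not something this diff states): the bytes are re-encoded as
    a JSON string, so the client has to parse twice:

        body = response.json()      # first parse -> the downstream JSON as a str
        headers = json.loads(body)  # second parse -> the actual dict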
+ """ + token = "test_iast_vulnerable_request_downstream_fastapi" + _ = start_trace(token) + env = { + "DD_APM_TRACING_ENABLED": "false", + "DD_TRACE_URLLIB3_ENABLED": "true", + } + with uvicorn_server(iast_enabled="true", token=token, env=env, port=8050) as context: + _, fastapi_client, pid = context + + trace_id = 1212121212121212121 + parent_id = 34343434 + response = fastapi_client.get( + "/vulnerablerequestdownstream", + params={"port": 8050}, + headers={ + "x-datadog-trace-id": str(trace_id), + "x-datadog-parent-id": str(parent_id), + "x-datadog-sampling-priority": "-1", + "x-datadog-origin": "rum", + "x-datadog-tags": "_dd.p.other=1", + }, + ) + + assert response.status_code == 200 + downstream_headers = json.loads(response.json()) + print(type(downstream_headers)) + print(downstream_headers) + assert downstream_headers["x-datadog-origin"] == "rum" + assert downstream_headers["x-datadog-parent-id"] != "34343434" + assert "_dd.p.other=1" in downstream_headers["x-datadog-tags"] + assert downstream_headers["x-datadog-sampling-priority"] == "2" + assert downstream_headers["x-datadog-trace-id"] == "1212121212121212121" + + response_tracer = _get_span(token) + spans = [] + spans_with_iast = [] + vulnerabilities = [] + for trace in response_tracer: + for span in trace: + if span.get("metrics", {}).get("_dd.iast.enabled") == 1.0: + spans_with_iast.append(span) + iast_data = load_iast_report(span) + if iast_data: + vulnerabilities.append(iast_data.get("vulnerabilities")) + spans.append(span) + clear_session(token) + + assert len(spans) >= 6, f"Incorrect number of spans ({len(spans)}):\n{spans}" + assert len(spans_with_iast) >= 2, f"Invalid number of spans with IAST ({len(spans_with_iast)}):\n{spans_with_iast}" + assert len(vulnerabilities) >= 1, f"Invalid number of vulnerabilities ({len(vulnerabilities)}):\n{vulnerabilities}" + assert len(vulnerabilities[0]) >= 1 + for vulnerability in vulnerabilities[0]: + assert vulnerability["type"] in {VULN_INSECURE_HASHING_TYPE, VULN_SSRF} + assert vulnerability["hash"] diff --git a/tests/appsec/integrations/fixtures/patch_gevent.py b/tests/appsec/integrations/fixtures/patch_gevent.py new file mode 100644 index 00000000000..061bbd5e67f --- /dev/null +++ b/tests/appsec/integrations/fixtures/patch_gevent.py @@ -0,0 +1,34 @@ +import types + + +code = """ +# Gevent monkey-patching scenario that previously interacted badly with shutdown flows. +# We only import and patch, then exercise urllib3 PoolManager open/clear lifecycle. +from gevent import monkey as _monkey +_monkey.patch_all() + +import urllib3 + + +def gevent_urllib3_poolmanager() -> str: + pm = urllib3.PoolManager(num_pools=1) + try: + pm.connection_from_host("localhost", port=80, scheme="http") + ok = True + return f"OK:{ok}" + finally: + pm.clear() +""" + + +def gevent_urllib3_poolmanager(): + """Monkey-patch gevent and use urllib3 PoolManager to simulate gevent+urllib3 usage. + + Returns an OK marker that tests can assert on, and helps ensure no tainting occurs. 
+ """ + module_name = "test_" + "gevent_urllib3" + compiled_code = compile(code, "tests/appsec/integrations/packages_tests/", mode="exec") + module_changed = types.ModuleType(module_name) + exec(compiled_code, module_changed.__dict__) + result = eval("gevent_urllib3_poolmanager()", module_changed.__dict__) + return result diff --git a/tests/appsec/integrations/fixtures/patch_socket.py b/tests/appsec/integrations/fixtures/patch_socket.py new file mode 100644 index 00000000000..aadb1b27c3f --- /dev/null +++ b/tests/appsec/integrations/fixtures/patch_socket.py @@ -0,0 +1,31 @@ +import types + + +code = """ +import socket + +def do_socketpair_roundtrip() -> str: + s1, s2 = socket.socketpair() + try: + msg = b"ping" + s1.sendall(msg) + data = s2.recv(16) + ok = data == msg + return f"OK:{ok}" + finally: + s1.close() + s2.close() +""" + + +def socketpair_roundtrip(): + """Create a module at runtime and exercise socket open/send/recv/close. + + This simulates late imports and runtime execution paths post-IAST patching. + """ + module_name = "test_" + "socketpair" + compiled_code = compile(code, "tests/appsec/integrations/packages_tests/", mode="exec") + module_changed = types.ModuleType(module_name) + exec(compiled_code, module_changed.__dict__) + result = eval("do_socketpair_roundtrip()", module_changed.__dict__) + return result diff --git a/tests/appsec/integrations/fixtures/patch_urllib3.py b/tests/appsec/integrations/fixtures/patch_urllib3.py new file mode 100644 index 00000000000..4adc4f8eb9b --- /dev/null +++ b/tests/appsec/integrations/fixtures/patch_urllib3.py @@ -0,0 +1,29 @@ +import types + + +code = """ +import urllib3 + +def poolmanager_open_clear() -> str: + pm = urllib3.PoolManager(num_pools=1) + try: + # Create a connection pool for localhost (no actual request is made) + pm.connection_from_host("localhost", port=80, scheme="http") + ok = True + return f"OK:{ok}" + finally: + pm.clear() +""" + + +def urllib3_poolmanager_open_clear(): + """Exercise urllib3.PoolManager open/clear lifecycle without network IO. + + This mirrors scenarios where PoolManager is used and cleared during app shutdown. 
+ """ + module_name = "test_" + "urllib3" + compiled_code = compile(code, "tests/appsec/integrations/packages_tests/", mode="exec") + module_changed = types.ModuleType(module_name) + exec(compiled_code, module_changed.__dict__) + result = eval("poolmanager_open_clear()", module_changed.__dict__) + return result diff --git a/tests/appsec/integrations/flask_tests/test_appsec_flask.py b/tests/appsec/integrations/flask_tests/test_appsec_flask.py index 4ee7737d136..c96817145e0 100644 --- a/tests/appsec/integrations/flask_tests/test_appsec_flask.py +++ b/tests/appsec/integrations/flask_tests/test_appsec_flask.py @@ -89,21 +89,25 @@ def test_route(user_id): assert resp.status_code == 200 +@pytest.mark.parametrize( + "function", + ["open", "os_system"], +) @pytest.mark.parametrize( "iast_enabled", ["true", "false"], ) @pytest.mark.parametrize("appsec_enabled", ["true", "false"]) -def test_flask_common_modules_patch_read(iast_enabled, appsec_enabled): +def test_flask_common_modules_patch(function, iast_enabled, appsec_enabled): with flask_server( - appsec_enabled=iast_enabled, iast_enabled=appsec_enabled, token=None, port=_PORT, assert_debug=False + appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, token=None, port=_PORT, assert_debug=False ) as context: _, flask_client, pid = context - response = flask_client.get("/common-modules-patch-read") + response = flask_client.get(f"/common-modules-patch?function={function}") assert response.status_code == 200 - if iast_enabled == appsec_enabled == "false": + if appsec_enabled == "false": assert response.content == b"OK: False" else: assert response.content == b"OK: True" diff --git a/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py b/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py index 7c223cabdce..2b724c9b69b 100644 --- a/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py +++ b/tests/appsec/integrations/flask_tests/test_flask_remoteconfig.py @@ -9,7 +9,7 @@ import pytest -from tests.appsec.appsec_utils import gunicorn_server +from tests.appsec.appsec_utils import gunicorn_flask_server from tests.appsec.integrations.flask_tests.utils import _PORT from tests.appsec.integrations.flask_tests.utils import _multi_requests from tests.appsec.integrations.flask_tests.utils import _request_200 @@ -179,7 +179,7 @@ def _request_403(client, debug_mode=False, max_retries=40, sleep_time=1): def test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled(): token = "test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled_{}".format(str(uuid.uuid4())) - with gunicorn_server(remote_configuration_enabled="false", token=token, port=_PORT) as context: + with gunicorn_flask_server(remote_configuration_enabled="false", token=token, port=_PORT) as context: _, gunicorn_client, pid = context _request_200(gunicorn_client) @@ -193,7 +193,7 @@ def test_load_testing_appsec_ip_blocking_gunicorn_rc_disabled(): def test_load_testing_appsec_ip_blocking_gunicorn_block(): token = "test_load_testing_appsec_ip_blocking_gunicorn_block_{}".format(str(uuid.uuid4())) - with gunicorn_server(token=token, port=_PORT, use_ddtrace_cmd=False) as context: + with gunicorn_flask_server(token=token, port=_PORT, use_ddtrace_cmd=False) as context: _, gunicorn_client, pid = context _request_200(gunicorn_client) @@ -209,7 +209,7 @@ def test_load_testing_appsec_ip_blocking_gunicorn_block(): def test_load_testing_appsec_ip_blocking_gunicorn_block_and_kill_child_worker(): token = 
"test_load_testing_appsec_ip_blocking_gunicorn_block_and_kill_child_worker_{}".format(str(uuid.uuid4())) - with gunicorn_server(token=token, port=_PORT, use_ddtrace_cmd=False) as context: + with gunicorn_flask_server(token=token, port=_PORT, use_ddtrace_cmd=False) as context: _, gunicorn_client, pid = context _request_200(gunicorn_client) @@ -233,7 +233,7 @@ def test_load_testing_appsec_1click_and_ip_blocking_gunicorn_block_and_kill_chil token = "test_load_testing_appsec_1click_and_ip_blocking_gunicorn_block_and_kill_child_worker_{}".format( str(uuid.uuid4()) ) - with gunicorn_server(appsec_enabled="", token=token, port=_PORT) as context: + with gunicorn_flask_server(appsec_enabled="", token=token, port=_PORT) as context: _, gunicorn_client, pid = context _request_200(gunicorn_client, debug_mode=False) diff --git a/tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py b/tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py index 08f62d52dde..3e4a99a1959 100644 --- a/tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py +++ b/tests/appsec/integrations/flask_tests/test_gunicorn_handlers.py @@ -4,7 +4,7 @@ from requests.exceptions import ConnectionError # noqa: A004 from tests.appsec.appsec_utils import flask_server -from tests.appsec.appsec_utils import gunicorn_server +from tests.appsec.appsec_utils import gunicorn_flask_server _PORT = 8030 @@ -13,7 +13,7 @@ @pytest.mark.parametrize("appsec_enabled", ("true", "false")) @pytest.mark.parametrize("apm_tracing_enabled", ("true", "false")) @pytest.mark.parametrize("tracer_enabled", ("true", "false")) -@pytest.mark.parametrize("server", ((gunicorn_server, flask_server))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server, flask_server))) def test_when_appsec_reads_chunked_requests(appsec_enabled, apm_tracing_enabled, tracer_enabled, server): def read_in_chunks(filepath, chunk_size=1024): file_object = open(filepath, "rb") @@ -55,7 +55,7 @@ def read_in_chunks(filepath, chunk_size=1024): @pytest.mark.parametrize("appsec_enabled", ("true", "false")) @pytest.mark.parametrize("apm_tracing_enabled", ("true", "false")) @pytest.mark.parametrize("tracer_enabled", ("true", "false")) -@pytest.mark.parametrize("server", ((gunicorn_server, flask_server))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server, flask_server))) def test_corner_case_when_appsec_reads_chunked_request_with_no_body( appsec_enabled, apm_tracing_enabled, tracer_enabled, server ): @@ -82,7 +82,7 @@ def test_corner_case_when_appsec_reads_chunked_request_with_no_body( @pytest.mark.parametrize("appsec_enabled", ("true", "false")) @pytest.mark.parametrize("apm_tracing_enabled", ("true", "false")) @pytest.mark.parametrize("tracer_enabled", ("true", "false")) -@pytest.mark.parametrize("server", ((gunicorn_server, flask_server))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server, flask_server))) def test_when_appsec_reads_empty_body_no_hang(appsec_enabled, apm_tracing_enabled, tracer_enabled, server): """A bug was detected when running a Flask application locally @@ -121,7 +121,7 @@ def test_when_appsec_reads_empty_body_no_hang(appsec_enabled, apm_tracing_enable @pytest.mark.parametrize("appsec_enabled", ("true", "false")) @pytest.mark.parametrize("apm_tracing_enabled", ("true", "false")) @pytest.mark.parametrize("tracer_enabled", ("true", "false")) -@pytest.mark.parametrize("server", ((gunicorn_server,))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server,))) def test_when_appsec_reads_empty_body_and_content_length_no_hang( 
appsec_enabled, apm_tracing_enabled, tracer_enabled, server ): diff --git a/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py b/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py index 42fa0536bc5..5a22b258679 100644 --- a/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py +++ b/tests/appsec/integrations/flask_tests/test_iast_flask_patching.py @@ -1,7 +1,7 @@ import pytest from tests.appsec.appsec_utils import flask_server -from tests.appsec.appsec_utils import gunicorn_server +from tests.appsec.appsec_utils import gunicorn_flask_server from tests.appsec.integrations.flask_tests.utils import _PORT from tests.appsec.integrations.flask_tests.utils import _request_200 @@ -92,7 +92,7 @@ def test_multiple_requests(): """we want to validate context is working correctly among multiple request and no race condition creating and destroying contexts """ - with gunicorn_server(remote_configuration_enabled="false", iast_enabled="true", port=_PORT) as context: + with gunicorn_flask_server(remote_configuration_enabled="false", iast_enabled="true", port=_PORT) as context: _, client, pid = context _request_200( diff --git a/tests/appsec/integrations/flask_tests/test_iast_flask_testagent.py b/tests/appsec/integrations/flask_tests/test_iast_flask_testagent.py index 0ea0a05a2ed..37bb6ec53c6 100644 --- a/tests/appsec/integrations/flask_tests/test_iast_flask_testagent.py +++ b/tests/appsec/integrations/flask_tests/test_iast_flask_testagent.py @@ -6,10 +6,11 @@ from ddtrace.appsec._iast.constants import VULN_CMDI from ddtrace.appsec._iast.constants import VULN_CODE_INJECTION from ddtrace.appsec._iast.constants import VULN_INSECURE_HASHING_TYPE +from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.appsec._iast.constants import VULN_STACKTRACE_LEAK from ddtrace.appsec._iast.constants import VULN_UNVALIDATED_REDIRECT from tests.appsec.appsec_utils import flask_server -from tests.appsec.appsec_utils import gunicorn_server +from tests.appsec.appsec_utils import gunicorn_flask_server from tests.appsec.iast.iast_utils import load_iast_report from tests.appsec.integrations.flask_tests.utils import flask_version from tests.appsec.integrations.utils_testagent import _get_span @@ -17,6 +18,47 @@ from tests.appsec.integrations.utils_testagent import start_trace +_GEVENT_SERVERS_SCENARIOS = ( + ( + gunicorn_flask_server, + {"workers": "3", "use_threads": False, "use_gevent": False, "env": {}}, + ), + ( + gunicorn_flask_server, + {"workers": "3", "use_threads": True, "use_gevent": False, "env": {}}, + ), + ( + gunicorn_flask_server, + {"workers": "3", "use_threads": True, "use_gevent": True, "env": {}}, + ), + ( + gunicorn_flask_server, + { + "workers": "1", + "use_threads": True, + "use_gevent": True, + "env": { + "_DD_IAST_PROPAGATION_ENABLED": "false", + }, + }, + ), + ( + gunicorn_flask_server, + {"workers": "1", "use_threads": True, "use_gevent": True, "env": {}}, + ), + ( + gunicorn_flask_server, + { + "workers": "1", + "use_threads": True, + "use_gevent": True, + "env": {"_DD_IAST_PROPAGATION_ENABLED": "false"}, + }, + ), + (flask_server, {"env": {}}), +) + + @pytest.mark.skip(reason="Stacktrace error in debug mode doesn't work outside the request APPSEC-56862") def test_iast_stacktrace_error(): token = "test_iast_stacktrace_error" @@ -54,7 +96,90 @@ def test_iast_stacktrace_error(): assert vulnerability["hash"] -@pytest.mark.parametrize("server", (gunicorn_server, flask_server)) +# TODO(APPSEC-59081): this test fails for every configuration (IAST 
enable/disable, Appsec enable/disable) so +# the problem is related to the trace lifecycle +# @pytest.mark.parametrize( +# "server, config", +# ( +# ( +# gunicorn_flask_server, +# { +# "workers": "1", +# "use_threads": True, +# "use_gevent": True, +# "env": { +# "DD_APM_TRACING_ENABLED": "true", +# "DD_TRACE_URLLIB3_ENABLED": "true", +# }, +# }, +# ), +# (flask_server, {"env": {"DD_APM_TRACING_ENABLED": "true"}}), +# ), +# ) +# def test_iast_vulnerable_request_downstream_parallel(server, config): +# """Run the vulnerable_request_downstream scenario many times in parallel. +# """ +# # Keep a moderate fan-out to avoid overloading CI machines +# fan_out = int(os.environ.get("TEST_PARALLEL_RUNS", "4")) +# base_port = int(os.environ.get("TEST_PARALLEL_BASE_PORT", "8110")) +# +# def run_one(idx: int): +# tok = f"test_iast_vulnerable_request_downstream_parallel_{idx}" +# _ = start_trace(tok) +# port = base_port + idx +# with server(iast_enabled="false", token=tok, port=port, **config) as context: +# _, flask_client, pid = context +# trace_id = 1212121212121212121 +# parent_id = 34343434 +# response = flask_client.get( +# f"/vulnerablerequestdownstream?port={port}", +# headers={ +# "x-datadog-trace-id": str(trace_id), +# "x-datadog-parent-id": str(parent_id), +# "x-datadog-sampling-priority": "-1", +# "x-datadog-origin": "rum", +# "x-datadog-tags": "_dd.p.other=1", +# }, +# ) +# +# assert response.status_code == 200 +# # downstream_headers = json.loads(response.text) +# +# # assert downstream_headers["X-Datadog-Origin"] == "rum" +# # assert downstream_headers["X-Datadog-Parent-Id"] != "34343434" +# # assert "_dd.p.other=1" in downstream_headers["X-Datadog-Tags"] +# # assert downstream_headers["X-Datadog-Sampling-Priority"] == "2" +# # assert downstream_headers["X-Datadog-Trace-Id"] == "1212121212121212121" +# +# response_tracer = _get_span(tok) +# spans = [] +# spans_with_iast = [] +# vulnerabilities = [] +# +# for trace in response_tracer: +# for span in trace: +# if span.get("metrics", {}).get("_dd.iast.enabled") == 1.0: +# spans_with_iast.append(span) +# iast_data = load_iast_report(span) +# if iast_data: +# vulnerabilities.append(iast_data.get("vulnerabilities")) +# spans.append(span) +# clear_session(tok) +# +# assert len(spans) >= 28, f"Incorrect number of spans ({len(spans)}):\n{spans}" +# # assert len(spans_with_iast) == 3 +# # assert len(vulnerabilities) == 1 +# # assert len(vulnerabilities[0]) == 2 +# # vulnerability = vulnerabilities[0][0] +# # assert vulnerability["type"] == VULN_INSECURE_HASHING_TYPE +# # assert "valueParts" not in vulnerability["evidence"] +# # assert vulnerability["hash"] +# +# with concurrent.futures.ThreadPoolExecutor(max_workers=fan_out) as ex: +# list(ex.map(run_one, range(fan_out))) + + +@pytest.mark.parametrize("server", (gunicorn_flask_server, flask_server)) def test_iast_concurrent_requests_limit_flask(server): """Ensure only DD_IAST_MAX_CONCURRENT_REQUESTS requests have IAST enabled concurrently. 
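A hedged sketch of the fan-out pattern this concurrency test relies on (the real worker issues HTTP requests against the Flask app and inspects the IAST flag; request_has_iast() is a stand-in name):

    import concurrent.futures

    def fan_out(request_has_iast, n=8):
        with concurrent.futures.ThreadPoolExecutor(max_workers=n) as ex:
            results = list(ex.map(lambda _: request_has_iast(), range(n)))
        # with DD_IAST_MAX_CONCURRENT_REQUESTS=k, at most k True results are expected
        return results.count(True), results.count(False)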
@@ -96,11 +221,11 @@ def worker(): assert false_count == 0 -@pytest.mark.parametrize("server", (gunicorn_server, flask_server)) +@pytest.mark.parametrize("server", (gunicorn_flask_server, flask_server)) def test_iast_cmdi(server): token = "test_iast_cmdi" _ = start_trace(token) - with server(iast_enabled="true", token=token, port=8050, use_ddtrace_cmd=False) as context: + with server(iast_enabled="true", token=token, port=8050) as context: _, flask_client, pid = context response = flask_client.get("/iast-cmdi-vulnerability?filename=path_traversal_test_file.txt") @@ -132,11 +257,11 @@ def test_iast_cmdi(server): assert vulnerability["hash"] -@pytest.mark.parametrize("server", (gunicorn_server, flask_server)) +@pytest.mark.parametrize("server", (gunicorn_flask_server, flask_server)) def test_iast_cmdi_secure(server): token = "test_iast_cmdi_secure" _ = start_trace(token) - with server(iast_enabled="true", token=token, port=8050, use_ddtrace_cmd=False) as context: + with server(iast_enabled="true", token=token, port=8050) as context: _, flask_client, pid = context response = flask_client.get("/iast-cmdi-vulnerability-secure?filename=path_traversal_test_file.txt") @@ -152,7 +277,7 @@ def test_iast_cmdi_secure(server): clear_session(token) -@pytest.mark.parametrize("server", (gunicorn_server, flask_server)) +@pytest.mark.parametrize("server", (gunicorn_flask_server, flask_server)) def test_iast_sqli_complex(server): """Test complex SQL injection detection with SQLAlchemy in a Flask application. @@ -189,7 +314,7 @@ def test_iast_sqli_complex(server): clear_session(token) -@pytest.mark.parametrize("server", (gunicorn_server, flask_server)) +@pytest.mark.parametrize("server", (gunicorn_flask_server, flask_server)) def test_iast_header_injection_secure(server): """Test that header injection is prevented in a real Flask application. 
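Each test in this family follows the same report-inspection flow: open a test-agent session with a token, exercise one vulnerable endpoint, pull the traces back by token, and read the IAST report off each span. A condensed sketch of that flow as a hypothetical shared helper, assuming the start_trace, _get_span, load_iast_report, and clear_session helpers imported at the top of this file:

    def _assert_vulnerability_reported(server, token, endpoint, expected_type, **server_kwargs):
        # Hypothetical helper distilling the flow repeated by the tests here.
        _ = start_trace(token)
        with server(iast_enabled="true", token=token, port=8050, **server_kwargs) as context:
            _, flask_client, _pid = context
            response = flask_client.get(endpoint)
            assert response.status_code == 200
        # Collect every vulnerability attached to the spans for this session.
        vulnerabilities = []
        for trace in _get_span(token):
            for span in trace:
                report = load_iast_report(span)
                if report:
                    vulnerabilities.extend(report["vulnerabilities"])
        clear_session(token)
        assert any(v["type"] == expected_type for v in vulnerabilities)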
@@ -207,7 +332,7 @@ def test_iast_header_injection_secure(server): """ token = "test_iast_header_injection" _ = start_trace(token) - with server(iast_enabled="true", token=token, port=8050, use_ddtrace_cmd=False) as context: + with server(iast_enabled="true", token=token, port=8050) as context: _, flask_client, pid = context response = flask_client.get( @@ -234,7 +359,7 @@ def test_iast_header_injection_secure(server): assert len(vulnerabilities) == 0 -@pytest.mark.parametrize("server", ((gunicorn_server, flask_server))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server, flask_server))) def test_iast_header_injection(server): token = "test_iast_header_injection" _ = start_trace(token) @@ -278,7 +403,7 @@ def test_iast_header_injection(server): assert len(vulnerabilities) == 0 -@pytest.mark.parametrize("server", ((gunicorn_server, flask_server))) +@pytest.mark.parametrize("server", ((gunicorn_flask_server, flask_server))) def test_iast_code_injection_with_stacktrace(server): token = "test_iast_code_injection_with_stacktrace" _ = start_trace(token) @@ -321,7 +446,7 @@ def test_iast_code_injection_with_stacktrace(server): def test_iast_unvalidated_redirect(): token = "test_iast_cmdi" _ = start_trace(token) - with gunicorn_server(iast_enabled="true", token=token, port=8050) as context: + with gunicorn_flask_server(iast_enabled="true", token=token, port=8050) as context: _, flask_client, pid = context response = flask_client.get("/iast-unvalidated_redirect-header?location=malicious_url") @@ -349,84 +474,23 @@ def test_iast_unvalidated_redirect(): assert vulnerability["hash"] -@pytest.mark.parametrize( - "server, config", - ( - ( - gunicorn_server, - { - "workers": "3", - "use_threads": False, - "use_gevent": False, - "env": { - "DD_APM_TRACING_ENABLED": "false", - }, - }, - ), - ( - gunicorn_server, - { - "workers": "3", - "use_threads": True, - "use_gevent": False, - "env": { - "DD_APM_TRACING_ENABLED": "false", - }, - }, - ), - ( - gunicorn_server, - { - "workers": "3", - "use_threads": True, - "use_gevent": True, - "env": { - "DD_APM_TRACING_ENABLED": "false", - }, - }, - ), - ( - gunicorn_server, - { - "workers": "1", - "use_threads": True, - "use_gevent": True, - "env": { - "DD_APM_TRACING_ENABLED": "false", - "_DD_IAST_PROPAGATION_ENABLED": "false", - }, - }, - ), - ( - gunicorn_server, - { - "workers": "1", - "use_threads": True, - "use_gevent": True, - "env": { - "DD_APM_TRACING_ENABLED": "false", - }, - }, - ), - ( - gunicorn_server, - { - "workers": "1", - "use_threads": True, - "use_gevent": True, - "env": {"_DD_IAST_PROPAGATION_ENABLED": "false"}, - }, - ), - (flask_server, {"env": {"DD_APM_TRACING_ENABLED": "false"}}), - ), -) -def test_iast_vulnerable_request_downstream(server, config): +@pytest.mark.parametrize("server, config", _GEVENT_SERVERS_SCENARIOS) +@pytest.mark.parametrize("apm_tracing_enabled", ("true", "false")) +def test_iast_vulnerable_request_downstream(server, config, apm_tracing_enabled): """Gevent has a lot of problematic interactions with the tracer. 
When IAST applies AST transformations to a file and reloads the module using compile and exec, it can interfere with Gevent’s monkey patching """ token = "test_iast_vulnerable_request_downstream" _ = start_trace(token) - with server(iast_enabled="true", token=token, port=8050, **config) as context: + config["env"].update({"DD_TRACE_URLLIB3_ENABLED": "true"}) + with server( + iast_enabled="true", + appsec_enabled="false", + apm_tracing_enabled=apm_tracing_enabled, + token=token, + port=8050, + **config, + ) as context: _, flask_client, pid = context trace_id = 1212121212121212121 parent_id = 34343434 @@ -468,7 +532,45 @@ def test_iast_vulnerable_request_downstream(server, config): assert len(spans_with_iast) == 3 assert len(vulnerabilities) == 1 assert len(vulnerabilities[0]) == 1 - vulnerability = vulnerabilities[0][0] - assert vulnerability["type"] == VULN_INSECURE_HASHING_TYPE - assert "valueParts" not in vulnerability["evidence"] - assert vulnerability["hash"] + for vulnerability in vulnerabilities[0]: + assert vulnerability["type"] in {VULN_INSECURE_HASHING_TYPE, VULN_SSRF} + assert vulnerability["hash"] + + +@pytest.mark.parametrize("server, config", _GEVENT_SERVERS_SCENARIOS) +@pytest.mark.parametrize("iast_enabled", ("true", "false")) +def test_gevent_sensitive_socketpair(server, config, iast_enabled): + """Validate socket.socketpair lifecycle under various Gunicorn/gevent configurations.""" + token = "test_gevent_sensitive_socketpair" + _ = start_trace(token) + with server(iast_enabled=iast_enabled, appsec_enabled="false", token=token, port=8050, **config) as context: + _, flask_client, pid = context + response = flask_client.get("/socketpair") + assert response.status_code == 200 + assert response.text == "OK:True" + + +@pytest.mark.parametrize("server, config", _GEVENT_SERVERS_SCENARIOS) +@pytest.mark.parametrize("iast_enabled", ("true", "false")) +def test_gevent_sensitive_greenlet(server, config, iast_enabled): + """Validate gevent Greenlet execution under various Gunicorn/gevent configurations.""" + token = "test_gevent_sensitive_greenlet" + _ = start_trace(token) + with server(iast_enabled=iast_enabled, appsec_enabled="false", token=token, port=8050, **config) as context: + _, flask_client, pid = context + response = flask_client.get("/gevent-greenlet") + assert response.status_code == 200 + assert response.text == "OK:True" + + +@pytest.mark.parametrize("server, config", _GEVENT_SERVERS_SCENARIOS) +@pytest.mark.parametrize("iast_enabled", ("true", "false")) +def test_gevent_sensitive_subprocess(server, config, iast_enabled): + """Validate subprocess.Popen lifecycle under various Gunicorn/gevent configurations.""" + token = "test_gevent_sensitive_subprocess" + _ = start_trace(token) + with server(iast_enabled=iast_enabled, appsec_enabled="false", token=token, port=8050, **config) as context: + _, flask_client, pid = context + response = flask_client.get("/subprocess-popen") + assert response.status_code == 200 + assert response.text == "OK:True" diff --git a/tests/appsec/integrations/langchain_tests/test_iast_langchain.py b/tests/appsec/integrations/langchain_tests/test_iast_langchain.py index afb28aac1fd..d7ed217d464 100644 --- a/tests/appsec/integrations/langchain_tests/test_iast_langchain.py +++ b/tests/appsec/integrations/langchain_tests/test_iast_langchain.py @@ -29,9 +29,11 @@ from ddtrace.appsec._iast._taint_tracking._taint_objects_base import is_pyobject_tainted from ddtrace.appsec._iast.constants import VULN_CMDI from tests.appsec.iast.conftest import 
iast_span_defaults # noqa: F401 +from tests.appsec.iast.iast_utils import _iast_patched_module TEST_FILE = "tests/appsec/integrations/langchain_tests/test_iast_langchain.py" +mod = _iast_patched_module("langchain_experimental.llm_bash.bash") def test_prompt_template_format(iast_span_defaults): # noqa: F811 diff --git a/tests/appsec/integrations/packages_tests/test_iast_shutdown_paths.py b/tests/appsec/integrations/packages_tests/test_iast_shutdown_paths.py new file mode 100644 index 00000000000..37fdf6008e8 --- /dev/null +++ b/tests/appsec/integrations/packages_tests/test_iast_shutdown_paths.py @@ -0,0 +1,32 @@ +from ddtrace.appsec._iast._taint_tracking._taint_objects_base import get_tainted_ranges +from ddtrace.appsec._iast._taint_tracking._taint_objects_base import is_pyobject_tainted +from tests.appsec.iast.iast_utils import _iast_patched_module + + +mod_socket = _iast_patched_module("tests.appsec.integrations.fixtures.patch_socket", should_patch_iast=True) +mod_urllib3 = _iast_patched_module("tests.appsec.integrations.fixtures.patch_urllib3", should_patch_iast=True) + + +def test_socketpair_roundtrip(): + """Exercise socket open/send/recv/close lifecycle and assert untainted result.""" + value = mod_socket.socketpair_roundtrip() + assert value == "OK:True" + assert not get_tainted_ranges(value) + assert not is_pyobject_tainted(value) + + +def test_urllib3_poolmanager_open_clear(): + """Exercise urllib3 PoolManager open/clear lifecycle and assert untainted result.""" + value = mod_urllib3.urllib3_poolmanager_open_clear() + assert value == "OK:True" + assert not get_tainted_ranges(value) + assert not is_pyobject_tainted(value) + + +def test_gevent_urllib3_poolmanager(): + """If gevent is available, exercise gevent.patch_all + urllib3 PoolManager and assert untainted result.""" + mod_gevent = _iast_patched_module("tests.appsec.integrations.fixtures.patch_gevent", should_patch_iast=True) + value = mod_gevent.gevent_urllib3_poolmanager() + assert value == "OK:True" + assert not get_tainted_ranges(value) + assert not is_pyobject_tainted(value) diff --git a/tests/ci_visibility/api/fake_runner_mix_fail_itr_suite_level.py b/tests/ci_visibility/api/fake_runner_mix_fail_itr_suite_level.py index 008acd1f1b0..b55b12aa184 100644 --- a/tests/ci_visibility/api/fake_runner_mix_fail_itr_suite_level.py +++ b/tests/ci_visibility/api/fake_runner_mix_fail_itr_suite_level.py @@ -16,6 +16,7 @@ Comment lines in the test start/finish lines are there for visual distinction. 
""" + import json from multiprocessing import freeze_support from pathlib import Path @@ -427,8 +428,19 @@ def main(): if __name__ == "__main__": freeze_support() # NOTE: this is only safe because these tests are run in a subprocess - import os - os.environ["_DD_CIVISIBILITY_ITR_SUITE_MODE"] = "1" - with mock.patch("ddtrace.internal.ci_visibility.CIVisibility.is_itr_enabled", return_value=True): - main() + from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings + + itr_settings = TestVisibilityAPISettings( + coverage_enabled=False, + skipping_enabled=True, + require_git=False, + itr_enabled=True, + flaky_test_retries_enabled=False, + ) + + mock.patch( + "ddtrace.internal.ci_visibility.recorder.CIVisibility._check_enabled_features", return_value=itr_settings + ).start() + + main() diff --git a/tests/ci_visibility/api/test_api_fake_runners.py b/tests/ci_visibility/api/test_api_fake_runners.py index b5f1cd81b3d..40a7206470f 100644 --- a/tests/ci_visibility/api/test_api_fake_runners.py +++ b/tests/ci_visibility/api/test_api_fake_runners.py @@ -272,12 +272,8 @@ def test_manual_api_fake_runner_mix_fail_itr_suite_level(self): mock_ci_env=True, ), replace_os_env=True, - ), mock.patch( - "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_settings", - return_value=TestVisibilityAPISettings(False, False, False, False, False), - ), mock.patch( - "ddtrace.internal.ci_visibility.recorder.ddconfig", _get_default_civisibility_ddconfig() ): + # The fake runner handles its own ITR mocking internally subprocess.run(["python", "fake_runner_mix_fail_itr_suite_level.py"]) @snapshot(ignores=SNAPSHOT_IGNORES) diff --git a/tests/conftest.py b/tests/conftest.py index 48ef8521ddb..9742fc2ef81 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,7 @@ import ast import base64 import contextlib +import copy import functools import http.client as httplib import importlib @@ -19,7 +20,9 @@ from tempfile import gettempdir import time from typing import Any # noqa:F401 +from typing import Dict from typing import Generator # noqa:F401 +from typing import List from typing import Tuple # noqa:F401 from unittest import mock from urllib import parse @@ -32,7 +35,6 @@ from ddtrace.internal.core import crashtracking from ddtrace.internal.remoteconfig.client import RemoteConfigClient from ddtrace.internal.remoteconfig.worker import remoteconfig_poller -from ddtrace.internal.runtime import get_runtime_id from ddtrace.internal.service import ServiceStatus from ddtrace.internal.service import ServiceStatusError from ddtrace.internal.telemetry import TelemetryWriter @@ -580,7 +582,7 @@ def clear(self): pytest.fail("Failed to clear session: %s" % self.token) return True - def get_requests(self, request_type=None, filter_heartbeats=True): + def get_requests(self, filter_heartbeats=True): """Get a list of the requests sent to the test agent Results are in reverse order by ``seq_id`` @@ -595,25 +597,42 @@ def get_requests(self, request_type=None, filter_heartbeats=True): # /test/session/requests captures non telemetry payloads, ignore these requests continue req["body"] = json.loads(base64.b64decode(req["body"])) - # filter heartbeat requests to reduce noise + if req["body"]["request_type"] == "app-heartbeat" and filter_heartbeats: continue - if request_type is None or req["body"]["request_type"] == request_type: - requests.append(req) + requests.append(req) return sorted(requests, key=lambda r: r["body"]["seq_id"], reverse=True) - def get_events(self, event_type=None, 
filter_heartbeats=True, subprocess=False): + def get_events(self, event_type=None, filter_heartbeats=True): """Get a list of the event payloads sent to the test agent Results are in reverse order by ``seq_id`` """ - requests = self.get_requests(event_type, filter_heartbeats) - if subprocess: - # Use get_runtime_id to filter telemetry events generated in the current process - runtime_id = get_runtime_id() - requests = [req for req in requests if req["body"]["runtime_id"] != runtime_id] - return [req["body"] for req in requests] + requests = self.get_requests() + events = [] + for req in requests: + for req_body in self._get_request_bodies(req): + if filter_heartbeats and req_body["request_type"] == "app-heartbeat": + # filter heartbeat events to reduce noise + continue + if event_type is None or req_body["request_type"] == event_type: + events.append(req_body) + return events + + def _get_request_bodies(self, req: Dict[str, Any]) -> List[Dict[str, Any]]: + if req["body"]["request_type"] == "message-batch": + payloads = req["body"]["payload"] + else: + payloads = [{"payload": req["body"]["payload"], "request_type": req["body"]["request_type"]}] + + requests = [] + for payload in payloads: + req_body = copy.deepcopy(req["body"]) + req_body["request_type"] = payload["request_type"] + req_body["payload"] = payload["payload"] + requests.append(req_body) + return requests def get_metrics(self, name=None): metrics = [] diff --git a/tests/contrib/aiobotocore/test.py b/tests/contrib/aiobotocore/test.py index 72c36461a41..5e7151797b0 100644 --- a/tests/contrib/aiobotocore/test.py +++ b/tests/contrib/aiobotocore/test.py @@ -38,6 +38,7 @@ async def test_traced_client(tracer): assert span.get_tag("aws.agent") == "aiobotocore" assert span.get_tag("aws.region") == "us-west-2" assert span.get_tag("region") == "us-west-2" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "DescribeInstances" assert_span_http_status_code(span, 200) assert span.get_metric("retry_attempts") == 0 @@ -203,6 +204,7 @@ async def test_sqs_client(tracer): assert_is_measured(span) assert span.get_tag("aws.region") == "us-west-2" assert span.get_tag("region") == "us-west-2" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "ListQueues" assert_span_http_status_code(span, 200) assert span.service == "aws.sqs" @@ -225,6 +227,7 @@ async def test_kinesis_client(tracer): assert_is_measured(span) assert span.get_tag("aws.region") == "us-west-2" assert span.get_tag("region") == "us-west-2" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "ListStreams" assert_span_http_status_code(span, 200) assert span.service == "aws.kinesis" diff --git a/tests/contrib/aiohttp/conftest.py b/tests/contrib/aiohttp/conftest.py index 4166049bd23..ee6f379e7c2 100644 --- a/tests/contrib/aiohttp/conftest.py +++ b/tests/contrib/aiohttp/conftest.py @@ -1,29 +1,49 @@ import aiohttp # noqa:F401 import pytest +import pytest_asyncio from ddtrace.contrib.internal.aiohttp.middlewares import trace_app from ddtrace.contrib.internal.aiohttp.patch import unpatch from ddtrace.internal.utils import version # noqa:F401 +from ddtrace.internal.utils.version import parse_version from .app.web import setup_app -@pytest.fixture -async def app_tracer(tracer, loop): - app = setup_app() - trace_app(app, tracer) - return app, tracer +PYTEST_ASYNCIO_VERSION = parse_version(pytest_asyncio.__version__) -@pytest.fixture -async def patched_app_tracer(app_tracer): - app, tracer = 
app_tracer - yield app, tracer - unpatch() +if PYTEST_ASYNCIO_VERSION < (1, 0): + + @pytest.fixture + async def app_tracer(tracer, loop): + app = setup_app() + trace_app(app, tracer) + return app, tracer + + @pytest.fixture + async def untraced_app_tracer(tracer, loop): + app = setup_app() + yield app, tracer + unpatch() + +else: + + @pytest.fixture + async def app_tracer(tracer): + app = setup_app() + trace_app(app, tracer) + return app, tracer + + @pytest.fixture + async def untraced_app_tracer(tracer): + app = setup_app() + yield app, tracer + unpatch() @pytest.fixture -async def untraced_app_tracer(tracer, loop): - app = setup_app() +async def patched_app_tracer(app_tracer): + app, tracer = app_tracer yield app, tracer unpatch() diff --git a/tests/contrib/aiohttp/test_middleware.py b/tests/contrib/aiohttp/test_middleware.py index 3e089f26300..37e6ea2e3de 100644 --- a/tests/contrib/aiohttp/test_middleware.py +++ b/tests/contrib/aiohttp/test_middleware.py @@ -2,6 +2,7 @@ from opentracing.scope_managers.asyncio import AsyncioScopeManager import pytest +import pytest_asyncio from ddtrace._trace.sampler import RateSampler from ddtrace.constants import _SAMPLING_PRIORITY_KEY @@ -12,6 +13,7 @@ from ddtrace.contrib.internal.aiohttp.middlewares import trace_app from ddtrace.contrib.internal.aiohttp.middlewares import trace_middleware from ddtrace.ext import http +from ddtrace.internal.utils.version import parse_version from tests.opentracer.utils import init_tracer from tests.tracer.utils_inferred_spans.test_helpers import assert_web_and_inferred_aws_api_gateway_span_data from tests.utils import assert_span_http_status_code @@ -21,6 +23,9 @@ from .app.web import setup_app +PYTEST_ASYNCIO_VERSION = parse_version(pytest_asyncio.__version__) + + async def test_handler(app_tracer, aiohttp_client): app, tracer = app_tracer client = await aiohttp_client(app) @@ -48,6 +53,7 @@ async def test_handler(app_tracer, aiohttp_client): assert span.get_tag("span.kind") == "server" +@pytest.mark.skipif(PYTEST_ASYNCIO_VERSION >= (1, 0), reason="'loop' fixture removed") @pytest.mark.parametrize("schema_version", [None, "v0", "v1"]) def test_service_operation_schema(ddtrace_run_python_code_in_subprocess, schema_version): """ @@ -113,7 +119,7 @@ async def async_test(app_tracer, aiohttp_client): ("foo=bar&foo=baz&x=y", True), ), ) -async def test_param_handler(app_tracer, aiohttp_client, loop, query_string, trace_query_string): +async def test_param_handler(app_tracer, aiohttp_client, query_string, trace_query_string): app, tracer = app_tracer if trace_query_string: app[CONFIG_KEY]["trace_query_string"] = True @@ -262,7 +268,7 @@ async def test_coroutine_chaining(app_tracer, aiohttp_client): assert root.get_tag("span.kind") == "server" -async def test_static_handler(app_tracer, aiohttp_client, loop): +async def test_static_handler(app_tracer, aiohttp_client): app, tracer = app_tracer client = await aiohttp_client(app) # it should create a trace with multiple spans diff --git a/tests/contrib/aiohttp/test_request.py b/tests/contrib/aiohttp/test_request.py index 056eda09c4b..1a5e15f81b2 100644 --- a/tests/contrib/aiohttp/test_request.py +++ b/tests/contrib/aiohttp/test_request.py @@ -10,7 +10,7 @@ from .app.web import setup_app -async def test_full_request(patched_app_tracer, aiohttp_client, loop): +async def test_full_request(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer client = await aiohttp_client(app) # it should create a root span when there is a handler hit @@ -31,7 +31,7 @@ async def 
test_full_request(patched_app_tracer, aiohttp_client, loop): assert "GET /" == request_span.resource -async def test_full_request_w_mem_leak_prevention_flag(patched_app_tracer, aiohttp_client, loop): +async def test_full_request_w_mem_leak_prevention_flag(patched_app_tracer, aiohttp_client): config.aiohttp.disable_stream_timing_for_mem_leak = True try: app, tracer = patched_app_tracer @@ -58,7 +58,7 @@ async def test_full_request_w_mem_leak_prevention_flag(patched_app_tracer, aioht config.aiohttp.disable_stream_timing_for_mem_leak = False -async def test_stream_request(patched_app_tracer, aiohttp_client, loop): +async def test_stream_request(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer async with await aiohttp_client(app) as client: response = await client.request("GET", "/stream/") @@ -68,7 +68,7 @@ async def test_stream_request(patched_app_tracer, aiohttp_client, loop): assert abs(0.5 - request_span.duration) < 0.05 -async def test_multiple_full_request(patched_app_tracer, aiohttp_client, loop): +async def test_multiple_full_request(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer client = await aiohttp_client(app) @@ -96,7 +96,7 @@ def make_requests(): assert 1 == len(traces[0]) -async def test_user_specified_service(tracer, aiohttp_client, loop): +async def test_user_specified_service(tracer, aiohttp_client): """ When a service name is specified by the user The aiohttp integration should use it as the service name @@ -114,7 +114,7 @@ async def test_user_specified_service(tracer, aiohttp_client, loop): assert request_span.service == "mysvc" -async def test_http_request_header_tracing(patched_app_tracer, aiohttp_client, loop): +async def test_http_request_header_tracing(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer client = await aiohttp_client(app) @@ -133,7 +133,7 @@ async def test_http_request_header_tracing(patched_app_tracer, aiohttp_client, l assert request_span.get_tag("span.kind") == "server" -async def test_http_response_header_tracing(patched_app_tracer, aiohttp_client, loop): +async def test_http_response_header_tracing(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer client = await aiohttp_client(app) diff --git a/tests/contrib/aiohttp/test_request_safety.py b/tests/contrib/aiohttp/test_request_safety.py index 0734d3b7e4a..5b44ed45048 100644 --- a/tests/contrib/aiohttp/test_request_safety.py +++ b/tests/contrib/aiohttp/test_request_safety.py @@ -10,7 +10,7 @@ from tests.utils import assert_is_measured -async def test_full_request(patched_app_tracer, aiohttp_client, loop): +async def test_full_request(patched_app_tracer, aiohttp_client): app, tracer = patched_app_tracer client = await aiohttp_client(app) # it should create a root span when there is a handler hit @@ -31,7 +31,7 @@ async def test_full_request(patched_app_tracer, aiohttp_client, loop): assert request_span.get_tag("span.kind") == "server" -async def test_multiple_full_request(patched_app_tracer, aiohttp_client, loop): +async def test_multiple_full_request(patched_app_tracer, aiohttp_client): NUMBER_REQUESTS = 10 responses = [] diff --git a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py index 8af3af345a7..06d9b55a027 100644 --- a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py +++ b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py @@ -60,7 +60,7 @@ async def test_template_rendering_snapshot_patched_server( assert 200 == request.status -async def 
test_template_rendering_filesystem(untraced_app_tracer_jinja, aiohttp_client, loop): +async def test_template_rendering_filesystem(untraced_app_tracer_jinja, aiohttp_client): app, tracer = untraced_app_tracer_jinja client = await aiohttp_client(app) # it should trace a template rendering with a FileSystemLoader @@ -83,7 +83,7 @@ async def test_template_rendering_filesystem(untraced_app_tracer_jinja, aiohttp_ @pytest.mark.skipif(VERSION < (1, 5, 0), reason="Package loader doesn't work in older versions") -async def test_template_rendering_package(untraced_app_tracer_jinja, aiohttp_client, loop): +async def test_template_rendering_package(untraced_app_tracer_jinja, aiohttp_client): app, tracer = untraced_app_tracer_jinja client = await aiohttp_client(app) # it should trace a template rendering with a PackageLoader @@ -105,7 +105,7 @@ async def test_template_rendering_package(untraced_app_tracer_jinja, aiohttp_cli assert 0 == span.error -async def test_template_decorator(untraced_app_tracer_jinja, aiohttp_client, loop): +async def test_template_decorator(untraced_app_tracer_jinja, aiohttp_client, loop=None): app, tracer = untraced_app_tracer_jinja client = await aiohttp_client(app) # it should trace a template rendering @@ -126,7 +126,7 @@ async def test_template_decorator(untraced_app_tracer_jinja, aiohttp_client, loo assert 0 == span.error -async def test_template_error(untraced_app_tracer_jinja, aiohttp_client, loop): +async def test_template_error(untraced_app_tracer_jinja, aiohttp_client): app, tracer = untraced_app_tracer_jinja client = await aiohttp_client(app) # it should trace a template rendering diff --git a/tests/contrib/anthropic/test_anthropic_llmobs.py b/tests/contrib/anthropic/test_anthropic_llmobs.py index 4bd9d5cf652..b1cc0a054b9 100644 --- a/tests/contrib/anthropic/test_anthropic_llmobs.py +++ b/tests/contrib/anthropic/test_anthropic_llmobs.py @@ -121,7 +121,7 @@ def test_completion_proxy( ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 2 - assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span.kind"] == "llm" + assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span"]["kind"] == "llm" def test_completion(self, anthropic, ddtrace_global_config, mock_llmobs_writer, mock_tracer, request_vcr): """Ensure llmobs records are emitted for completion endpoints when configured. @@ -665,7 +665,12 @@ def test_tools_sync_stream( + " the location is fully specified. 
We can proceed with calling the get_weather tool.\n", "type": "text", }, - {"text": WEATHER_OUTPUT_MESSAGE_2_TOOL_CALL, "type": "text"}, + { + "name": "get_weather", + "input": {"location": "San Francisco, CA"}, + "id": "toolu_01DYJo37oETVsCdLTTcCWcdq", + "type": "tool_use", + }, ] traces = mock_tracer.pop_traces() @@ -733,7 +738,7 @@ def test_tools_sync_stream( input_messages=[ {"content": WEATHER_PROMPT, "role": "user"}, {"content": message[0]["text"], "role": "assistant"}, - {"content": message[1]["text"], "role": "assistant"}, + {"content": "", "role": "assistant", "tool_calls": WEATHER_OUTPUT_MESSAGE_2_TOOL_CALL}, {"content": "", "role": "user", "tool_results": WEATHER_TOOL_RESULT}, ], output_messages=[ diff --git a/tests/contrib/asyncio/utils.py b/tests/contrib/asyncio/utils.py index 8b7c4385355..509d987f6f1 100644 --- a/tests/contrib/asyncio/utils.py +++ b/tests/contrib/asyncio/utils.py @@ -1,10 +1,14 @@ import asyncio from functools import wraps +import logging import sys from tests.utils import TracerTestCase +log = logging.getLogger(__name__) + + class AsyncioTestCase(TracerTestCase): """ Base TestCase for asyncio framework that setup a new loop @@ -14,16 +18,21 @@ class AsyncioTestCase(TracerTestCase): def setUp(self): super(AsyncioTestCase, self).setUp() - # each test must have its own event loop - self._main_loop = asyncio.get_event_loop() + try: + # each test must have its own event loop + self._main_loop = asyncio.get_event_loop() + except RuntimeError: + log.info("Couldn't find existing event loop") + self._main_loop = None self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): super(AsyncioTestCase, self).tearDown() - # restore the main loop - asyncio.set_event_loop(self._main_loop) + if self._main_loop is not None: + # restore the main loop + asyncio.set_event_loop(self._main_loop) self.loop = None self._main_loop = None diff --git a/tests/contrib/azure_functions/azure_function_app/host.json b/tests/contrib/azure_functions/azure_function_app/host.json index 73036f6398d..a7a9aeaaaad 100644 --- a/tests/contrib/azure_functions/azure_function_app/host.json +++ b/tests/contrib/azure_functions/azure_function_app/host.json @@ -2,6 +2,6 @@ "version": "2.0", "extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", - "version": "[4.0.0, 4.22.0)" + "version": "[4.25.1]" } } diff --git a/tests/contrib/azure_functions/test_azure_functions_patch.py b/tests/contrib/azure_functions/test_azure_functions_patch.py index 9b5f3149eac..08daddcc0db 100644 --- a/tests/contrib/azure_functions/test_azure_functions_patch.py +++ b/tests/contrib/azure_functions/test_azure_functions_patch.py @@ -1,8 +1,3 @@ -# This test script was automatically generated by the contrib-patch-tests.py -# script. If you want to make changes to it, you should make sure that you have -# removed the ``_generated`` suffix from the file name, to prevent the content -# from being overwritten by future re-generations. 
- from ddtrace.contrib.internal.azure_functions.patch import get_version from ddtrace.contrib.internal.azure_functions.patch import patch diff --git a/tests/contrib/azure_functions_servicebus/__init__.py b/tests/contrib/azure_functions_servicebus/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/azure_functions_servicebus/azure_function_app/function_app.py b/tests/contrib/azure_functions_servicebus/azure_function_app/function_app.py new file mode 100644 index 00000000000..7a50a66c238 --- /dev/null +++ b/tests/contrib/azure_functions_servicebus/azure_function_app/function_app.py @@ -0,0 +1,104 @@ +import os + +import azure.functions as func +import azure.servicebus as azure_servicebus + +from ddtrace import patch + + +patch(azure_functions=True, azure_servicebus=True, requests=True) + +app = func.FunctionApp() + + +@app.route(route="queuesendmessagesingle", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.POST]) +def queue_send_single_message(req: func.HttpRequest) -> func.HttpResponse: + with azure_servicebus.ServiceBusClient.from_connection_string( + conn_str=os.getenv("CONNECTION_STRING", "") + ) as servicebus_client: + with servicebus_client.get_queue_sender(queue_name="queue.1") as queue_sender: + queue_sender.send_messages(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + return func.HttpResponse("Hello Datadog!") + + +@app.route(route="queuesendmessagebatch", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.POST]) +def queue_send_message_batch(req: func.HttpRequest) -> func.HttpResponse: + with azure_servicebus.ServiceBusClient.from_connection_string( + conn_str=os.getenv("CONNECTION_STRING", "") + ) as servicebus_client: + with servicebus_client.get_queue_sender(queue_name="queue.1") as queue_sender: + batch = queue_sender.create_message_batch() + batch.add_message(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + batch.add_message(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + queue_sender.send_messages(batch) + return func.HttpResponse("Hello Datadog!") + + +@app.route(route="topicsendmessagesingle", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.POST]) +def topic_send_single_message(req: func.HttpRequest) -> func.HttpResponse: + with azure_servicebus.ServiceBusClient.from_connection_string( + conn_str=os.getenv("CONNECTION_STRING", "") + ) as servicebus_client: + with servicebus_client.get_topic_sender(topic_name="topic.1") as topic_sender: + topic_sender.send_messages(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + return func.HttpResponse("Hello Datadog!") + + +@app.route(route="topicsendmessagebatch", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.POST]) +def topic_send_message_batch(req: func.HttpRequest) -> func.HttpResponse: + with azure_servicebus.ServiceBusClient.from_connection_string( + conn_str=os.getenv("CONNECTION_STRING", "") + ) as servicebus_client: + with servicebus_client.get_topic_sender(topic_name="topic.1") as topic_sender: + batch = topic_sender.create_message_batch() + batch.add_message(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + batch.add_message(azure_servicebus.ServiceBusMessage('{"body":"test message"}')) + topic_sender.send_messages(batch) + return func.HttpResponse("Hello Datadog!") + + +if os.getenv("IS_ASYNC") == "True": + + @app.function_name(name="servicebusqueue") + @app.service_bus_queue_trigger( + arg_name="msg", + queue_name="queue.1", + 
connection="CONNECTION_STRING", + cardinality=os.getenv("CARDINALITY", "one"), + ) + async def service_bus_queue(msg: func.ServiceBusMessage): + pass + + @app.function_name(name="servicebustopic") + @app.service_bus_topic_trigger( + arg_name="msg", + topic_name="topic.1", + subscription_name="subscription.3", + connection="CONNECTION_STRING", + cardinality=os.getenv("CARDINALITY", "one"), + ) + async def service_bus_topic(msg: func.ServiceBusMessage): + pass + +else: + + @app.function_name(name="servicebusqueue") + @app.service_bus_queue_trigger( + arg_name="msg", + queue_name="queue.1", + connection="CONNECTION_STRING", + cardinality=os.getenv("CARDINALITY", "one"), + ) + def service_bus_queue(msg: func.ServiceBusMessage): + pass + + @app.function_name(name="servicebustopic") + @app.service_bus_topic_trigger( + arg_name="msg", + topic_name="topic.1", + subscription_name="subscription.3", + connection="CONNECTION_STRING", + cardinality=os.getenv("CARDINALITY", "one"), + ) + def service_bus_topic(msg: func.ServiceBusMessage): + pass diff --git a/tests/contrib/azure_functions_servicebus/azure_function_app/host.json b/tests/contrib/azure_functions_servicebus/azure_function_app/host.json new file mode 100644 index 00000000000..a7a9aeaaaad --- /dev/null +++ b/tests/contrib/azure_functions_servicebus/azure_function_app/host.json @@ -0,0 +1,7 @@ +{ + "version": "2.0", + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[4.25.1]" + } +} diff --git a/tests/contrib/azure_functions_servicebus/azure_function_app/local.settings.json b/tests/contrib/azure_functions_servicebus/azure_function_app/local.settings.json new file mode 100644 index 00000000000..f72aae5c0bc --- /dev/null +++ b/tests/contrib/azure_functions_servicebus/azure_function_app/local.settings.json @@ -0,0 +1,11 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "FUNCTIONS_EXTENSION_VERSION": "~4", + "AzureWebJobsFeatureFlags": "EnableWorkerIndexing", + "AzureWebJobsStorage": "UseDevelopmentStorage=true", + "CONNECTION_STRING": "Endpoint=sb://localhost;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=SAS_KEY_VALUE;UseDevelopmentEmulator=true;", + "WEBSITE_SITE_NAME": "test-func" + } +} diff --git a/tests/contrib/azure_functions_servicebus/test_azure_functions_snapshot.py b/tests/contrib/azure_functions_servicebus/test_azure_functions_snapshot.py new file mode 100644 index 00000000000..73ab0d5feb6 --- /dev/null +++ b/tests/contrib/azure_functions_servicebus/test_azure_functions_snapshot.py @@ -0,0 +1,93 @@ +import itertools +import os +import signal +import subprocess +import time + +import pytest + +from tests.webclient import Client + + +# Ignoring span link attributes until values are normalized: https://github.com/DataDog/dd-apm-test-agent/issues/154 +SNAPSHOT_IGNORES = ["meta.messaging.message_id", "span_links.tracestate", "span_links.trace_id_high"] +DEFAULT_HEADERS = {"User-Agent": "python-httpx/x.xx.x"} +ENTITY_TYPES = ["queue", "topic"] +ASYNC_OPTIONS = [False, True] +CARDINALITY = ["one", "many"] +DISTRIBUTED_TRACING_ENABLED_OPTIONS = [None, False] + +params = [ + ( + f"{e}{'_async' if a else ''}_consume_{c}_distributed_tracing_{'enabled' if d is None else 'disabled'}", + ( + { + "IS_ASYNC": str(a), + "CARDINALITY": c, + **({"DD_AZURE_FUNCTIONS_DISTRIBUTED_TRACING": str(d)} if d is not None else {}), + **({"DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING": str(d)} if d is not None else {}), + }, + e, + "single" if c == "one" else "batch", + ), + ) + for 
e, a, c, d in itertools.product(ENTITY_TYPES, ASYNC_OPTIONS, CARDINALITY, DISTRIBUTED_TRACING_ENABLED_OPTIONS) +] + +param_ids, param_values = zip(*params) + + +@pytest.fixture +def azure_functions_client(request): + env_vars = getattr(request, "param", {}) + + # Copy the env to get the correct PYTHONPATH and such + # from the virtualenv. + env = os.environ.copy() + env.update(env_vars) + + port = 7071 + env["AZURE_FUNCTIONS_TEST_PORT"] = str(port) + + # webservers might exec or fork into another process, so we need to os.setsid() to create a process group + # (all of which will listen to signals sent to the parent) so that we can kill the whole application. + proc = subprocess.Popen( + ["func", "start", "--port", str(port)], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + close_fds=True, + env=env, + preexec_fn=os.setsid, + cwd=os.path.join(os.path.dirname(__file__), "azure_function_app"), + ) + try: + client = Client(f"http://0.0.0.0:{port}") + # Wait for the server to start up + try: + client.wait(delay=0.5) + yield client + client.get_ignored("/shutdown") + except Exception: + pass + # At this point the traces have been sent to the test agent + # but the test agent hasn't necessarily finished processing + # the traces (race condition) so wait just a bit for that + # processing to complete. + time.sleep(1) + finally: + os.killpg(proc.pid, signal.SIGKILL) + proc.wait() + + +@pytest.mark.parametrize( + "azure_functions_client, entity, payload_type", + param_values, + ids=param_ids, + indirect=["azure_functions_client"], +) +@pytest.mark.snapshot(ignores=SNAPSHOT_IGNORES) +def test_service_bus_trigger(azure_functions_client: Client, entity, payload_type) -> None: + assert ( + azure_functions_client.post(f"/api/{entity}sendmessage{payload_type}", headers=DEFAULT_HEADERS).status_code + == 200 + ) diff --git a/tests/contrib/azure_servicebus/common.py b/tests/contrib/azure_servicebus/common.py new file mode 100644 index 00000000000..394d2b9cfb5 --- /dev/null +++ b/tests/contrib/azure_servicebus/common.py @@ -0,0 +1,204 @@ +from datetime import datetime +from datetime import timezone +import os +from typing import Union +from uuid import uuid4 + +from azure.servicebus import ServiceBusClient +from azure.servicebus import ServiceBusMessage +from azure.servicebus import ServiceBusReceiveMode +from azure.servicebus import ServiceBusReceiver +from azure.servicebus import ServiceBusSender +from azure.servicebus.aio import ServiceBusClient as ServiceBusClientAsync +from azure.servicebus.aio import ServiceBusReceiver as ServiceBusReceiverAsync +from azure.servicebus.aio import ServiceBusSender as ServiceBusSenderAsync +from azure.servicebus.amqp import AmqpAnnotatedMessage +import pytest + + +CONNECTION_STRING = "Endpoint=sb://localhost;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=SAS_KEY_VALUE;UseDevelopmentEmulator=true;" +QUEUE_NAME = "queue.1" +TOPIC_NAME = "topic.1" +SUBSCRIPTION_NAME = "subscription.3" +DEFAULT_APPLICATION_PROPERTIES = {"property": "val", b"byteproperty": b"byteval"} +TRACE_CONTEXT_KEYS = [ + "x-datadog-trace-id", + "x-datadog-parent-id", + "x-datadog-sampling-priority", + "x-datadog-tags", + "traceparent", + "tracestate", +] + + +def normalize_properties(message: Union[ServiceBusMessage, AmqpAnnotatedMessage]): + props = message.application_properties + + if not props: + props = {} + + return {k.decode() if isinstance(k, bytes) else k: v for k, v in props.items()} + + +def make_servicebus_messages(): + return [ + 
ServiceBusMessage(body='{"body":"ServiceBusMessage without properties"}'), + ServiceBusMessage( + body='{"body":"ServiceBusMessage with properties and custom message_id"}', + application_properties=DEFAULT_APPLICATION_PROPERTIES, + message_id=str(uuid4()), + ), + ] + + +def make_amqp_annotated_messages(): + return [ + AmqpAnnotatedMessage(data_body='{"body":"AmqpAnnotatedMessage without properties"}'), + AmqpAnnotatedMessage( + data_body='{"body":"AmqpAnnotatedMessage with properties and custom message_id"}', + application_properties=DEFAULT_APPLICATION_PROPERTIES, + properties={"message_id": uuid4()}, + ), + ] + + +def run_test( + sender: ServiceBusSender, + receiver: ServiceBusReceiver, + method: Union[str, None], + message_payload_type: Union[str, None], + distributed_tracing_enabled: bool, + batch_links_enabled: bool, +): + servicebus_messages = make_servicebus_messages() + amqp_annotated_messages = make_amqp_annotated_messages() + + message_length = len(servicebus_messages) + len(amqp_annotated_messages) + now = datetime.now(timezone.utc) + + if method == "send_messages" and message_payload_type == "single": + for servicebus_message in servicebus_messages: + sender.send_messages(servicebus_message) + for amqp_annotated_message in amqp_annotated_messages: + sender.send_messages(amqp_annotated_message) + elif method == "send_messages" and message_payload_type == "list": + sender.send_messages(servicebus_messages) + sender.send_messages(amqp_annotated_messages) + elif method == "send_messages" and message_payload_type == "batch": + servicebus_message_batch = sender.create_message_batch() + for servicebus_message in servicebus_messages: + servicebus_message_batch.add_message(servicebus_message) + sender.send_messages(servicebus_message_batch) + + amqp_annotated_message_batch = sender.create_message_batch() + for amqp_annotated_message in amqp_annotated_messages: + amqp_annotated_message_batch.add_message(amqp_annotated_message) + sender.send_messages(amqp_annotated_message_batch) + elif method == "schedule_messages" and message_payload_type == "single": + for servicebus_message in servicebus_messages: + sender.schedule_messages(servicebus_message, now) + for amqp_annotated_message in amqp_annotated_messages: + sender.schedule_messages(amqp_annotated_message, now) + elif method == "schedule_messages" and message_payload_type == "list": + sender.schedule_messages(servicebus_messages, now) + sender.schedule_messages(amqp_annotated_messages, now) + + received_queue_messages = receiver.receive_messages(max_message_count=message_length, max_wait_time=5) + assert len(received_queue_messages) == message_length + + if not distributed_tracing_enabled or not batch_links_enabled: + assert not any(key in normalize_properties(m) for m in received_queue_messages for key in TRACE_CONTEXT_KEYS) + else: + assert all(key in normalize_properties(m) for m in received_queue_messages for key in TRACE_CONTEXT_KEYS) + + +async def run_test_async( + sender: ServiceBusSenderAsync, + receiver: ServiceBusReceiverAsync, + method: Union[str, None], + message_payload_type: Union[str, None], + distributed_tracing_enabled: bool, + batch_links_enabled: bool, +): + servicebus_messages = make_servicebus_messages() + amqp_annotated_messages = make_amqp_annotated_messages() + + message_length = len(servicebus_messages) + len(amqp_annotated_messages) + now = datetime.now(timezone.utc) + + if method == "send_messages" and message_payload_type == "single": + for servicebus_message in servicebus_messages: + await 
sender.send_messages(servicebus_message) + for amqp_annotated_message in amqp_annotated_messages: + await sender.send_messages(amqp_annotated_message) + elif method == "send_messages" and message_payload_type == "list": + await sender.send_messages(servicebus_messages) + await sender.send_messages(amqp_annotated_messages) + elif method == "send_messages" and message_payload_type == "batch": + servicebus_message_batch = await sender.create_message_batch() + for servicebus_message in servicebus_messages: + servicebus_message_batch.add_message(servicebus_message) + await sender.send_messages(servicebus_message_batch) + + amqp_annotated_message_batch = await sender.create_message_batch() + for amqp_annotated_message in amqp_annotated_messages: + amqp_annotated_message_batch.add_message(amqp_annotated_message) + await sender.send_messages(amqp_annotated_message_batch) + elif method == "schedule_messages" and message_payload_type == "single": + for servicebus_message in servicebus_messages: + await sender.schedule_messages(servicebus_message, now) + for amqp_annotated_message in amqp_annotated_messages: + await sender.schedule_messages(amqp_annotated_message, now) + elif method == "schedule_messages" and message_payload_type == "list": + await sender.schedule_messages(servicebus_messages, now) + await sender.schedule_messages(amqp_annotated_messages, now) + + received_queue_messages = await receiver.receive_messages(max_message_count=message_length, max_wait_time=5) + assert len(received_queue_messages) == message_length + + if not distributed_tracing_enabled or not batch_links_enabled: + assert not any(key in normalize_properties(m) for m in received_queue_messages for key in TRACE_CONTEXT_KEYS) + else: + assert all(key in normalize_properties(m) for m in received_queue_messages for key in TRACE_CONTEXT_KEYS) + + +@pytest.mark.asyncio +async def test_common(): + method = os.environ.get("METHOD") + is_async = os.environ.get("IS_ASYNC") == "True" + message_payload_type = os.environ.get("MESSAGE_PAYLOAD_TYPE") + distributed_tracing_enabled = os.environ.get("DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING", "True") == "True" + batch_links_enabled = os.environ.get("DD_TRACE_AZURE_SERVICEBUS_BATCH_LINKS_ENABLED", "True") == "True" + + if is_async: + client = ServiceBusClientAsync.from_connection_string(CONNECTION_STRING) + sender = client.get_queue_sender(queue_name=QUEUE_NAME) + receiver = client.get_queue_receiver( + queue_name=QUEUE_NAME, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE + ) + try: + await run_test_async( + sender, receiver, method, message_payload_type, distributed_tracing_enabled, batch_links_enabled + ) + finally: + await receiver.close() + await sender.close() + await client.close() + else: + client = ServiceBusClient.from_connection_string(CONNECTION_STRING) + sender = client.get_queue_sender(queue_name=QUEUE_NAME) + receiver = client.get_queue_receiver( + queue_name=QUEUE_NAME, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE + ) + try: + run_test(sender, receiver, method, message_payload_type, distributed_tracing_enabled, batch_links_enabled) + finally: + receiver.close() + sender.close() + client.close() + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-x", __file__])) diff --git a/tests/contrib/azure_servicebus/test_azure_servicebus_patch.py b/tests/contrib/azure_servicebus/test_azure_servicebus_patch.py index 613e1b3d0e1..a76f7d6aaaf 100644 --- a/tests/contrib/azure_servicebus/test_azure_servicebus_patch.py +++ 
b/tests/contrib/azure_servicebus/test_azure_servicebus_patch.py @@ -1,8 +1,3 @@ -# This test script was automatically generated by the contrib-patch-tests.py -# script. If you want to make changes to it, you should make sure that you have -# removed the ``_generated`` suffix from the file name, to prevent the content -# from being overwritten by future re-generations. - from ddtrace.contrib.internal.azure_servicebus.patch import get_version from ddtrace.contrib.internal.azure_servicebus.patch import patch @@ -22,14 +17,20 @@ class TestAzureServiceBusPatch(PatchTestCase.Base): __get_version__ = get_version def assert_module_patched(self, azure_servicebus): + self.assert_wrapped(azure_servicebus.ServiceBusMessageBatch.add_message) + self.assert_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) def assert_not_module_patched(self, azure_servicebus): + self.assert_not_wrapped(azure_servicebus.ServiceBusMessageBatch.add_message) + self.assert_not_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_not_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_not_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) def assert_not_module_double_patched(self, azure_servicebus): + self.assert_not_double_wrapped(azure_servicebus.ServiceBusMessageBatch.add_message) + self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) @@ -42,13 +43,16 @@ class TestAzureServiceBusAioPatch(PatchTestCase.Base): __get_version__ = get_version def assert_module_patched(self, azure_servicebus): + self.assert_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) def assert_not_module_patched(self, azure_servicebus): + self.assert_not_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_not_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_not_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) def assert_not_module_double_patched(self, azure_servicebus): + self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.create_message_batch) self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.send_messages) self.assert_not_double_wrapped(azure_servicebus.ServiceBusSender.schedule_messages) diff --git a/tests/contrib/azure_servicebus/test_azure_servicebus_snapshot.py b/tests/contrib/azure_servicebus/test_azure_servicebus_snapshot.py index b18be0ecba9..3d40c0786d2 100644 --- a/tests/contrib/azure_servicebus/test_azure_servicebus_snapshot.py +++ b/tests/contrib/azure_servicebus/test_azure_servicebus_snapshot.py @@ -1,38 +1,49 @@ -import asyncio -from datetime import datetime -from datetime import timezone +import itertools import os -from typing import Dict -from typing import List -from typing import Optional -from typing import Union -import uuid +from pathlib import Path -from azure.servicebus import ServiceBusClient -from azure.servicebus import ServiceBusMessage -from azure.servicebus import ServiceBusReceiveMode -from azure.servicebus import ServiceBusReceiver -from azure.servicebus import ServiceBusSender -from 
azure.servicebus.aio import ServiceBusClient as ServiceBusClientAsync -from azure.servicebus.aio import ServiceBusReceiver as ServiceBusReceiverAsync -from azure.servicebus.aio import ServiceBusSender as ServiceBusSenderAsync import pytest from ddtrace.contrib.internal.azure_servicebus.patch import patch from ddtrace.contrib.internal.azure_servicebus.patch import unpatch -CONNECTION_STRING = "Endpoint=sb://localhost;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=SAS_KEY_VALUE;UseDevelopmentEmulator=true;" -QUEUE_NAME = "queue.1" -TOPIC_NAME = "topic.1" -SUBSCRIPTION_NAME = "subscription.3" -DEFAULT_APPLICATION_PROPERTIES = {"property": "val", b"byteproperty": b"byteval"} +# Ignoring span link attributes until values are normalized: https://github.com/DataDog/dd-apm-test-agent/issues/154 +SNAPSHOT_IGNORES = ["meta.messaging.message_id", "meta._dd.span_links"] -DISTRIBUTED_TRACING_DISABLED_PARAMS = { - "DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING": "False", -} +METHODS = ["send_messages", "schedule_messages"] +ASYNC_OPTIONS = [False, True] +PAYLOAD_TYPES = ["single", "list", "batch"] +DISTRIBUTED_TRACING_ENABLED_OPTIONS = [None, False] +BATCH_LINKS_ENABLED_OPTIONS = [None, False] -SPAN_ATTRIBUTE_SCHEMA_V1_PARAMS = {"DD_TRACE_SPAN_ATTRIBUTE_SCHEMA": "v1"} + +def is_invalid_test_combination(method, payload_type, batch_links_enabled): + return (method == "schedule_messages" and payload_type == "batch") or ( + payload_type != "batch" and batch_links_enabled is False + ) + + +params = [ + ( + f"{m}{'_async' if a else ''}_{p}" + f"_distributed_tracing_{'enabled' if d is None else 'disabled'}" + f"{'_batch_links_enabled' if p == 'batch' and b is None else '_batch_links_disabled' if p == 'batch' else ''}", + { + "METHOD": m, + "IS_ASYNC": str(a), + "MESSAGE_PAYLOAD_TYPE": p, + **({"DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING": str(d)} if d is not None else {}), + **({"DD_TRACE_AZURE_SERVICEBUS_BATCH_LINKS_ENABLED": str(b)} if b is not None else {}), + }, + ) + for m, a, p, d, b in itertools.product( + METHODS, ASYNC_OPTIONS, PAYLOAD_TYPES, DISTRIBUTED_TRACING_ENABLED_OPTIONS, BATCH_LINKS_ENABLED_OPTIONS + ) + if not is_invalid_test_combination(m, p, b) +] + +param_ids, param_values = zip(*params) @pytest.fixture(autouse=True) @@ -42,343 +53,19 @@ def patch_azure_servicebus(): unpatch() -@pytest.fixture() -def azure_servicebus_client(): - with ServiceBusClient.from_connection_string(conn_str=CONNECTION_STRING) as servicebus_client: - yield servicebus_client - - -@pytest.fixture() -def azure_servicebus_queue_sender(azure_servicebus_client: ServiceBusClient): - with azure_servicebus_client.get_queue_sender(queue_name=QUEUE_NAME) as queue_sender: - yield queue_sender - - -@pytest.fixture() -def azure_servicebus_topic_sender(azure_servicebus_client: ServiceBusClient): - with azure_servicebus_client.get_topic_sender(topic_name=TOPIC_NAME) as topic_sender: - yield topic_sender - - -@pytest.fixture() -def azure_servicebus_queue_receiver(azure_servicebus_client: ServiceBusClient): - with azure_servicebus_client.get_queue_receiver( - queue_name=QUEUE_NAME, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE - ) as queue_receiver: - yield queue_receiver - - -@pytest.fixture() -def azure_servicebus_subscription_receiver(azure_servicebus_client: ServiceBusClient): - with azure_servicebus_client.get_subscription_receiver( - topic_name=TOPIC_NAME, - subscription_name=SUBSCRIPTION_NAME, - receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, - ) as subscription_receiver: - yield subscription_receiver - - 
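The parameter matrix added above builds human-readable test ids next to their environment dictionaries, filters out unsupported combinations, and then splits the pairs with zip(*params) so the ids and argvalues passed to parametrize() stay aligned; the new Service Bus trigger test uses the same idiom. A self-contained sketch of the pattern, with toy option lists standing in for the real ones:

    import itertools

    # Toy stand-ins for the option lists defined in this file.
    METHODS = ["send_messages", "schedule_messages"]
    PAYLOAD_TYPES = ["single", "list", "batch"]

    # Build (test_id, env_vars) pairs, skipping the unsupported
    # schedule_messages + batch combination, then split them into the
    # parallel id/value sequences that parametrize() expects.
    params = [
        (f"{m}_{p}", {"METHOD": m, "MESSAGE_PAYLOAD_TYPE": p})
        for m, p in itertools.product(METHODS, PAYLOAD_TYPES)
        if not (m == "schedule_messages" and p == "batch")
    ]
    param_ids, param_values = zip(*params)
    assert len(param_ids) == len(param_values) == 5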
-@pytest.fixture() -async def azure_servicebus_client_async(): - async with ServiceBusClientAsync.from_connection_string(conn_str=CONNECTION_STRING) as servicebus_client: - yield servicebus_client - - -@pytest.fixture() -async def azure_servicebus_queue_sender_async(azure_servicebus_client_async: ServiceBusClientAsync): - async with azure_servicebus_client_async.get_queue_sender(queue_name=QUEUE_NAME) as queue_sender: - yield queue_sender - - -@pytest.fixture() -async def azure_servicebus_topic_sender_async(azure_servicebus_client_async: ServiceBusClientAsync): - async with azure_servicebus_client_async.get_topic_sender(topic_name=TOPIC_NAME) as topic_sender: - yield topic_sender - - -@pytest.fixture() -async def azure_servicebus_queue_receiver_async(azure_servicebus_client_async: ServiceBusClientAsync): - async with azure_servicebus_client_async.get_queue_receiver( - queue_name=QUEUE_NAME, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE - ) as queue_receiver: - yield queue_receiver - - -@pytest.fixture() -async def azure_servicebus_subscription_receiver_async(azure_servicebus_client_async: ServiceBusClientAsync): - async with azure_servicebus_client_async.get_subscription_receiver( - topic_name=TOPIC_NAME, - subscription_name=SUBSCRIPTION_NAME, - receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, - ) as subscription_receiver: - yield subscription_receiver - - -@pytest.fixture() -def trace_context_keys(): - return [ - "x-datadog-trace-id", - "x-datadog-parent-id", - "x-datadog-sampling-priority", - "x-datadog-tags", - "traceparent", - "tracestate", - ] - - -def make_messages(): - return [ - ServiceBusMessage("test message without properties"), - ServiceBusMessage("test message without properties"), - [ - ServiceBusMessage("test message without properties"), - ServiceBusMessage( - "test message with properties", - application_properties=DEFAULT_APPLICATION_PROPERTIES, - ), - ], - ] - - -async def send_messages_to_queue_async(queue_sender_async): - for message in make_messages(): - await queue_sender_async.send_messages(message) - - -async def send_messages_to_topic_async(topic_sender_async): - for message in make_messages(): - await topic_sender_async.send_messages(message) - - -async def schedule_messages_to_queue_async(queue_sender_async, schedule_time_utc): - for message in make_messages(): - await queue_sender_async.schedule_messages(message, schedule_time_utc) - - -async def schedule_messages_to_topic_async(topic_sender_async, schedule_time_utc): - for message in make_messages(): - await topic_sender_async.schedule_messages(message, schedule_time_utc) - - -def normalize_application_properties( - application_properties: Optional[Dict[Union[str, bytes], Union[int, float, bytes, bool, str, uuid.UUID]]], -): - if not application_properties: - return {} - - return {k.decode() if isinstance(k, bytes) else k: v for k, v in application_properties.items()} - - +@pytest.mark.asyncio @pytest.mark.parametrize( "env_vars", - [{}, DISTRIBUTED_TRACING_DISABLED_PARAMS, SPAN_ATTRIBUTE_SCHEMA_V1_PARAMS], - ids=["default_config", "distributed_tracing_disabled", "span_attribute_schema_v1"], + param_values, + ids=param_ids, ) -@pytest.mark.snapshot -def test_send_messages(env_vars, ddtrace_run_python_code_in_subprocess): - code = f""" -import os -from typing import Dict -from typing import Optional -from typing import Union -import uuid - -from azure.servicebus import ServiceBusClient -from azure.servicebus import ServiceBusMessage -from azure.servicebus import ServiceBusReceiveMode - -from 
ddtrace.internal.utils.formats import asbool - - -TRACE_CONTEXT_KEYS = [ - "x-datadog-trace-id", - "x-datadog-parent-id", - "x-datadog-sampling-priority", - "x-datadog-tags", - "traceparent", - "tracestate", -] - - -def normalize_application_properties( - application_properties: Optional[Dict[Union[str, bytes], Union[int, float, bytes, bool, str, uuid.UUID]]], -): - if not application_properties: - return {{}} - - return {{k.decode() if isinstance(k, bytes) else k: v for k, v in application_properties.items()}} - - -def make_messages(): - return [ - ServiceBusMessage("test message without properties"), - ServiceBusMessage("test message without properties"), - [ - ServiceBusMessage("test message without properties"), - ServiceBusMessage( - "test message with properties", - application_properties={DEFAULT_APPLICATION_PROPERTIES}, - ), - ], - ] - - -with ServiceBusClient.from_connection_string(conn_str="{CONNECTION_STRING}") as servicebus_client: - with servicebus_client.get_queue_sender(queue_name="{QUEUE_NAME}") as queue_sender: - for message in make_messages(): - queue_sender.send_messages(message) - with servicebus_client.get_topic_sender(topic_name="{TOPIC_NAME}") as topic_sender: - for message in make_messages(): - topic_sender.send_messages(message) - with servicebus_client.get_queue_receiver( - queue_name="{QUEUE_NAME}", receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE - ) as queue_receiver: - received_messages = queue_receiver.receive_messages(max_message_count=4, max_wait_time=5) - assert len(received_messages) == 4 - if asbool(os.getenv("DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING", default=True)): - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_messages - for key in TRACE_CONTEXT_KEYS - ) - else: - assert not any( - key in normalize_application_properties(msg.application_properties) - for msg in received_messages - for key in TRACE_CONTEXT_KEYS - ) - with servicebus_client.get_subscription_receiver( - topic_name="{TOPIC_NAME}", - subscription_name="{SUBSCRIPTION_NAME}", - receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE, - ) as subscription_receiver: - received_messages = subscription_receiver.receive_messages(max_message_count=4, max_wait_time=5) - assert len(received_messages) == 4 - if asbool(os.getenv("DD_AZURE_SERVICEBUS_DISTRIBUTED_TRACING", default=True)): - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_messages - for key in TRACE_CONTEXT_KEYS - ) - else: - assert not any( - key in normalize_application_properties(msg.application_properties) - for msg in received_messages - for key in TRACE_CONTEXT_KEYS - ) -""" - +@pytest.mark.snapshot(ignores=SNAPSHOT_IGNORES) +async def test_producer(ddtrace_run_python_code_in_subprocess, env_vars): env = os.environ.copy() env.update(env_vars) - out, err, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env) - assert status == 0, (err.decode(), out.decode()) - assert err == b"", err.decode() - - -@pytest.mark.asyncio -@pytest.mark.snapshot -async def test_send_messages_async( - azure_servicebus_queue_sender_async: ServiceBusSenderAsync, - azure_servicebus_queue_receiver_async: ServiceBusReceiverAsync, - azure_servicebus_topic_sender_async: ServiceBusSenderAsync, - azure_servicebus_subscription_receiver_async: ServiceBusReceiverAsync, - trace_context_keys: List[str], -): - await asyncio.gather( - send_messages_to_queue_async(azure_servicebus_queue_sender_async), - 
send_messages_to_topic_async(azure_servicebus_topic_sender_async), - ) - - received_queue_messages, received_subscription_messages = await asyncio.gather( - azure_servicebus_queue_receiver_async.receive_messages(max_message_count=4, max_wait_time=5), - azure_servicebus_subscription_receiver_async.receive_messages(max_message_count=4, max_wait_time=5), - ) - - assert len(received_queue_messages) == 4 - assert len(received_subscription_messages) == 4 - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_queue_messages - for key in trace_context_keys - ) - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_subscription_messages - for key in trace_context_keys - ) + helper_path = Path(__file__).resolve().parent.joinpath("common.py") + out, err, status, _ = ddtrace_run_python_code_in_subprocess(helper_path.read_text(), env=env) -@pytest.mark.snapshot -def test_schedule_messages( - azure_servicebus_queue_sender: ServiceBusSender, - azure_servicebus_queue_receiver: ServiceBusReceiver, - azure_servicebus_topic_sender: ServiceBusSender, - azure_servicebus_subscription_receiver: ServiceBusReceiver, - trace_context_keys: List[str], -): - now = datetime.now(timezone.utc) - - for message in make_messages(): - azure_servicebus_queue_sender.schedule_messages(message, now) - - for message in make_messages(): - azure_servicebus_topic_sender.schedule_messages(message, now) - - received_queue_messages = azure_servicebus_queue_receiver.receive_messages(max_message_count=4, max_wait_time=5) - received_subscription_messages = azure_servicebus_subscription_receiver.receive_messages( - max_message_count=4, max_wait_time=5 - ) - - assert len(received_queue_messages) == 4 - assert len(received_subscription_messages) == 4 - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_queue_messages - for key in trace_context_keys - ) - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_subscription_messages - for key in trace_context_keys - ) - - -@pytest.mark.asyncio -@pytest.mark.snapshot -async def test_schedule_messages_async( - azure_servicebus_queue_sender_async: ServiceBusSenderAsync, - azure_servicebus_queue_receiver_async: ServiceBusReceiverAsync, - azure_servicebus_topic_sender_async: ServiceBusSenderAsync, - azure_servicebus_subscription_receiver_async: ServiceBusReceiverAsync, - trace_context_keys: List[str], -): - now = datetime.now(timezone.utc) - - await asyncio.gather( - schedule_messages_to_queue_async(azure_servicebus_queue_sender_async, now), - schedule_messages_to_topic_async(azure_servicebus_topic_sender_async, now), - ) - - received_queue_messages, received_subscription_messages = await asyncio.gather( - azure_servicebus_queue_receiver_async.receive_messages(max_message_count=4, max_wait_time=5), - azure_servicebus_subscription_receiver_async.receive_messages(max_message_count=4, max_wait_time=5), - ) - - assert len(received_queue_messages) == 4 - assert len(received_subscription_messages) == 4 - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_queue_messages - for key in trace_context_keys - ) - - assert all( - key in normalize_application_properties(msg.application_properties) - for msg in received_subscription_messages - for key in trace_context_keys - ) + assert status == 0, (err.decode(), out.decode()) + assert err == b"", 
err.decode() diff --git a/tests/contrib/boto/test.py b/tests/contrib/boto/test.py index 6d1b0050d3b..91c626e6cbc 100644 --- a/tests/contrib/boto/test.py +++ b/tests/contrib/boto/test.py @@ -52,6 +52,7 @@ def test_ec2_client(self): self.assertEqual(span.get_tag(http.METHOD), "POST") self.assertEqual(span.get_tag("aws.region"), "us-west-2") self.assertEqual(span.get_tag("region"), "us-west-2") + self.assertEqual(span.get_tag("aws.partition"), "aws") self.assertEqual(span.get_tag("component"), "boto") self.assertEqual(span.get_tag("span.kind"), "client") @@ -67,6 +68,7 @@ def test_ec2_client(self): self.assertEqual(span.get_tag(http.METHOD), "POST") self.assertEqual(span.get_tag("aws.region"), "us-west-2") self.assertEqual(span.get_tag("region"), "us-west-2") + self.assertEqual(span.get_tag("aws.partition"), "aws") self.assertEqual(span.get_tag("component"), "boto") self.assertEqual(span.get_tag("span.kind"), "client") self.assertEqual(span.service, "test-boto-tracing.ec2") @@ -495,6 +497,7 @@ def test_lambda_client(self): self.assertEqual(span.get_tag(http.METHOD), "GET") self.assertEqual(span.get_tag("aws.region"), "us-east-2") self.assertEqual(span.get_tag("region"), "us-east-2") + self.assertEqual(span.get_tag("aws.partition"), "aws") self.assertEqual(span.get_tag("aws.operation"), "list_functions") self.assertEqual(span.get_tag("component"), "boto") self.assertEqual(span.get_tag("span.kind"), "client") @@ -612,6 +615,7 @@ def test_sts_client(self): assert_is_measured(span) self.assertEqual(span.get_tag("aws.region"), "us-west-2") self.assertEqual(span.get_tag("region"), "us-west-2") + self.assertEqual(span.get_tag("aws.partition"), "aws") self.assertEqual(span.get_tag("aws.operation"), "GetFederationToken") self.assertEqual(span.get_tag("component"), "boto") self.assertEqual(span.get_tag("span.kind"), "client") @@ -750,6 +754,7 @@ def test_elasticache_client(self): span = spans[0] self.assertEqual(span.get_tag("aws.region"), "us-west-2") self.assertEqual(span.get_tag("region"), "us-west-2") + self.assertEqual(span.get_tag("aws.partition"), "aws") self.assertEqual(span.get_tag("component"), "boto") self.assertEqual(span.get_tag("span.kind"), "client") self.assertEqual(span.service, "test-boto-tracing.elasticache") @@ -783,6 +788,7 @@ def test_ec2_client_ot(self): self.assertEqual(dd_span.get_tag(http.METHOD), "POST") self.assertEqual(dd_span.get_tag("aws.region"), "us-west-2") self.assertEqual(dd_span.get_tag("region"), "us-west-2") + self.assertEqual(dd_span.get_tag("aws.partition"), "aws") with ot_tracer.start_active_span("ot_span"): ec2.run_instances(21) @@ -800,6 +806,7 @@ def test_ec2_client_ot(self): self.assertEqual(dd_span.get_tag(http.METHOD), "POST") self.assertEqual(dd_span.get_tag("aws.region"), "us-west-2") self.assertEqual(dd_span.get_tag("region"), "us-west-2") + self.assertEqual(dd_span.get_tag("aws.partition"), "aws") self.assertEqual(dd_span.get_tag("component"), "boto") self.assertEqual(dd_span.get_tag("span.kind"), "client") self.assertEqual(dd_span.service, "test-boto-tracing.ec2") diff --git a/tests/contrib/botocore/test.py b/tests/contrib/botocore/test.py index b1cc4f9aedd..5270f69f021 100644 --- a/tests/contrib/botocore/test.py +++ b/tests/contrib/botocore/test.py @@ -164,6 +164,7 @@ def test_traced_client(self): assert span.get_tag("aws.agent") == "botocore" assert span.get_tag("aws.region") == "us-west-2" assert span.get_tag("region") == "us-west-2" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "DescribeInstances" assert 
span.get_tag("aws.requestid") == "fdcdcab1-ae5c-489e-9c33-4637c5dda355" assert span.get_tag("component") == "botocore" @@ -718,6 +719,7 @@ def _test_sqs_client(self): assert len(spans) == 1 assert span.get_tag("aws.region") == "us-east-1" assert span.get_tag("region") == "us-east-1" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "CreateQueue" assert span.get_tag("component") == "botocore" assert_is_measured(span) @@ -771,6 +773,7 @@ def test_sqs_send_message_distributed_tracing_off(self): assert len(spans) == 1 assert span.get_tag("aws.region") == "us-east-1" assert span.get_tag("region") == "us-east-1" + assert span.get_tag("aws.partition") == "aws" assert span.get_tag("aws.operation") == "SendMessage" assert span.get_tag("params.MessageBody") is None assert span.get_tag("component") == "botocore" diff --git a/tests/contrib/botocore/test_bedrock_agents_llmobs.py b/tests/contrib/botocore/test_bedrock_agents_llmobs.py index de2d4bd5439..3eb41802e9d 100644 --- a/tests/contrib/botocore/test_bedrock_agents_llmobs.py +++ b/tests/contrib/botocore/test_bedrock_agents_llmobs.py @@ -52,7 +52,7 @@ def _assert_agent_span(agent_span, resp_str): assert agent_span["meta"]["output"]["value"] == resp_str assert agent_span["meta"]["metadata"]["agent_alias_id"] == AGENT_ALIAS_ID assert agent_span["meta"]["metadata"]["agent_id"] == AGENT_ID - assert agent_span["meta"]["span.kind"] == "agent" + assert agent_span["meta"]["span"]["kind"] == "agent" assert "session_id:{}".format(SESSION_ID) in agent_span["tags"] @@ -64,26 +64,23 @@ def _assert_trace_step_spans(trace_step_spans): assert trace_step_spans[3]["name"].startswith("orchestrationTrace Step") assert trace_step_spans[4]["name"].startswith("orchestrationTrace Step") assert trace_step_spans[5]["name"].startswith("guardrailTrace Step") - assert all(span["meta"]["span.kind"] == "workflow" for span in trace_step_spans) + assert all(span["meta"]["span"]["kind"] == "workflow" for span in trace_step_spans) assert all(span["meta"]["metadata"]["bedrock_trace_id"] == span["span_id"] for span in trace_step_spans) def _assert_inner_span(span): assert span["name"] in ["guardrail", "modelInvocation", "reasoning", "location_suggestion"] - if span["name"] == "guardrail": - assert span["meta"]["span.kind"] == "task" + if span["name"] == "guardrail" or span["name"] == "reasoning": + assert span["meta"]["span"]["kind"] == "task" assert span["meta"]["output"].get("value") is not None elif span["name"] == "modelInvocation": - assert span["meta"]["span.kind"] == "llm" + assert span["meta"]["span"]["kind"] == "llm" assert span["meta"]["metadata"]["model_name"] == MODEL_NAME assert span["meta"]["metadata"]["model_provider"] == MODEL_PROVIDER assert span["metrics"].get("input_tokens") is not None assert span["metrics"].get("output_tokens") is not None - elif span["name"] == "reasoning": - assert span["meta"]["span.kind"] == "task" - assert span["meta"]["output"].get("value") is not None elif span["name"] == "location_suggestion": - assert span["meta"]["span.kind"] == "tool" + assert span["meta"]["span"]["kind"] == "tool" assert span["meta"]["output"].get("value") is not None diff --git a/tests/contrib/botocore/test_bedrock_llmobs.py b/tests/contrib/botocore/test_bedrock_llmobs.py index bef0b71d0cf..d88770452cb 100644 --- a/tests/contrib/botocore/test_bedrock_llmobs.py +++ b/tests/contrib/botocore/test_bedrock_llmobs.py @@ -721,7 +721,7 @@ def _test_llmobs_invoke_proxy( else: span = mock_tracer.pop_traces()[0][0] assert len(llmobs_events) == 
1 - assert llmobs_events[0]["meta"]["span.kind"] == "llm" + assert llmobs_events[0]["meta"]["span"]["kind"] == "llm" LLMObs.disable() @@ -769,7 +769,7 @@ def _test_llmobs_invoke_stream_proxy( else: span = mock_tracer.pop_traces()[0][0] assert len(llmobs_events) == 1 - assert llmobs_events[0]["meta"]["span.kind"] == "llm" + assert llmobs_events[0]["meta"]["span"]["kind"] == "llm" LLMObs.disable() diff --git a/tests/contrib/django/test_django_appsec_snapshots.py b/tests/contrib/django/test_django_appsec_snapshots.py index 9c1d7396881..ef6600aee49 100644 --- a/tests/contrib/django/test_django_appsec_snapshots.py +++ b/tests/contrib/django/test_django_appsec_snapshots.py @@ -1,6 +1,7 @@ from contextlib import contextmanager import os import subprocess +import sys import django import pytest @@ -50,8 +51,15 @@ def daphne_client(django_asgi, additional_env=None): yield client finally: - resp = client.get_ignored("/shutdown-tracer") - assert resp.status_code == 200 + try: + resp = client.get_ignored("/shutdown-tracer") + assert resp.status_code == 200 + except ConnectionRefusedError: + # current mitigation for python 3.8 + if sys.version_info[:2] == (3, 8): + pass + else: + raise proc.terminate() diff --git a/tests/contrib/google_adk/__init__.py b/tests/contrib/google_adk/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/google_adk/cassettes/agent_error_handling.yaml b/tests/contrib/google_adk/cassettes/agent_error_handling.yaml new file mode 100644 index 00000000000..592647cb912 --- /dev/null +++ b/tests/contrib/google_adk/cassettes/agent_error_handling.yaml @@ -0,0 +1,84 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Can you use the failing_tool to test + error handling?"}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are a test agent. When needed: (1) plan, (2) search, (3) write short Python + to compute or transform results. Be concise.\n\nYou are an agent. 
Your internal + name is \"test_agent\".\n\n The description about you is \"Test agent for ADK + integration testing\""}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "A tiny search tool stub.", "name": "search_docs", "parameters": + {"properties": {"query": {"type": "STRING"}}, "type": "OBJECT"}}, {"description": + "Simple arithmetic tool.", "name": "multiply", "parameters": {"properties": + {"a": {"type": "INTEGER"}, "b": {"type": "INTEGER"}}, "type": "OBJECT"}}, {"name": + "failing_tool", "parameters": {"properties": {"query": {"type": "STRING"}}, + "type": "OBJECT"}}]}], "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '913' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/21UyY6jSBC911eUfGV6vGAWjzQHNhsDZscGRqNRAgkGs++41f/elKur29UaDiky + 3suIyAfxvr68vi58kAdxAFrYLP56/WeOvL5+faxvWJG3MG9n4CM0B0tQt7+478/Xp/eZEna538ZF + zoA0/XT4B56DDM7xRQjiNM6j/9qiSBd//M4CddT8z+kZqTpYT28J5q7bxW/4t0/7b5/TLtpr0UXX + 1oijHLRd/WiDKSOOCvg2zkixCVSioPs16pw7NlhtHMTpV4RAS8eK03g+MRuFA8jlOCbFfej6QFjl + B2mMJjZTLKxQBIltVa2+mojKrVogxYDq1BVvWXI5jVkz1tPZV+M23A6Txpu+JAZGqdwkVjqyrJsQ + qoH7WF0BXLbXeUaDVCJtCUlIhdKtM5uitcCf8QE5HMzR39Webp9P+2ztqMiJvpimkdADAqWkQzwD + udgmETiuTVxwErPPJnWMp7HQDmfdsaaLm/g66fU5OQ1Mu7zKAtybWL7WTsh+Z9lu0okbhbheKx8L + Nb6c9kLQtZTXgk7SCMz23Ksdy2FSQbS8YWUSZamHGWofZ9LOtrbl1fCO3b067ESGXJ6Ng11HwSQ2 + bu6vmYgZIL7Tm1gQzHwz9lCGOH82AMunwx2KXkxXQ7lRaHzYycqyFBNFuTmkf9hal/Ci1AxaBQFJ + rFaaGJIma7pZvsUNMjiXS/ni80trPDGrerPzJt+4KjlEdbDJXe92x3zMzasM3vXpajMWLBR05LRt + Ty1TMViv10YPekEyYMrv1+L27St5FOegjiXuFJdh2JuuM+FRv2OcvAmHYFATtNPA9rZ39caTYexb + +5sLPYoGI+2ihV+yGirtskpga5RdV8ews1yJX+oBubINOh10woSE0+OrEqFVieM7ijzjjeOM3TbG + q+mAkKLSh3ujUUhUnn9YNrAv0G2WEVVlQB04Q6xaStoeWrJrA9tAIssfjyzVQUe8DLxOOGVPe7ct + eeJD8+T7QjwFtgIY+RKUzI7ncoG7NCbfOIaMBoazMe5uZCHyhGRJTeOJCYhEvPFQwJT4PDSblo0y + 2STpi0bdY48tDkLpdC4G9vYoj42CcMH9fqdSXzxG77VvIxke8l6NJDu59iuJFXjevhdkLmVTSoCd + QFUcxTd/P0/vr8n999fULuoifUxpVgQw/aD/HOtFGOdxc9UhaIr8jWaYivrTSxZxHsBxDq9ePgo8 + Ui+6BkTwBFswOx/4aTKLsi6ysjWLG8yZons432a1e8/25JSfCGv8B94WLUg/QSj5gT0lbti5bJw+ + W+iTu863BGncPtzN5GzzyRbnAp/7+tDi5UmyD4P7vcf1yw/R3nU8w7qJ3wWLYDZL+GXzJ/Zl7vJR + b1HDpizyBh6DN8bOIQQgG8Whwun16aamUt822m3x8u3lO5sU4QhDBgAA + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 16:23:17 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=5033 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_adk/cassettes/agent_math_and_code.yaml b/tests/contrib/google_adk/cassettes/agent_math_and_code.yaml new file mode 100644 index 00000000000..6f7bf81226a --- /dev/null +++ b/tests/contrib/google_adk/cassettes/agent_math_and_code.yaml @@ -0,0 +1,153 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Please use the multiply tool 
to calculate + 37 times 29."}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are a helpful test agent. You can: (1) call tools using the provided functions, + (2) execute Python code blocks when they are provided to you. When you see ```python + code blocks, execute them using your code execution capability. Always be helpful + and use your available capabilities.\n\nYou are an agent. Your internal name + is \"test_agent\".\n\n The description about you is \"Test agent for ADK integration + testing\""}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": + "A tiny search tool stub.", "name": "search_docs", "parameters": {"properties": + {"query": {"type": "STRING"}}, "required": ["query"], "type": "OBJECT"}}, {"description": + "Simple arithmetic tool.", "name": "multiply", "parameters": {"properties": + {"a": {"type": "INTEGER"}, "b": {"type": "INTEGER"}}, "required": ["a", "b"], + "type": "OBJECT"}}]}], "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1020' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.14.1 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.14.1 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/21T23KiShR99yssXp05IF7QeUMEBaOCoFxOnTrVSjd0uDSBBtRU/n3QxEQzwwMF + a63de/eutV5b7TZzAKmPfUBhwfxq/9sg7fbr9X3hSEphShviBjVgBnL6pX1/Xu++Gwkq0wPFJJVA + HD8Uf/ApSGCDM0kZU5zFJ+bHdwXIg+IvlQ2zb2B+/L3gUtIQPeEb/vbw//ZYxtCQlEFITRykgJb5 + dSapkiXRn1OcjHIPsnMelEPTWW1Ws0AZhiqHcUAc+rTT+DpRRMkQHeU4OAhbL8jZyUFLvNzCYn+H + dMvKTqCH1BGRRmRcTzpqHOjGMBiz9YTlD0q/KsQt8EaDyvCqRZ7vbIVMBLazOKv+bmURgocr5WzM + KVJXI19xc64fzrpxsh4vIup2hhFyp2eucvi4Ohkyh5ZceUYgfiFoFvYVO0QYWa4AQQh1SRv4M1V5 + coWRqzkjrRat0I2PY62WeqAT9J9yO7Pt+dPalIeyaMimTMu9MJNCUTlyRA1CjtUqZ4pdeb9f4z2R + nJ20nnbVs81KhmYfujQyX2x5hVms8mCpTRMh6q25SRXs/HMy9iQR1OZi0QEQCuys0zkgg1fXVN84 + 1XBa4nH8/FyDsYm1iK2JqZMgZk1vU+uDOl9kKCsiUAmVI8zh2gmf980M+w6fkDlaUXVuIh8Bz+b7 + ShdFXJRsdL1a6YE1PJ68TFgOsDpwhyGfxluBDaXl+sU8golpTF8KLt9YvQPT+ptb/vtyCpOT+N2t + xIfxTf5pJQbhFBfhBoKCpBeZaa31T0MzOPXhsYG51q3B9WimLEAAl5CCJnrg0+lMlpMkoxaJYCqR + 8hq97ujD7XdRfRDw3AdPCQXxA9Xjuj/+OLiYNm1xfJ/hu3g3twQxpqfLVSzZse6y2TR4nOu2i9bd + ym6hepxxzLc+dva+xh3MC/y+rwAmzQZ/8v8MfjZDXtsxOSwykhZQ9S+K7ULXgCoi0p9uT8b/7hHN + 3InBtN5avwFBrCfJwgQAAA== + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 17:50:08 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2284 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "Please use the multiply tool to calculate + 37 times 29."}], "role": "user"}, {"parts": [{"thoughtSignature": 
"CvECAdHtim8rZe_H2au6SXNRNGgF6hI0iigoXtLVJ2wmFACQAXFx5c7UZgr_BcJmZrTiA4VfPTTpya3fI8oC8o9wB-IlgPQ6g9_wB_2cF4vsAUaZ85vQZvKrrVWFoB7_-KzIdVNTooi6NFzQHtfIN8dFYr04hG1lmO9KktY-6kfYDz0vX2lvyQE0fM0uzfalqofGh4FWhfifTY7eahePCJ5dGIFLY78YJX8JwAThYlx9JwC3a-g4LrWpWWHLOSE6EAQESEtub7GChAFx0oIgh0_JvXDiYEbbOiboCXVCOD1IzW_CQJWc1tkSqWENi_iI2aMJDm7k3O0BvgVdzm9ZCAawSKK-aee7_G--cfQ2IOtPRXv6Dui9ljjwa9SiJk_woSPogl_SZRwP5wrKpfpskav7vX7HeOXhjbbbOb-2moHfNtIHSfdfaZW24F1fk0kmRPPvNPgT6xyZp7M5iI5Y6h2nlU7_hCMOqSxaBSQDqs0rRT3c", + "functionCall": {"args": {"b": 29, "a": 37}, "name": "multiply"}}], "role": + "model"}, {"parts": [{"functionResponse": {"name": "multiply", "response": {"product": + 1073}}}], "role": "user"}], "systemInstruction": {"parts": [{"text": "You are + a helpful test agent. You can: (1) call tools using the provided functions, + (2) execute Python code blocks when they are provided to you. When you see ```python + code blocks, execute them using your code execution capability. Always be helpful + and use your available capabilities.\n\nYou are an agent. Your internal name + is \"test_agent\".\n\n The description about you is \"Test agent for ADK integration + testing\""}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": + "A tiny search tool stub.", "name": "search_docs", "parameters": {"properties": + {"query": {"type": "STRING"}}, "required": ["query"], "type": "OBJECT"}}, {"description": + "Simple arithmetic tool.", "name": "multiply", "parameters": {"properties": + {"a": {"type": "INTEGER"}, "b": {"type": "INTEGER"}}, "required": ["a", "b"], + "type": "OBJECT"}}]}], "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1742' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.14.1 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.14.1 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/2WQQU+EMBCF7/yKZs4LWVhx1at6UNwsKjEmZg+NjGuz0BI6JBjCf7eUhe1qD20z + 8/qm7+s8xuCTy1zknFDDDfswFcY6uw89JQklmcZUMsWK13TSjqtz7kZC2A6PYJsEbLVmJErULLpm + QrNwuV4F4Mj7+b5bnIbUqsDBoVQ5FpO8nwTwJaTQ3y/ItZKD7DXbpjB3hcyxNeWlNw2w1tBovscN + Ejdx+RwKqlqVFWXqgPJWNTZuFMWjm4PnTBBeHvukiBfnby/CxT9jfWfGisLl5iA1KXkh6GeIkt2/ + Z+CQoD//mljYc+cdqYyg3rDWYiSyx9Iw8qMg9s03rCHUqCslNT7kVpGkjzxN0iceteHmkF7FTeY/ + L8HrvV9/eFFZGQIAAA== + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 17:50:56 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2299 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_adk/cassettes/agent_run_async.yaml b/tests/contrib/google_adk/cassettes/agent_run_async.yaml new file mode 100644 index 00000000000..0c2f3bd7c56 --- /dev/null +++ b/tests/contrib/google_adk/cassettes/agent_run_async.yaml @@ -0,0 +1,158 @@ +interactions: + - request: + body: + '{"contents": [{"parts": [{"text": "First, call the search_docs tool with + query ''test''. Then call multiply with a=5 and b=3. 
Finally, execute Python + code to print the result."}], "role": "user"}], "systemInstruction": {"parts": + [{"text": "You are a test agent. When needed: (1) plan, (2) search, (3) write + short Python to compute results. Be concise.\n\nYou are an agent. Your internal + name is \"test_agent\".\n\n The description about you is \"Test agent for ADK + integration testing\""}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "A tiny search tool stub.", "name": "search_docs", "parameters": + {"properties": {"query": {"type": "STRING"}}, "type": "OBJECT"}}, {"description": + "Simple arithmetic tool.", "name": "multiply", "parameters": {"properties": + {"a": {"type": "INTEGER"}, "b": {"type": "INTEGER"}}, "type": "OBJECT"}}]}], + "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '881' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/51V246jRhB9n68Y+ZVssDEYiLQPgG0uBnO/mCiKMLQxNnRjaIzxav89zMzOrme1 + ipTwgJpzqqpPFVKdL0/Pz5M0gVmRJRi0kz+e/xyR5+cvr+8XDkEMIB6Jd2gE66TBP2Lfni8P5zHk + 0MEUFwhKSVl+SP7Gw6QCIz5pQdKkx78zlLaT334OSpq8/UXyyFw60Awv+aNoPPmJ//rh++vHshN8 + RF1+xG6RwwR3zasK6aquhUzBRcVtF1Jy9+uNgpkwbDmq6LhqyxrWQT97a3UlXRk/tWiDqk1t50+d + TOvmSFgnWocIMqbleNYYHLu6t/R0VpYDVqVrXmW2n0FbDy8w8LJpRnJWn4k+rHXdtNEKmuWZECgI + 5etqEd08ljLKFXEl6Y7uC4eS4o0AbcKX2KIpYeQHFNzudEJWVT08tap8PyYr7HNMMN9DdUuVAm2r + OHb4+MZnYtdX97uw0rX7DnS9MM0r4iCURs4zhY92p2Cv7YC8sWlPoJbVjb0soyYcgCZUcmzN2dlg + +bjH5RnBSDht4ws7m+WeWwxMvWaqRqmnxMGNyBwZvHuVy2J7YqChVMC5KIOq9tdGwNlZIuMIgAwP + hXapuY46REKQL7CqlkpWdlUVFvV0WTJ4SjTB8tgTgdHAoadNg70ond4sStoOGkmUXUiyd12aA7vl + tSqx0W5mmgbPhVt/Y4oLXd5tlgpHmBcyINZTWV9G+4vhWxXPpIYXJML0KFeEJVHGzEpRuM4OUtTa + Kz1QbG4ph/pVkveaCnoMVs1wEq0kLxVyyQjzuR7Qi3tbyY4b3q7y5dzkAkn1qSgZEdi6pdLYB9Gx + ZDtdI01wRcer73ZEyxStDfx2oyB9t+rj9TIXmXtPntxKxdHiyskaWSpp53j0QTFPZmp5c0RaaXqb + h2IYJ9MQKUXpjZIgs0ADYAUu79cJNM5CvD1wWyoLW4JOFxHTW0pnGfJ0htv1NRHiRFuhPQgad348 + rz3ScyMfXDh6hxnpHoe8Gjh8pXq7mKeBI2r0kUXp7OC69al3CM06X5mm9Y+0SeJjnQmz9EBqtaNe + nA3e7Mlhe3RmJKFyooFCcoZ6Mu1K9ezmUGdYsxt6kKMMegWRc1RmrplU3STrK1Dkkx4LgokWXc9t + eUqZpmvR9W6k7ucmGVlFDb0jaSksJNZHzNqhtK9PN6ExhMFI01YnbCfgMq3MXEEq5413DabCyUN1 + KlbdnI1dl/e7U0VmzJXfllJV8EZiqcSevonhaTDITa2MOSwX0ieC1S0hKL3+8+fHrfFhR/zPTVZ1 + JS7qcvgva2w/wvOf418yRpz596329KvzXz9qTRpUvulCGSjfm/3e6ORQwKI9OiBpEXwJcz3T+i59 + UsAM3EZ4+vR+wWvpSdcmOTAATka7SL73NKkbVNXYQ2cAJdS92sWMp9+qPdjLh4D5t8YnGOGk/EDR + 9Dv3ULhdjtcW5aPvPFjS2GVSFvjVE7xV5D38hfGCj7reZ/H0MLJ3W/iokZotnr4N7W2OAWja4m1g + OajGEX6ifmc+jSpf75s0oK0RbIGavUQsOF1NzGIv0tUwM84W10L2k3CePH19+gcd9w4feAcAAA== + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 01:52:43 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=4674 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK + - request: + body: + '{"contents": [{"parts": [{"text": "First, call the search_docs tool with + query ''test''. Then call multiply with a=5 and b=3. 
Finally, execute Python + code to print the result."}], "role": "user"}, {"parts": [{"thoughtSignature": + "CvIFAdHtim8N6CazUpKHt5WWs82iu8mN7MPfLkTFIECv5UcP4M2pOJYU0RdJu3oAFaJuo-_Z4GZ1rM87Ezs401llytICvgmdQUdnQLWqnVTd0d_8PwdBUnpLLOQoEnOlk-A2nnGvE6XxT72MlE-v_4u4wiR2CZKAnQ-UC7irlnXUV2nNYL-GIILWjsIGzhaEtU85V3bnIN2lA4QItZR9Zx9dBuwmzzAELJzYeuwA0gm-fAlMg95iUoYjVbJYeGKQ4TA2Dmx7qDXrWyeJAmGZP371yPUtwtlkonXAjNZq711gTSiy5pF5mrHp0-fSX_goM9SvGliNj5nMHmeRqHyIIwvrAtdkC_ZXeedtyiJqp8u2fXAVg6tIIlHdlummWip0Dl5t0-rVDhw-VMrnyw4OM7qHuLr6l4QVrCBGSn_7zLC3eQs9JmaQoY1OOM98WNUKOB6LGYKDH8-Oq_V-F0GLDXbqMUPm95cMTVaA0hGm-PC2M1PcoWFdfCXsQELVHQ8DGWLvCGbJIewteEryjBPaglH_D5A33LV46zsmGRSWxvGqkrgA_2wcBCMXeNSlHrQfBRPGQcFoJASBRTpzQX4G24Jy9NKHoLYEwZFDgB5zw_jSmItX6v8GJ_lHcuRT4fHOjOcPT3o_Pccx3WBWZa0WoHilTbJIn56oye7A8gwFanMkAZNf8N2dWs-4c6X5wPHuPMG01tsFvaAZaJEobeVrS3hkFT_TSXUeq84Yt5CzZW9IVR9mITYZ94eRBJ4h7oc1fSSpjwR-JPkv5rsUh4O_thpdA1cf_JpRIqRKtKb_yNhR1_-I8BMoW_1ow_culIkSgnL57OuywegodnTi-g82dOF5cIKaFveHGjLZAAOo6uw8N92H0cFBSTx_LUgO_XPipnTh_PH7n-Fht7QWCbpjxArMAyMccsL-QRV8dJldSACl3rTvV0AjTopcBmu37ZSS9Uujm_d5v9NlCmi9MaPI-b4xBWjyM_KpHrTv78W4j-7LPAVlTw==", + "functionCall": {"args": {"query": "test"}, "name": "search_docs"}}, {"functionCall": + {"args": {"b": 3, "a": 5}, "name": "multiply"}}], "role": "model"}, {"parts": + [{"functionResponse": {"name": "search_docs", "response": {"results": ["Found + reference for: test"]}}}, {"functionResponse": {"name": "multiply", "response": + {"product": 15}}}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are a test agent. When needed: (1) plan, (2) search, (3) write short Python + to compute results. Be concise.\n\nYou are an agent. Your internal name is \"test_agent\".\n\n + The description about you is \"Test agent for ADK integration testing\""}], + "role": "user"}, "tools": [{"functionDeclarations": [{"description": "A tiny + search tool stub.", "name": "search_docs", "parameters": {"properties": {"query": + {"type": "STRING"}}, "type": "OBJECT"}}, {"description": "Simple arithmetic + tool.", "name": "multiply", "parameters": {"properties": {"a": {"type": "INTEGER"}, + "b": {"type": "INTEGER"}}, "type": "OBJECT"}}]}], "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '2288' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/3VRwWrCQBC95yvCnlrQ0gipbW9Fe5BWKjUUQaRMk1EXN7tpdgSD+O8dN4kmtt1D + djPz5s2bN3vP90UMOpEJEFrx6M854vt79z3mjCbUxIk6xMEMcjpjy7NvvBmy3OqYpNEDUKpVXOU1 + pMhxgTuMP7OC1kaLziUI8pX9o9jJSlx5lktNV0F4LS4wB++/v/N7cW4ocqMcYcrEqiY71ACxlFra + 9TuCZZ0Mm0Zvk5NeIXWCOw7fenUDRy22FlY4RgI2F06DsGiTZhSZDeqB2Tpze+FDydZYRgsQ3Fd5 + MgSqXdvvd34R2yG3laq5pcYCeUpQkorjKNHzLGpYzw3aumov3L3wKldKoz4wt7J0ZIUpe9Tt3YRd + luEIRY42M9riKDki+vHrCF7kbPg1pWC8mXzfRUXxZIR38H4AtuT8QocCAAA= + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 01:52:45 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2046 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + 
- '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_adk/cassettes/agent_tool_usage.yaml b/tests/contrib/google_adk/cassettes/agent_tool_usage.yaml new file mode 100644 index 00000000000..bba5a78d60e --- /dev/null +++ b/tests/contrib/google_adk/cassettes/agent_tool_usage.yaml @@ -0,0 +1,154 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Can you search for information about + recurring revenue?"}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are a test agent. When needed: (1) plan, (2) search, (3) write short Python + to compute or transform results. Be concise.\n\nYou are an agent. Your internal + name is \"test_agent\".\n\n The description about you is \"Test agent for ADK + integration testing\""}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "A tiny search tool stub.", "name": "search_docs", "parameters": + {"properties": {"query": {"type": "STRING"}}, "type": "OBJECT"}}, {"description": + "Simple arithmetic tool.", "name": "multiply", "parameters": {"properties": + {"a": {"type": "INTEGER"}, "b": {"type": "INTEGER"}}, "type": "OBJECT"}}]}], + "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '811' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/21UXY+qSBR891cYX713FRkFN7kPCqiMIB8KApvNpoEWUGigm0ZxMv99GedL7y4P + HThVp8+hkqqXTrfbCwAKkxBUkPT+7P7VVrrdl9v5huWogqhqgc9SWywArr6578/L3XtLOVAUVEmO + BJCmD80fOAIZbOs9AgEO4n/CPCC9H7+TAI7I/zS3SEkhbt76MQwoxgmKuhjWEFHY+438+vD9+jij + V8U5jeJqm0QIVBTfVhKwJc3CVZVkU7suzrakqdJWCmaSSebAaI6l52sepLnEzSVTKxXOT8dp0GfG + arx/Yhc7JVXmT36RNcRk9FPDyKpgKdgpTuLAJ3MXsQdr00xK6I73EQNUZWLVT84xtx0Dsbyu0CCR + 8TLhC5kntSsqZwPH0+FI3kG9HBtByIkHQ1dRJSRMn3K4z8crpxS082S050m756GPyF5djuQUr+a6 + AqxIFw5clF8aBT17h8TSOX9o+bP5hQUlqZT4chSPGj9QF1iv6mBQeQvPysGAYQsdWW7KJOUlwaUq + GtvoKc3c1Eu87ZTy4NyYJp14A3M3Nqe6pFFpIs1NdhNZ9tVhfEcPyZDFts1fa5lXCv4s2Nw1pBDK + e0WfJlZ/wlsgkPFCwcMLpBvDOdhZM2/wWeqvNW1S8gYo9qeAkmezVMaU1nm+rkvpGEeQE2ezCWPv + XQe7dNtfVkhBepYMJf9sD+VAas/jYo3YWajmQz+0lrsKK9Ntv5K2QgiKk1EWA63fAGU8EBROSkcl + pdMTAJQsRUZu6mvWZ5uDuJCP89hRtoNt5l+uGlcQcmTP/fW85Iu1WgA7nrOJZ7h7JlTdgGyyOi52 + +QVpgwsJfQdT4k5OcbynZ5drNtxRTgTv2UGsqDVqnLCjmVjXq+v5KF4H61hxabThz35G/Dy4VmSB + ntmozGXb2xwULYrUVAnE5VUEmiaWp2FwGXk506wM2zCxaXgmMx+OVgvbYGomOvlEHdOY8InTP66n + 2dORFfBKmk63rdS15AqKtiAXyDPZwCj3TV0Zv37dO+fbNX9/O6aH8/TmkCwPYfpJ/7JU75CghMQm + BCRHb7TtTtO/TN1LUAgvbXnY+Rxwu7pHCYigCivQJhD4cnuvwHlWVLv8BJGQ01sCMdzw/ba7xHok + TD7wKq9A+gCxDP/jPxcTsR2bpPdRdpdy7V+CNKluMbOTnN1dPrUDHvf61KJzJ9lnuPy2IzvqfIj2 + rqMNMUneBYtg1kr4c/TH+Ge75W1eG3CkyBGBcvjGGMvcM1AP41VQRox60odFto+MqNd57fwLFTcD + cMsFAAA= + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 16:23:00 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2859 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + 
- '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "Can you search for information about + recurring revenue?"}], "role": "user"}, {"parts": [{"thoughtSignature": "CrUEAdHtim9VvpwVEOMESEcAERsBaQyjqZbOZeuoE7BEROqL7bl5lc-15MhW43FTLlLB4bpmysR1Pky1IMCULrXpkD_bsBYn3fUNy6qeY5Wg1aML6Uv4XjoVXQn38PLuciIrGi8pI8svYDLwQrh902ITePq5Qcd7DfQPMntCi1-u7r-8hHXqCOw62W8sSEcf-nsWMG2IlrHBPLaUgPCf7goxyLnJZfiUP7b0UbABx3aqstLhxjDjO8_MFrPtvc_tZFZUoa_13pPnUYl1iqxirqMDQSg4lmYlZiZS9u8awyRRu6Z_RT5R9PEOuE6EBR3NgUVzX1bXPds03rVV8zvI8Lp8wCV7zdueeIWLP9iU-68UacIrFLr0xeuNQXfVmyByrwE-KOO6q8QapWkcusJRqL5uuvooKvqEjhge7DAA61VWYXrYuS-GtnLnPmi0EbwV0IcEwV0jFKn3AdMo0bdUGTtrL9S-tESCdapkQqp_O-yaL5_CL7El2quu9kaausGD1Iyvzm-3yfDFIjBhXLS_SmbxzO7pssj3w-KBq8pKMpaVhB3iZQYW1dMYcsNmvhpToxnO_xsdbXrusY6khhWuwY7yN7jIiCZJXn3DOyMhi32ADvvHzwjDz_KhLYugN8wbmsbocztsFnJ3gqoIVZNfLOggMlLcDGzDaOODqk0cx2Zo1yHQVQRrRQZR1B02HFVQ1v1gkbsM5uhs8iX-jK9m4j3CrHE99S0xevEYCLOFsxe81m_QqWyvtQ==", + "functionCall": {"args": {"query": "recurring revenue"}, "name": "search_docs"}}], + "role": "model"}, {"parts": [{"functionResponse": {"name": "search_docs", "response": + {"results": ["Found reference for: recurring revenue"]}}}], "role": "user"}], + "systemInstruction": {"parts": [{"text": "You are a test agent. When needed: + (1) plan, (2) search, (3) write short Python to compute or transform results. + Be concise.\n\nYou are an agent. Your internal name is \"test_agent\".\n\n The + description about you is \"Test agent for ADK integration testing\""}], "role": + "user"}, "tools": [{"functionDeclarations": [{"description": "A tiny search + tool stub.", "name": "search_docs", "parameters": {"properties": {"query": {"type": + "STRING"}}, "type": "OBJECT"}}, {"description": "Simple arithmetic tool.", "name": + "multiply", "parameters": {"properties": {"a": {"type": "INTEGER"}, "b": {"type": + "INTEGER"}}, "type": "OBJECT"}}]}], "generationConfig": {}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1853' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.36.0 gl-python/3.12.11 google-adk/1.0.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/12Ry27CMBBF9/mKkdcFUSSE1E1VtV3QiodK1FYqLAyeBCvGE9mTAkL8e52EQIoX + tjX3eh7HxwhArKVVWklGLx7gJ0QAjtVeamQZLQehCYVgLh1fvfU6tu7BwrgvH4npexdGkFBhFUhw + mKBDu8YQcbAQDteFc9qmQflFW+BCdOFrIxl2VBgFByrA6AyBCTJLO5ArKhg0P8LCilbF0+W+vLv2 + 6chg2cSWFJrGfmoMItFW+80HSk+2tM3j6UxcVG0V7kO4FzUFqtSi8DLFMbIMxOSFi8gdbXOOKUP7 + HKYth+/3hnW2FuEbw1lnYmn+S/3mbSuxfwlltWmjb/1KmFIazYdylPj1OxYtEnzTV8OiOpfRmUoN + 6hOd1zWRFLeBUaffHXRCG1XC8Gc+J+txpErHgIZvcrKaTXju78fZLFXJuPPUE9Ep+gPnn1oDXAIA + AA== + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Tue, 16 Sep 2025 16:23:02 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2370 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_adk/conftest.py 
b/tests/contrib/google_adk/conftest.py
new file mode 100644
index 00000000000..aa93d643e3a
--- /dev/null
+++ b/tests/contrib/google_adk/conftest.py
@@ -0,0 +1,162 @@
+import os
+from typing import Any
+from unittest.mock import MagicMock
+
+from google.adk.agents.invocation_context import InvocationContext
+from google.adk.agents.llm_agent import LlmAgent
+from google.adk.code_executors import UnsafeLocalCodeExecutor
+from google.adk.models.google_llm import Gemini
+from google.adk.runners import InMemoryRunner
+from google.adk.sessions.base_session_service import BaseSessionService
+from google.adk.sessions.session import Session
+from google.adk.tools.function_tool import FunctionTool
+from google.genai import types
+import pytest
+
+from ddtrace._trace.pin import Pin
+from ddtrace.contrib.internal.google_adk.patch import patch as adk_patch
+from ddtrace.contrib.internal.google_adk.patch import unpatch as adk_unpatch
+from ddtrace.llmobs import LLMObs
+from tests.contrib.google_adk.utils import get_request_vcr
+from tests.llmobs._utils import TestLLMObsSpanWriter
+from tests.utils import DummyTracer
+from tests.utils import DummyWriter
+from tests.utils import override_global_config
+
+
+@pytest.fixture
+def ddtrace_global_config():
+    return {}
+
+
+@pytest.fixture
+def adk(ddtrace_global_config):
+    # Set dummy API key for VCR mode if no real API key is present
+    if not os.environ.get("GOOGLE_API_KEY"):
+        os.environ["GOOGLE_API_KEY"] = "dummy-api-key-for-vcr"
+
+    # Location/project may be required for client init.
+    os.environ.setdefault("GOOGLE_CLOUD_LOCATION", "us-central1")
+    os.environ.setdefault("GOOGLE_CLOUD_PROJECT", "dummy-project")
+
+    with override_global_config(ddtrace_global_config):
+        adk_patch()
+        import google.adk as adk
+
+        yield adk
+        adk_unpatch()
+
+
+@pytest.fixture
+def mock_tracer(adk):
+    pin = Pin.get_from(adk)
+    mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False))
+    if pin is not None:
+        pin._override(adk, tracer=mock_tracer)
+    yield mock_tracer
+
+
+@pytest.fixture(scope="session")
+def request_vcr():
+    yield get_request_vcr()
+
+
+@pytest.fixture
+async def test_runner(adk, mock_tracer):
+    """Set up a test runner with agent."""
+    runner = await setup_test_agent()
+    return runner
+
+
+@pytest.fixture
+def mock_invocation_context(test_runner) -> InvocationContext:
+    """Provides a mock InvocationContext."""
+    mock_session = MagicMock(spec=Session)
+    mock_session_service = MagicMock(spec=BaseSessionService)
+    return InvocationContext(
+        invocation_id="test_invocation",
+        agent=test_runner.agent,
+        session=mock_session,
+        session_service=mock_session_service,
+    )
+
+
+@pytest.fixture
+def llmobs_span_writer():
+    yield TestLLMObsSpanWriter(1.0, 5.0, is_agentless=True, _site="datad0g.com", _api_key="")
+
+
+@pytest.fixture
+def adk_llmobs(mock_tracer, llmobs_span_writer):
+    LLMObs.disable()
+    with override_global_config(
+        {
+            "_dd_api_key": "",
+            "_llmobs_ml_app": "",
+            "service": "tests.contrib.google_adk",
+        }
+    ):
+        LLMObs.enable(_tracer=mock_tracer, integrations_enabled=False)
+        LLMObs._instance._llmobs_span_writer = llmobs_span_writer
+        yield LLMObs
+    LLMObs.disable()
+
+
+@pytest.fixture
+def llmobs_events(adk_llmobs, llmobs_span_writer):
+    return llmobs_span_writer.events
+
+
+def search_docs(query: str) -> dict[str, Any]:
+    """A tiny search tool stub."""
+    return {"results": [f"Found reference for: {query}"]}
+
+
+def multiply(a: int, b: int) -> dict[str, Any]:
+    """Simple arithmetic tool."""
+    return {"product": a * b}
+
+
+async def setup_test_agent():
+    """Set up a test agent with tools and code executor."""
+    model = Gemini(model="gemini-2.5-pro")
+
+    # Wrap Python callables as tools the agent can invoke
+    tools = [
+        FunctionTool(func=search_docs),
+        FunctionTool(func=multiply),
+    ]
+
+    # Enable code execution so the model can emit code blocks that get executed
+    code_executor = UnsafeLocalCodeExecutor()
+
+    agent = LlmAgent(
+        name="test_agent",
+        description="Test agent for ADK integration testing",
+        model=model,
+        tools=tools,  # type: ignore[arg-type]
+        code_executor=code_executor,
+        instruction=(
+            "You are a helpful test agent. You can: (1) call tools using the provided functions, "
+            "(2) execute Python code blocks when they are provided to you. "
+            "When you see ```python code blocks, execute them using your code execution capability. "
+            "Always be helpful and use your available capabilities."
+        ),
+    )
+
+    runner = InMemoryRunner(agent=agent, app_name="TestADKApp")
+    await runner.session_service.create_session(
+        app_name=runner.app_name,
+        user_id="test-user",
+        session_id="test-session",
+    )
+
+    return runner
+
+
+def create_test_message(text: str) -> types.Content:
+    """Create a test message content."""
+    return types.Content(
+        role="user",
+        parts=[types.Part(text=text)],
+    )
diff --git a/tests/contrib/google_adk/test_google_adk.py b/tests/contrib/google_adk/test_google_adk.py
new file mode 100644
index 00000000000..9cadc4aa87a
--- /dev/null
+++ b/tests/contrib/google_adk/test_google_adk.py
@@ -0,0 +1,245 @@
+from google.adk.code_executors.code_execution_utils import CodeExecutionInput
+from google.adk.code_executors.unsafe_local_code_executor import UnsafeLocalCodeExecutor
+import pytest
+
+from tests.contrib.google_adk.conftest import create_test_message
+
+
+@pytest.mark.asyncio
+async def test_agent_run_async(test_runner, mock_tracer, request_vcr):
+    """Test that run_async creates the expected spans with basic APM tags."""
+    message = create_test_message("Say hello")
+
+    with request_vcr.use_cassette("agent_run_async.yaml"):
+        output = ""
+        try:
+            async for event in test_runner.run_async(
+                user_id="test-user",
+                session_id="test-session",
+                new_message=message,
+            ):
+                for part in event.content.parts:
+                    if hasattr(part, "function_response") and part.function_response is not None:
+                        response = part.function_response.response
+                        if "results" in response:
+                            output += response["results"][0]
+                        else:
+                            output += str(response)
+                        output += "\n"
+        except (TypeError, ValueError):
+            # The google adk library raises a TypeError for telemetry issues, most
+            # likely caused by replaying the VCR cassette, and, on its latest
+            # version only, a ValueError from the code executor ("Function
+            # exec_python is not found in the tools_dict"), which appears to be a
+            # bug in the library. Both are safe to ignore here.
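+            # The assertions below still validate the cassette-driven output and
+            # the emitted spans, so no coverage is lost by ignoring the error.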
+ pass + + assert output == "Found reference for: test\n{'product': 15}\n" + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + + runner_spans = [s for s in spans if "InMemoryRunner.run_async" in s.resource] + assert len(runner_spans) >= 1, f"Expected InMemoryRunner.run_async span, got spans: {[s.resource for s in spans]}" + + span = runner_spans[0] + assert span.name == "google_adk.request" + assert span.get_tag("component") == "google_adk" + assert span.get_tag("google_adk.request.provider") == "google" + assert span.get_tag("google_adk.request.model") == "gemini-2.5-pro" + + +@pytest.mark.asyncio +async def test_agent_with_tool_usage(test_runner, mock_tracer, request_vcr): + """Test E2E agent run that triggers tool usage.""" + message = create_test_message("Can you search for information about recurring revenue?") + + with request_vcr.use_cassette("agent_tool_usage.yaml"): + try: + output = "" + async for event in test_runner.run_async( + user_id="test-user", + session_id="test-session", + new_message=message, + ): + for part in event.content.parts: + if hasattr(part, "function_response") and part.function_response is not None: + response = part.function_response.response + if "results" in response: + output += response["results"][0] + else: + output += str(response) + output += "\n" + except TypeError: + # we're getting a TypeError for telemetry issues from the google adk library that + # is most likely due to the vcr cassette. we can ignore it. + pass + + assert output == "Found reference for: recurring revenue\n" + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + + runner_spans = [s for s in spans if "Runner.run_async" in s.resource] + assert len(runner_spans) >= 1, f"Expected Runner.run_async spans, got spans: {[s.resource for s in spans]}" + + tool_spans = [s for s in spans if "FunctionTool.__call_tool_async" in s.resource] + assert ( + len(tool_spans) >= 1 + ), f"Expected FunctionTool.__call_tool_async spans, got spans: {[s.resource for s in spans]}" + + runner_span = runner_spans[0] + assert runner_span.name == "google_adk.request" + assert runner_span.get_tag("component") == "google_adk" + assert runner_span.get_tag("google_adk.request.provider") == "google" + assert runner_span.get_tag("google_adk.request.model") == "gemini-2.5-pro" + + tool_span = tool_spans[0] + assert tool_span.name == "google_adk.request" + assert tool_span.get_tag("component") == "google_adk" + + +@pytest.mark.asyncio +async def test_agent_with_tool_calculation(test_runner, mock_tracer, request_vcr): + """Test E2E agent run that triggers tool usage for calculations.""" + message = create_test_message("Please use the multiply tool to calculate 37 times 29.") + + with request_vcr.use_cassette("agent_math_and_code.yaml"): + try: + output = "" + async for event in test_runner.run_async( + user_id="test-user", + session_id="test-session", + new_message=message, + ): + if event.content is None: + # Skip events with no content (e.g., malformed function calls) + continue + + for part in event.content.parts: + # Capture all text output + if hasattr(part, "text") and part.text: + output += part.text + "\n" + + # Capture function responses + if hasattr(part, "function_response") and part.function_response is not None: + response = part.function_response.response + if "results" in response: + output += response["results"][0] + "\n" + elif "product" in response: + output += str(response["product"]) + "\n" + else: + output += str(response) + "\n" + + except 
Exception: + # we're getting a TypeError for telemetry issues from the google adk library that + # is most likely due to the vcr cassette. we can ignore it. + pass + + # Check for tool calculation result + assert output.strip() != "", f"Expected some output but got: '{output}'" + assert "1073" in output or "product" in output.lower(), f"Expected multiply tool result (1073) but got: '{output}'" + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + + runner_spans = [s for s in spans if "Runner.run_async" in s.resource] + assert len(runner_spans) >= 1, f"Expected Runner.run_async spans, got spans: {[s.resource for s in spans]}" + + tool_spans = [s for s in spans if "FunctionTool.__call_tool_async" in s.resource] + assert ( + len(tool_spans) >= 1 + ), f"Expected FunctionTool.__call_tool_async spans, got spans: {[s.resource for s in spans]}" + + runner_span = runner_spans[0] + assert runner_span.name == "google_adk.request" + assert runner_span.get_tag("component") == "google_adk" + assert runner_span.get_tag("google_adk.request.provider") == "google" + assert runner_span.get_tag("google_adk.request.model") == "gemini-2.5-pro" + + tool_span = tool_spans[0] + assert tool_span.name == "google_adk.request" + assert tool_span.get_tag("component") == "google_adk" + + +def test_execute_code_creates_span(mock_invocation_context, mock_tracer): + """Test that a span is created when code is executed.""" + executor = UnsafeLocalCodeExecutor() + code_input = CodeExecutionInput(code='print("hello world")') + executor.execute_code(mock_invocation_context, code_input) + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + assert len(spans) == 1 + span = spans[0] + assert span.name == "google_adk.request" + assert span.resource == "UnsafeLocalCodeExecutor.execute_code" + assert span.get_tag("component") == "google_adk" + assert span.get_tag("google_adk.request.provider") == "google" + assert span.get_tag("google_adk.request.model") == "gemini-2.5-pro" + assert span.error == 0 + + +def test_execute_code_with_error_creates_span(mock_invocation_context, mock_tracer): + """Test that a span is created with error tags when code execution fails.""" + executor = UnsafeLocalCodeExecutor() + code_input = CodeExecutionInput(code='raise ValueError("Test error")') + executor.execute_code(mock_invocation_context, code_input) + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + assert len(spans) == 1 + span = spans[0] + assert span.name == "google_adk.request" + assert span.resource == "UnsafeLocalCodeExecutor.execute_code" + assert span.get_tag("component") == "google_adk" + assert span.get_tag("google_adk.request.provider") == "google" + assert span.get_tag("google_adk.request.model") == "gemini-2.5-pro" + # we don't set error tags for code execution failures + assert span.error == 0 + + +@pytest.mark.asyncio +async def test_error_handling_e2e(test_runner, mock_tracer, request_vcr): + """Test error handling in E2E agent execution.""" + from google.adk.tools.function_tool import FunctionTool + + def failing_tool(query: str) -> dict: + raise ValueError("Test error message") + + failing_tool_obj = FunctionTool(func=failing_tool) + test_runner.agent.tools.append(failing_tool_obj) + + message = create_test_message("Can you use the failing_tool to test error handling?") + + with request_vcr.use_cassette("agent_error_handling.yaml"): + try: + count = 0 + async for _ in test_runner.run_async( + user_id="test-user", + session_id="test-session", + 
new_message=message, + ): + count += 1 + if count > 5: + break + except Exception: + pass + + traces = mock_tracer.pop_traces() + spans = [s for t in traces for s in t] + + runner_spans = [s for s in spans if "Runner.run_async" in s.resource] + assert len(runner_spans) >= 1, f"Expected Runner.run_async spans, got spans: {[s.resource for s in spans]}" + + tool_spans = [s for s in spans if "FunctionTool.__call_tool_async" in s.resource] + if tool_spans: + tool_span = tool_spans[0] + assert tool_span.name == "google_adk.request" + assert tool_span.get_tag("component") == "google_adk" + + runner_span = runner_spans[0] + assert runner_span.name == "google_adk.request" + assert runner_span.get_tag("component") == "google_adk" + assert runner_span.get_tag("google_adk.request.provider") == "google" + assert runner_span.get_tag("google_adk.request.model") == "gemini-2.5-pro" diff --git a/tests/contrib/google_adk/test_google_adk_llmobs.py b/tests/contrib/google_adk/test_google_adk_llmobs.py new file mode 100644 index 00000000000..9f600b95d38 --- /dev/null +++ b/tests/contrib/google_adk/test_google_adk_llmobs.py @@ -0,0 +1,200 @@ +from unittest import mock + +import pytest + +from tests.contrib.google_adk.conftest import create_test_message +from tests.llmobs._utils import _expected_llmobs_non_llm_span_event + + +@pytest.mark.parametrize( + "ddtrace_global_config", + [dict(_llmobs_enabled=True, _llmobs_sample_rate=1.0, _llmobs_ml_app="")], +) +class TestLLMObsGoogleADK: + @pytest.mark.asyncio + async def test_agent_run1(self, test_runner, llmobs_events, request_vcr, mock_tracer): + """Test that a simple agent run creates a valid LLMObs span event.""" + error_type = mock.ANY + with request_vcr.use_cassette("agent_run_async.yaml"): + message = create_test_message("Say hello") + try: + async for _ in test_runner.run_async( + user_id="test-user", + session_id="test-session", + new_message=message, + ): + pass + except (TypeError, ValueError) as e: + # Handle known ADK library issues with VCR cassettes + if any(phrase in str(e) for phrase in ["exec_python", "Function", "JSON serializable", "bytes"]): + error_type = "builtins.TypeError" if isinstance(e, TypeError) else "builtins.ValueError" + else: + raise + + spans = mock_tracer.pop_traces()[0] + + # We expect 3 events: 2 tool calls and 1 agent run + assert len(llmobs_events) == 3 + assert len(spans) == 3 + + agent_span = spans[0] + search_tool_span = spans[1] + multiply_tool_span = spans[2] + + expected_llmobs_tool_span_events_agent_run( + llmobs_events, agent_span, search_tool_span, multiply_tool_span, error_type + ) + + @pytest.mark.asyncio + async def test_agent_run_with_tools(self, test_runner, llmobs_events, request_vcr, mock_tracer): + """Test that an agent run with tool usage creates a valid LLMObs span event.""" + error_type = 0 + with request_vcr.use_cassette("agent_tool_usage.yaml"): + message = create_test_message("Can you search for information about recurring revenue?") + try: + async for _ in test_runner.run_async( + user_id="test-user", + session_id="test-session", + new_message=message, + ): + pass + except (TypeError, ValueError) as e: + # Handle known ADK library issues with VCR cassettes + if any(phrase in str(e) for phrase in ["exec_python", "Function", "JSON serializable", "bytes"]): + error_type = "builtins.TypeError" if isinstance(e, TypeError) else "builtins.ValueError" + else: + raise + + spans = mock_tracer.pop_traces()[0] + assert len(llmobs_events) == 2 + assert len(spans) == 2 + + agent_span = spans[0] + tool_span = spans[1] 
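+        # Note: llmobs_events[0] is the tool event and llmobs_events[1] the agent
+        # event (events are written as spans finish, child first), while spans[0]
+        # above is the agent span; the helper below relies on this ordering.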
+ + expected_llmobs_agent_span_event_with_tools(llmobs_events, agent_span, tool_span, error_type) + + def test_code_execution(self, mock_invocation_context, mock_tracer, llmobs_events): + """Test that code execution creates a valid LLMObs span event.""" + from google.adk.code_executors.code_execution_utils import CodeExecutionInput + from google.adk.code_executors.unsafe_local_code_executor import UnsafeLocalCodeExecutor + + executor = UnsafeLocalCodeExecutor() + code_input = CodeExecutionInput(code='print("hello world")') + executor.execute_code(mock_invocation_context, code_input) + + span = mock_tracer.pop_traces()[0][0] + assert len(llmobs_events) == 1 + expected_llmobs_code_execution_event(llmobs_events[0], span) + + +def expected_llmobs_tool_span_events_agent_run( + llmobs_event, agent_span, search_tool_span, multiply_tool_span, error_type +): + assert llmobs_event[0] == _expected_llmobs_non_llm_span_event( + span=search_tool_span, + span_kind="tool", + input_value='{"query": "test"}', + output_value='{"results": ["Found reference for: test"]}', + metadata={"description": "A tiny search tool stub."}, + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + ) + + assert llmobs_event[1] == _expected_llmobs_non_llm_span_event( + span=multiply_tool_span, + span_kind="tool", + input_value='{"b": 3, "a": 5}', + output_value='{"product": 15}', + metadata={"description": "Simple arithmetic tool."}, + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + ) + + assert llmobs_event[2] == _expected_llmobs_non_llm_span_event( + span=agent_span, + span_kind="agent", + error_message=mock.ANY, + error_stack=mock.ANY, + error=error_type, + input_value="Say hello", + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + metadata={ + "agent_manifest": { + "description": "Test agent for ADK integration testing", + "framework": "Google ADK", + "instructions": "You are a helpful test agent. You can: " + "(1) call tools using the provided " + "functions, (2) execute Python code " + "blocks when they are provided to you. " + "When you see ```python code blocks, " + "execute them using your code execution " + "capability. 
Always be helpful and use " + "your available capabilities.", + "model": "gemini-2.5-pro", + "model_configuration": '{"arbitrary_types_allowed": true, "extra": "forbid"}', + "name": "test_agent", + "session_management": {"session_id": "test-session", "user_id": "test-user"}, + "tools": [ + {"description": "A tiny search tool stub.", "name": "search_docs"}, + {"description": "Simple arithmetic tool.", "name": "multiply"}, + ], + } + }, + output_value=mock.ANY, + token_metrics={}, + ) + + +def expected_llmobs_agent_span_event_with_tools(llmobs_event, agent_span, tool_span, error_type): + assert llmobs_event[0] == _expected_llmobs_non_llm_span_event( + span=tool_span, + span_kind="tool", + input_value='{"query": "recurring revenue"}', + output_value='{"results": ["Found reference for: recurring revenue"]}', + metadata={"description": "A tiny search tool stub."}, + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + ) + + assert llmobs_event[1] == _expected_llmobs_non_llm_span_event( + span=agent_span, + span_kind="agent", + error_message=mock.ANY, + error_stack=mock.ANY, + error=error_type, + input_value="Can you search for information about recurring revenue?", + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + metadata={ + "agent_manifest": { + "description": "Test agent for ADK integration testing", + "framework": "Google ADK", + "instructions": "You are a helpful test agent. You can: " + "(1) call tools using the provided " + "functions, (2) execute Python code " + "blocks when they are provided to you. " + "When you see ```python code blocks, " + "execute them using your code execution " + "capability. Always be helpful and use " + "your available capabilities.", + "model": "gemini-2.5-pro", + "model_configuration": '{"arbitrary_types_allowed": true, "extra": "forbid"}', + "name": "test_agent", + "session_management": {"session_id": "test-session", "user_id": "test-user"}, + "tools": [ + {"description": "A tiny search tool stub.", "name": "search_docs"}, + {"description": "Simple arithmetic tool.", "name": "multiply"}, + ], + } + }, + output_value=mock.ANY, + token_metrics={}, + ) + + +def expected_llmobs_code_execution_event(llmobs_event, span): + assert llmobs_event == _expected_llmobs_non_llm_span_event( + span=span, + span_kind="code_execute", + input_value='print("hello world")', + output_value="hello world\n", + metadata={}, + tags={"ml_app": "", "service": "tests.contrib.google_adk"}, + ) diff --git a/tests/contrib/google_adk/test_google_adk_patch.py b/tests/contrib/google_adk/test_google_adk_patch.py new file mode 100644 index 00000000000..a2e28045d89 --- /dev/null +++ b/tests/contrib/google_adk/test_google_adk_patch.py @@ -0,0 +1,43 @@ +from ddtrace.contrib.internal.google_adk.patch import get_version +from ddtrace.contrib.internal.google_adk.patch import patch +from ddtrace.contrib.internal.google_adk.patch import unpatch +from tests.contrib.patch import PatchTestCase + + +class TestGoogleADKPatch(PatchTestCase.Base): + __integration_name__ = "google_adk" + __module_name__ = "google.adk" + __patch_func__ = patch + __unpatch_func__ = unpatch + __get_version__ = get_version + + def assert_module_patched(self, module): + self.assert_wrapped(module.runners.Runner.run_async) + self.assert_wrapped(module.runners.Runner.run_live) + # self.assert_wrapped(module.flows.llm_flows.functions.__call_tool_async) + # self.assert_wrapped(module.flows.llm_flows.functions.__call_tool_live) + # TODO: fix this + # it fails with AttributeError: module 
'google.adk.flows.llm_flows.functions' has no
+        # attribute '_TestGoogleADKPatch__call_tool_async': Python name-mangles the
+        # double-underscore attribute reference inside this class body, prepending
+        # the class name to the looked-up attribute name (see the sketch below).
+        self.assert_wrapped(module.code_executors.BuiltInCodeExecutor.execute_code)
+        self.assert_wrapped(module.code_executors.VertexAiCodeExecutor.execute_code)
+        self.assert_wrapped(module.code_executors.UnsafeLocalCodeExecutor.execute_code)
+
+    def assert_not_module_patched(self, module):
+        self.assert_not_wrapped(module.runners.Runner.run_async)
+        self.assert_not_wrapped(module.runners.Runner.run_live)
+        # self.assert_not_wrapped(module.flows.llm_flows.functions.__call_tool_async)
+        # self.assert_not_wrapped(module.flows.llm_flows.functions.__call_tool_live)
+        self.assert_not_wrapped(module.code_executors.BuiltInCodeExecutor.execute_code)
+        self.assert_not_wrapped(module.code_executors.VertexAiCodeExecutor.execute_code)
+        self.assert_not_wrapped(module.code_executors.UnsafeLocalCodeExecutor.execute_code)
+
+    def assert_not_module_double_patched(self, module):
+        self.assert_not_double_wrapped(module.runners.Runner.run_async)
+        self.assert_not_double_wrapped(module.runners.Runner.run_live)
+        # self.assert_not_double_wrapped(module.flows.llm_flows.functions.__call_tool_async)
+        # self.assert_not_double_wrapped(module.flows.llm_flows.functions.__call_tool_live)
+        self.assert_not_double_wrapped(module.code_executors.BuiltInCodeExecutor.execute_code)
+        self.assert_not_double_wrapped(module.code_executors.VertexAiCodeExecutor.execute_code)
+        self.assert_not_double_wrapped(module.code_executors.UnsafeLocalCodeExecutor.execute_code)
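# Editorial sketch, not part of the patch: a minimal, runnable illustration of why
# the commented-out assertions above raise AttributeError. Inside a class body,
# Python mangles any `__name` attribute reference to `_ClassName__name`; looking
# the attribute up by string bypasses mangling. `MangleDemo` and the namespace
# below are hypothetical stand-ins, not names from the patch.
import types

functions = types.SimpleNamespace(**{"__call_tool_async": lambda: "ok"})


class MangleDemo:
    @staticmethod
    def broken():
        # Compiled as functions._MangleDemo__call_tool_async -> AttributeError.
        return functions.__call_tool_async

    @staticmethod
    def works():
        # String attribute names are never mangled.
        return getattr(functions, "__call_tool_async")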
diff --git a/tests/contrib/google_adk/utils.py b/tests/contrib/google_adk/utils.py
new file mode 100644
index 00000000000..4b082576a8c
--- /dev/null
+++ b/tests/contrib/google_adk/utils.py
@@ -0,0 +1,13 @@
+import os
+
+import vcr
+
+
+def get_request_vcr():
+    return vcr.VCR(
+        cassette_library_dir=os.path.join(os.path.dirname(__file__), "cassettes"),
+        record_mode="once",
+        match_on=["path"],
+        filter_headers=["authorization", "x-goog-api-key"],
+        ignore_localhost=True,
+    )
diff --git a/tests/contrib/integration_registry/registry_update_helpers/integration_update_orchestrator.py b/tests/contrib/integration_registry/registry_update_helpers/integration_update_orchestrator.py
index c1f1dabef4c..b30370eedd9 100644
--- a/tests/contrib/integration_registry/registry_update_helpers/integration_update_orchestrator.py
+++ b/tests/contrib/integration_registry/registry_update_helpers/integration_update_orchestrator.py
@@ -1,5 +1,6 @@
 import json
 import os
+import shutil
 import subprocess
 import sys
 import tempfile
@@ -32,8 +33,10 @@ def _acquire_lock(self, lock_file_path):
                 return True
             except FileExistsError:
                 time.sleep(0.5)
-            except Exception:
+            except Exception as e:
+                print(f"Error acquiring lock {lock_file_path}: {e}", file=sys.stderr)
                 return False
+        print(f"Timeout acquiring lock {lock_file_path}", file=sys.stderr)
         return False
 
     def _release_lock(self, lock_file_path):
@@ -47,6 +50,15 @@ def _ensure_tooling_venv(self):
         tooling_python = os.path.join(self.tooling_env_path, "bin", "python")
         pip_timeout = 20
 
+        # If tooling python does not exist, the venv is either missing or corrupted.
+        # If the directory exists, remove it to ensure a clean slate for venv creation.
+        if not os.path.exists(tooling_python) and os.path.exists(self.tooling_env_path):
+            try:
+                shutil.rmtree(self.tooling_env_path)
+            except OSError as e:
+                print(f"Error removing tooling venv '{self.tooling_env_path}': {e}", file=sys.stderr)
+                return False
+
         if os.path.exists(tooling_python):
             try:
                 cmd = [tooling_python, "-m", "pip", "install", "-U"] + self.TOOLING_DEPS
@@ -136,6 +148,13 @@ def run(self, data_file_path: str):
         updater_succeeded = False
 
         try:
+            # Remove potentially stale venv lock file
+            if os.path.exists(self.venv_lock_file_path):
+                try:
+                    os.remove(self.venv_lock_file_path)
+                except OSError:
+                    pass
+
             # Setup Tooling Venv
             try:
                 if not self._acquire_lock(self.venv_lock_file_path):
diff --git a/tests/contrib/langchain/test_langchain_llmobs.py b/tests/contrib/langchain/test_langchain_llmobs.py
index 7d70fc92101..e43e107abb8 100644
--- a/tests/contrib/langchain/test_langchain_llmobs.py
+++ b/tests/contrib/langchain/test_langchain_llmobs.py
@@ -27,9 +27,50 @@
 PINECONE_VERSION = parse_version(pinecone_.__version__)
 
 
+# Multi-message prompt template test constants
+
+PROMPT_TEMPLATE_EXPECTED_CHAT_TEMPLATE = [
+    {"role": "system", "content": "You are a {role} assistant."},
+    {"role": "system", "content": "Your expertise is in {domain}."},
+    {"role": "system", "content": "Additional context: {context}"},
+    {"role": "user", "content": "I'm a user seeking help."},
+    {"role": "user", "content": "Please help with {task}"},
+    {"role": "user", "content": "Specifically, I need {specific_help}"},
+    {"role": "assistant", "content": "I understand your request."},
+    {"role": "assistant", "content": "I'll help you with {task}."},
+    {"role": "assistant", "content": "Let me provide {output_type}"},
+    {"role": "developer", "content": "Make it {style} and under {limit} words"},
+]
+
+
+def _create_multi_message_prompt_template(langchain_core):
+    """Helper function to create multi-message ChatPromptTemplate with mixed input types."""
+    from langchain_core.messages import AIMessage
+    from langchain_core.messages import HumanMessage
+    from langchain_core.messages import SystemMessage
+    from langchain_core.prompts import AIMessagePromptTemplate
+    from langchain_core.prompts import ChatMessagePromptTemplate
+    from langchain_core.prompts import HumanMessagePromptTemplate
+    from langchain_core.prompts import SystemMessagePromptTemplate
+
+    return langchain_core.prompts.ChatPromptTemplate.from_messages(
+        [
+            SystemMessage(content="You are a {role} assistant."),  # has template-style placeholders, but plain messages are not formatted
+            ("system", "Your expertise is in {domain}."),
+            SystemMessagePromptTemplate.from_template("Additional context: {context}"),
+            HumanMessage(content="I'm a user seeking help."),
+            ("human", "Please help with {task}"),
+            HumanMessagePromptTemplate.from_template("Specifically, I need {specific_help}"),
+            AIMessage(content="I understand your request."),
+            ("ai", "I'll help you with {task}."),
+            AIMessagePromptTemplate.from_template("Let me provide {output_type}"),
+            ChatMessagePromptTemplate.from_template("Make it {style} and under {limit} words", role="developer"),
+        ]
+    )
+
 
 def _expected_langchain_llmobs_llm_span(
-    span, input_role=None, mock_io=False, mock_token_metrics=False, span_links=False, metadata=None
+    span, input_role=None, mock_io=False, mock_token_metrics=False, span_links=False, metadata=None, prompt=None
 ):
     provider = span.get_tag("langchain.request.provider")
 
@@ -55,6 +96,7 @@ def _expected_langchain_llmobs_llm_span(
         token_metrics=metrics,
         tags={"ml_app": "langchain_test", "service": "tests.contrib.langchain"},
span_links=span_links, + prompt=prompt, ) @@ -100,7 +142,7 @@ def test_llmobs_openai_llm_proxy(mock_generate, langchain_openai, llmobs_events, llm.invoke("What is the capital of France?") span = tracer.pop_traces()[0][0] assert len(llmobs_events) == 2 - assert llmobs_events[1]["meta"]["span.kind"] == "llm" + assert llmobs_events[1]["meta"]["span"]["kind"] == "llm" def test_llmobs_openai_chat_model(langchain_openai, llmobs_events, tracer, openai_url): @@ -151,7 +193,7 @@ def test_llmobs_openai_chat_model_proxy(mock_generate, langchain_openai, llmobs_ chat_model.invoke([HumanMessage(content="What is the capital of France?")]) span = tracer.pop_traces()[0][0] assert len(llmobs_events) == 2 - assert llmobs_events[1]["meta"]["span.kind"] == "llm" + assert llmobs_events[1]["meta"]["span"]["kind"] == "llm" def test_llmobs_string_prompt_template_invoke(langchain_core, langchain_openai, openai_url, llmobs_events, tracer): @@ -217,6 +259,122 @@ def test_llmobs_string_prompt_template_invoke_chat_model( assert actual_prompt["variables"] == variable_dict +def test_llmobs_string_prompt_template_single_variable_string_input( + langchain_core, langchain_openai, openai_url, llmobs_events, tracer +): + template_string = "Write a creative story about {topic}." + single_variable_template = langchain_core.prompts.PromptTemplate( + input_variables=["topic"], template=template_string + ) + llm = langchain_openai.OpenAI(base_url=openai_url) + chain = single_variable_template | llm + + # Pass string input directly instead of dict for single variable template + string_input = "time travel" + chain.invoke(string_input) + + llmobs_events.sort(key=lambda span: span["start_ns"]) + assert len(llmobs_events) == 2 + actual_prompt = llmobs_events[1]["meta"]["input"]["prompt"] + assert actual_prompt["id"] == "test_langchain_llmobs.single_variable_template" + assert actual_prompt["template"] == template_string + # Should convert string input to dict format with single variable + assert actual_prompt["variables"] == {"topic": "time travel"} + + +def test_llmobs_multi_message_prompt_template_sync_chain( + langchain_core, langchain_openai, openai_url, llmobs_events, tracer +): + multi_message_template = _create_multi_message_prompt_template(langchain_core) + llm = langchain_openai.ChatOpenAI(base_url=openai_url) + chain = multi_message_template | llm + + variable_dict = { + "domain": "creative writing", + "context": "focus on storytelling", + "task": "writing a short story", + "specific_help": "character development", + "output_type": "guidance", + "style": "engaging", + "limit": "100", + } + + chain.invoke(variable_dict) + + llmobs_events.sort(key=lambda span: span["start_ns"]) + assert len(llmobs_events) == 2 + + # Verify the prompt structure in the LLM span + actual_prompt = llmobs_events[1]["meta"]["input"]["prompt"] + assert actual_prompt["id"] == "test_langchain_llmobs.multi_message_template" + assert actual_prompt["variables"] == variable_dict + assert actual_prompt.get("template") is None + assert actual_prompt["chat_template"] == PROMPT_TEMPLATE_EXPECTED_CHAT_TEMPLATE + + +def test_llmobs_multi_message_prompt_template_sync_direct_invoke( + langchain_core, langchain_openai, openai_url, llmobs_events, tracer +): + multi_message_template = _create_multi_message_prompt_template(langchain_core) + llm = langchain_openai.ChatOpenAI(base_url=openai_url) + + variable_dict = { + "domain": "data analysis", + "context": "focus on accuracy", + "task": "analyzing datasets", + "specific_help": "statistical methods", + "output_type": 
"insights", + "style": "detailed", + "limit": "150", + } + + # Test direct invoke on template then LLM (no chain) + prompt_value = multi_message_template.invoke(variable_dict) + llm.invoke(prompt_value) + + llmobs_events.sort(key=lambda span: span["start_ns"]) + assert len(llmobs_events) == 1 # Only LLM span for direct invoke + + # Verify the prompt structure + actual_prompt = llmobs_events[0]["meta"]["input"]["prompt"] + assert actual_prompt["id"] == "test_langchain_llmobs.multi_message_template" + assert actual_prompt["variables"] == variable_dict + assert actual_prompt.get("template") is None + assert actual_prompt["chat_template"] == PROMPT_TEMPLATE_EXPECTED_CHAT_TEMPLATE + + +@pytest.mark.asyncio +async def test_llmobs_multi_message_prompt_template_async_direct_invoke( + langchain_core, langchain_openai, openai_url, llmobs_events, tracer +): + multi_message_template = _create_multi_message_prompt_template(langchain_core) + llm = langchain_openai.ChatOpenAI(base_url=openai_url) + + variable_dict = { + "domain": "software engineering", + "context": "focus on best practices", + "task": "code review", + "specific_help": "performance optimization", + "output_type": "recommendations", + "style": "thorough", + "limit": "200", + } + + # Test direct async invoke on template then LLM + prompt_value = await multi_message_template.ainvoke(variable_dict) + await llm.ainvoke(prompt_value) + + llmobs_events.sort(key=lambda span: span["start_ns"]) + assert len(llmobs_events) == 1 # Only LLM span for direct invoke + + # Verify the prompt structure + actual_prompt = llmobs_events[0]["meta"]["input"]["prompt"] + assert actual_prompt["id"] == "test_langchain_llmobs.multi_message_template" + assert actual_prompt["variables"] == variable_dict + assert actual_prompt.get("template") is None + assert actual_prompt["chat_template"] == PROMPT_TEMPLATE_EXPECTED_CHAT_TEMPLATE + + def test_llmobs_chain(langchain_core, langchain_openai, openai_url, llmobs_events, tracer): prompt = langchain_core.prompts.ChatPromptTemplate.from_messages( [("system", "You are world class technical documentation writer."), ("user", "{input}")] @@ -237,6 +395,17 @@ def test_llmobs_chain(langchain_core, langchain_openai, openai_url, llmobs_event mock_token_metrics=True, span_links=True, metadata={"max_tokens": 256, "temperature": 0.7}, + prompt={ + "id": "test_langchain_llmobs.prompt", + "chat_template": [ + {"content": "You are world class technical documentation writer.", "role": "system"}, + {"content": "{input}", "role": "user"}, + ], + "variables": {"input": "Can you explain what an LLM chain is?"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) @@ -271,11 +440,27 @@ def test_llmobs_chain_nested(langchain_core, langchain_openai, openai_url, llmob trace[2], mock_token_metrics=True, span_links=True, + prompt={ + "id": "langchain.unknown_prompt_template", + "chat_template": [{"content": "what is the city {person} is from?", "role": "user"}], + "variables": {"person": "Spongebob Squarepants", "language": "Spanish"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) assert llmobs_events[3] == _expected_langchain_llmobs_llm_span( trace[3], mock_token_metrics=True, span_links=True, + prompt={ + "id": "test_langchain_llmobs.prompt2", + "chat_template": [{"content": "what country is the city {city} in? 
respond in {language}", "role": "user"}], + "variables": {"city": mock.ANY, "language": "Spanish"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) @@ -304,12 +489,28 @@ def test_llmobs_chain_batch(langchain_core, langchain_openai, llmobs_events, tra input_role="user", mock_token_metrics=True, span_links=True, + prompt={ + "id": "langchain.unknown_prompt_template", + "chat_template": [{"content": "Tell me a short joke about {topic}", "role": "user"}], + "variables": {"topic": "chickens"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) assert llmobs_events[2] == _expected_langchain_llmobs_llm_span( trace[2], input_role="user", mock_token_metrics=True, span_links=True, + prompt={ + "id": "langchain.unknown_prompt_template", + "chat_template": [{"content": "Tell me a short joke about {topic}", "role": "user"}], + "variables": {"topic": "pigs"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) except AssertionError: assert llmobs_events[1] == _expected_langchain_llmobs_llm_span( @@ -317,12 +518,28 @@ def test_llmobs_chain_batch(langchain_core, langchain_openai, llmobs_events, tra input_role="user", mock_token_metrics=True, span_links=True, + prompt={ + "id": "langchain.unknown_prompt_template", + "chat_template": [{"content": "Tell me a short joke about {topic}", "role": "user"}], + "variables": {"topic": "chickens"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) assert llmobs_events[2] == _expected_langchain_llmobs_llm_span( trace[1], input_role="user", mock_token_metrics=True, span_links=True, + prompt={ + "id": "langchain.unknown_prompt_template", + "chat_template": [{"content": "Tell me a short joke about {topic}", "role": "user"}], + "variables": {"topic": "pigs"}, + "version": "0.0.0", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + }, ) @@ -654,8 +871,8 @@ def _assert_trace_structure_from_writer_call_args(self, span_kinds): call_args = call.args[0] assert ( - call_args["meta"]["span.kind"] == span_kind - ), f"Span kind is {call_args['meta']['span.kind']} but expected {span_kind}" + call_args["meta"]["span"]["kind"] == span_kind + ), f"Span kind is {call_args['meta']['span']['kind']} but expected {span_kind}" if span_kind == "workflow": assert len(call_args["meta"]["input"]["value"]) > 0 assert len(call_args["meta"]["output"]["value"]) > 0 diff --git a/tests/contrib/langgraph/test_langgraph_llmobs.py b/tests/contrib/langgraph/test_langgraph_llmobs.py index 58f66cbc1fa..3af7c6bde50 100644 --- a/tests/contrib/langgraph/test_langgraph_llmobs.py +++ b/tests/contrib/langgraph/test_langgraph_llmobs.py @@ -435,9 +435,9 @@ def test_agent_with_tool_calls_integrations_enabled( tool_span = llmobs_events[4] second_llm_span = llmobs_events[6] - assert first_llm_span["meta"]["span.kind"] == "llm" - assert tool_span["meta"]["span.kind"] == "tool" - assert second_llm_span["meta"]["span.kind"] == "llm" + assert first_llm_span["meta"]["span"]["kind"] == "llm" + assert tool_span["meta"]["span"]["kind"] == "tool" + assert second_llm_span["meta"]["span"]["kind"] == "llm" # assert llm -> tool span link assert tool_span["span_links"][1]["span_id"] == first_llm_span["span_id"] diff --git a/tests/contrib/litellm/test_litellm_llmobs.py b/tests/contrib/litellm/test_litellm_llmobs.py index 
0efb663daea..5c039c56d55 100644 --- a/tests/contrib/litellm/test_litellm_llmobs.py +++ b/tests/contrib/litellm/test_litellm_llmobs.py @@ -320,7 +320,7 @@ def test_router_completion( router_event = llmobs_events[1] llm_event = llmobs_events[0] - assert llm_event["meta"]["span.kind"] == "llm" + assert llm_event["meta"]["span"]["kind"] == "llm" assert llm_event["name"] == "completion" assert router_event == _expected_llmobs_non_llm_span_event( router_span, @@ -362,7 +362,7 @@ async def test_router_acompletion( router_event = llmobs_events[1] llm_event = llmobs_events[0] - assert llm_event["meta"]["span.kind"] == "llm" + assert llm_event["meta"]["span"]["kind"] == "llm" assert llm_event["name"] == "acompletion" assert router_event == _expected_llmobs_non_llm_span_event( router_span, @@ -404,7 +404,7 @@ def test_router_text_completion( router_event = llmobs_events[1] llm_event = llmobs_events[0] - assert llm_event["meta"]["span.kind"] == "llm" + assert llm_event["meta"]["span"]["kind"] == "llm" assert llm_event["name"] == "text_completion" assert router_event == _expected_llmobs_non_llm_span_event( router_span, @@ -446,7 +446,7 @@ async def test_router_atext_completion( router_event = llmobs_events[1] llm_event = llmobs_events[0] - assert llm_event["meta"]["span.kind"] == "llm" + assert llm_event["meta"]["span"]["kind"] == "llm" assert llm_event["name"] == "atext_completion" assert router_event == _expected_llmobs_non_llm_span_event( router_span, diff --git a/tests/contrib/openai/test_openai_llmobs.py b/tests/contrib/openai/test_openai_llmobs.py index b19bf7970ad..4db68c588a4 100644 --- a/tests/contrib/openai/test_openai_llmobs.py +++ b/tests/contrib/openai/test_openai_llmobs.py @@ -110,7 +110,7 @@ def test_completion_proxy( ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 2 - assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span.kind"] == "llm" + assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span"]["kind"] == "llm" def test_completion(self, openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer): """Ensure llmobs records are emitted for completion endpoints when configured. @@ -197,7 +197,7 @@ def test_completion_azure_proxy( ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 2 - assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span.kind"] == "llm" + assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span"]["kind"] == "llm" @pytest.mark.skipif( parse_version(openai_module.version.VERSION) >= (1, 60), @@ -337,7 +337,7 @@ def test_chat_completion_proxy( client.chat.completions.create(model=model, messages=input_messages, top_p=0.9, n=2, user="ddtrace-test") span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 2 - assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span.kind"] == "llm" + assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span"]["kind"] == "llm" def test_chat_completion(self, openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer): """Ensure llmobs records are emitted for chat completion endpoints when configured. 
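# Editorial sketch, not part of the patch: the mechanical change repeated across
# the litellm, openai, langchain, and langgraph tests above reflects the LLMObs
# event schema moving the span kind from a flat "span.kind" key to a nested
# "span" object under "meta". Hypothetical event dicts, for illustration only:
old_event = {"meta": {"span.kind": "llm"}}
new_event = {"meta": {"span": {"kind": "llm"}}}
assert old_event["meta"]["span.kind"] == new_event["meta"]["span"]["kind"]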
@@ -426,7 +426,7 @@ def test_chat_completion_azure_proxy( ) span = mock_tracer.pop_traces()[0][0] assert mock_llmobs_writer.enqueue.call_count == 2 - assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span.kind"] == "llm" + assert mock_llmobs_writer.enqueue.call_args_list[1].args[0]["meta"]["span"]["kind"] == "llm" @pytest.mark.skipif( parse_version(openai_module.version.VERSION) >= (1, 60), diff --git a/tests/contrib/pydantic_ai/test_pydantic_ai_llmobs.py b/tests/contrib/pydantic_ai/test_pydantic_ai_llmobs.py index 1742d028605..0545ff6dbed 100644 --- a/tests/contrib/pydantic_ai/test_pydantic_ai_llmobs.py +++ b/tests/contrib/pydantic_ai/test_pydantic_ai_llmobs.py @@ -218,7 +218,7 @@ async def test_agent_iter_error(self, pydantic_ai, request_vcr, llmobs_events): assert len(llmobs_events) == 1 assert llmobs_events[0]["status"] == "error" - assert llmobs_events[0]["meta"]["error.message"] == "test error" + assert llmobs_events[0]["meta"]["error"]["message"] == "test error" @pytest.mark.skipif(PYDANTIC_AI_VERSION < (0, 4, 4), reason="pydantic-ai < 0.4.4 does not support toolsets") async def test_agent_run_with_toolset(self, pydantic_ai, request_vcr, llmobs_events, mock_tracer): diff --git a/tests/contrib/pytest/test_pytest.py b/tests/contrib/pytest/test_pytest.py index 44dc5e8e224..25959a30884 100644 --- a/tests/contrib/pytest/test_pytest.py +++ b/tests/contrib/pytest/test_pytest.py @@ -2583,15 +2583,15 @@ def test_inner_2(): for skipped_test_span in skipped_test_spans: assert skipped_test_span.get_tag("test.skipped_by_itr") == "true" - def test_pytest_suite_level_skipping_counts_tests_not_suites(self): + def test_pytest_suite_level_skipping_counts_suites(self): """ Regression test for suite level skipping count bug. When ITR is enabled at suite level and suites are skipped, the `itr.tests_skipping.count` tag - should count the number of tests that were skipped (contained within those suites). + should count the number of suites that were skipped (instead of the number of tests). This test creates 2 suites with multiple tests each (4 tests total), expects all suites to be - skipped, and verifies that the count reflects the number of tests (4), not suites (2). + skipped, and verifies that the count reflects the number of suites (2), not tests (4). 
""" package_outer_dir = self.testdir.mkpydir("test_outer_package") os.chdir(str(package_outer_dir)) @@ -2651,12 +2651,12 @@ def test_inner_2(): assert session_span.get_tag("_dd.ci.itr.tests_skipped") == "true" assert session_span.get_tag("test.itr.tests_skipping.type") == "suite" - # This is the regression test: should count tests (4), not suites (2) - expected_test_count = 4 # 4 individual tests were skipped + # This is the regression test: should count suites (2), not tests (4) + expected_suite_count = 2 # 4 individual tests were skipped actual_count = session_span.get_metric("test.itr.tests_skipping.count") assert ( - actual_count == expected_test_count - ), f"Expected {expected_test_count} tests skipped but got {actual_count}" + actual_count == expected_suite_count + ), f"Expected {expected_suite_count} suites skipped but got {actual_count}" # Verify all test spans were skipped by ITR skipped_test_spans = [x for x in spans if x.get_tag("test.status") == "skip" and x.get_tag("type") == "test"] diff --git a/tests/contrib/pytest/test_pytest_xdist_itr.py b/tests/contrib/pytest/test_pytest_xdist_itr.py index 4ab977e0ff5..76f06005ccd 100644 --- a/tests/contrib/pytest/test_pytest_xdist_itr.py +++ b/tests/contrib/pytest/test_pytest_xdist_itr.py @@ -118,6 +118,12 @@ def test_pytest_xdist_itr_skips_tests_at_test_level_by_pytest_addopts_env_var(se return_value=itr_settings ).start() +# Mock fetch_skippable_items to return our test data +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_skippable_items", + return_value=itr_data +).start() + # Set ITR data when CIVisibility is enabled import ddtrace.internal.ci_visibility.recorder CIVisibility = ddtrace.internal.ci_visibility.recorder.CIVisibility @@ -173,6 +179,129 @@ def patched_enable(cls, *args, **kwargs): # Verify number of skipped tests in session assert session_span.get_metric("test.itr.tests_skipping.count") == 2 + def test_xdist_suite_mode_skipped_suites(self): + """Test that suite-level ITR skipping works correctly in xdist and counts suites, not individual tests.""" + + itr_skipping_sitecustomize = """ +# sitecustomize.py - ITR setup for xdist worker nodes +from unittest import mock + +# Import required modules +from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings +from ddtrace.internal.ci_visibility._api_client import EarlyFlakeDetectionSettings +from ddtrace.internal.ci_visibility._api_client import TestManagementSettings +from ddtrace.internal.ci_visibility._api_client import ITRData +from ddtrace.ext.test_visibility._test_visibility_base import TestSuiteId, TestModuleId, TestId + +# Create ITR settings and data +itr_settings = TestVisibilityAPISettings( + coverage_enabled=False, skipping_enabled=True, require_git=False, itr_enabled=True, + flaky_test_retries_enabled=False, known_tests_enabled=False, + early_flake_detection=EarlyFlakeDetectionSettings(), test_management=TestManagementSettings() +) + +# Create skippable suites for suite-level skipping +skippable_suites = { + TestSuiteId(TestModuleId(""), "test_scope1.py"), + TestSuiteId(TestModuleId(""), "test_scope2.py") +} +itr_data = ITRData(correlation_id="12345678-1234-1234-1234-123456789012", skippable_items=skippable_suites) + +# Mock API calls to return our settings +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_settings", + return_value=itr_settings +).start() + +mock.patch( + "ddtrace.internal.ci_visibility.recorder.CIVisibility._check_enabled_features", + 
return_value=itr_settings +).start() + +# Mock fetch_skippable_items to return our test data +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_skippable_items", + return_value=itr_data +).start() + +# Set ITR data when CIVisibility is enabled +import ddtrace.internal.ci_visibility.recorder +CIVisibility = ddtrace.internal.ci_visibility.recorder.CIVisibility +original_enable = CIVisibility.enable + +def patched_enable(cls, *args, **kwargs): + result = original_enable(*args, **kwargs) + if cls._instance: + cls._instance._itr_data = itr_data + return result + +CIVisibility.enable = classmethod(patched_enable) +""" + + # Create test files + self.testdir.makepyfile(sitecustomize=itr_skipping_sitecustomize) + self.testdir.makepyfile( + test_scope1=""" +import pytest + +class TestScope1: + def test_scope1_method1(self): + assert True + + def test_scope1_method2(self): + assert True +""", + test_scope2=""" +import pytest + +class TestScope2: + def test_scope2_method1(self): + assert True +""", + ) + self.testdir.chdir() + + itr_settings = TestVisibilityAPISettings( + coverage_enabled=False, + skipping_enabled=True, + require_git=False, + itr_enabled=True, + flaky_test_retries_enabled=False, + known_tests_enabled=False, + early_flake_detection=EarlyFlakeDetectionSettings(), + test_management=TestManagementSettings(), + ) + + with mock.patch( + "ddtrace.internal.ci_visibility.recorder.CIVisibility._check_enabled_features", return_value=itr_settings + ), mock.patch( + "ddtrace.internal.ci_visibility.recorder.CIVisibility.test_skipping_enabled", + return_value=True, + ): + # Run with xdist using loadscope mode (suite-level skipping) + rec = self.inline_run( + "--ddtrace", + "-n", + "2", + "--dist=loadscope", + "-s", + "-vvv", + extra_env={ + "DD_CIVISIBILITY_AGENTLESS_ENABLED": "1", + "DD_API_KEY": "foobar.baz", + "DD_INSTRUMENTATION_TELEMETRY_ENABLED": "0", + }, + ) + assert rec.ret == 0 # All tests skipped, so exit code is 0 + + # Assert on session span metrics - key assertion for suite-level skipping + spans = self.pop_spans() + session_span = [span for span in spans if span.get_tag("type") == "test_session_end"][0] + assert session_span.get_tag("test.itr.tests_skipping.enabled") == "true" + assert session_span.get_tag("test.itr.tests_skipping.type") == "suite" # loadscope uses suite-level skipping + # Verify number of skipped SUITES in session (should be 2 suites, not 3 tests) + assert session_span.get_metric("test.itr.tests_skipping.count") == 2 + def test_pytest_xdist_itr_skips_tests_at_test_level_without_loadscope(self): """Test that ITR tags are correctly aggregated from xdist workers.""" # Create a simplified sitecustomize with just the essential ITR setup @@ -210,6 +339,12 @@ def test_pytest_xdist_itr_skips_tests_at_test_level_without_loadscope(self): return_value=itr_settings ).start() +# Mock fetch_skippable_items to return our test data +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_skippable_items", + return_value=itr_data +).start() + # Set ITR data when CIVisibility is enabled import ddtrace.internal.ci_visibility.recorder CIVisibility = ddtrace.internal.ci_visibility.recorder.CIVisibility @@ -300,6 +435,12 @@ def test_pytest_xdist_itr_skips_tests_at_suite_level_with_loadscope(self): return_value=itr_settings ).start() +# Mock fetch_skippable_items to return our test data +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_skippable_items", + 
return_value=itr_data +).start() + # Set ITR data when CIVisibility is enabled import ddtrace.internal.ci_visibility.recorder CIVisibility = ddtrace.internal.ci_visibility.recorder.CIVisibility @@ -460,16 +601,16 @@ def test_handle_itr_should_skip_counts_skipped_tests_in_worker(self): test_id = TestId(TestSuiteId(TestModuleId("test_module"), "test_suite"), "test_name") mock_service = mock.MagicMock() - mock_service._suite_skipping_mode = True + mock_service._suite_skipping_mode = False # Use test-level skipping for worker count tests with mock.patch( "ddtrace.internal.test_visibility.api.InternalTestSession.is_test_skipping_enabled", return_value=True ), mock.patch( - "ddtrace.internal.test_visibility.api.InternalTestSuite.is_itr_unskippable", return_value=False + "ddtrace.internal.test_visibility.api.InternalTest.is_itr_unskippable", return_value=False ), mock.patch( "ddtrace.internal.test_visibility.api.InternalTest.is_attempt_to_fix", return_value=False ), mock.patch( - "ddtrace.internal.test_visibility.api.InternalTestSuite.is_itr_skippable", return_value=True + "ddtrace.internal.test_visibility.api.InternalTest.is_itr_skippable", return_value=True ), mock.patch( "ddtrace.internal.test_visibility.api.InternalTest.mark_itr_skipped" ), mock.patch( @@ -491,16 +632,16 @@ def test_handle_itr_should_skip_increments_existing_worker_count(self): test_id = TestId(TestSuiteId(TestModuleId("test_module"), "test_suite"), "test_name") mock_service = mock.MagicMock() - mock_service._suite_skipping_mode = True + mock_service._suite_skipping_mode = False # Use test-level skipping for worker count tests with mock.patch( "ddtrace.internal.test_visibility.api.InternalTestSession.is_test_skipping_enabled", return_value=True ), mock.patch( - "ddtrace.internal.test_visibility.api.InternalTestSuite.is_itr_unskippable", return_value=False + "ddtrace.internal.test_visibility.api.InternalTest.is_itr_unskippable", return_value=False ), mock.patch( "ddtrace.internal.test_visibility.api.InternalTest.is_attempt_to_fix", return_value=False ), mock.patch( - "ddtrace.internal.test_visibility.api.InternalTestSuite.is_itr_skippable", return_value=True + "ddtrace.internal.test_visibility.api.InternalTest.is_itr_skippable", return_value=True ), mock.patch( "ddtrace.internal.test_visibility.api.InternalTest.mark_itr_skipped" ), mock.patch( @@ -1690,6 +1831,11 @@ def test_func2(): # The ITR skipping type should be suite due to explicit env var override assert session_span.get_tag("test.itr.tests_skipping.type") == "suite" + expected_suite_count = 0 # No suites skipped + actual_count = session_span.get_metric("test.itr.tests_skipping.count") + assert ( + actual_count == expected_suite_count + ), f"Expected {expected_suite_count} suites skipped but got {actual_count}" def test_explicit_env_var_overrides_xdist_test_mode(self): """Test that explicit _DD_CIVISIBILITY_ITR_SUITE_MODE=False overrides xdist suite-level detection.""" @@ -1742,6 +1888,12 @@ def patched_enable(cls, *args, **kwargs): return_value=itr_settings ).start() +# Mock fetch_skippable_items to return our test data +mock.patch( + "ddtrace.internal.ci_visibility._api_client._TestVisibilityAPIClientBase.fetch_skippable_items", + return_value=itr_data +).start() + CIVisibility.enable = classmethod(patched_enable) """ diff --git a/tests/contrib/subprocess/test_subprocess.py b/tests/contrib/subprocess/test_subprocess.py index 85619bf739f..c9c13a62ea5 100644 --- a/tests/contrib/subprocess/test_subprocess.py +++ b/tests/contrib/subprocess/test_subprocess.py @@ -18,12 
+18,9 @@ PATCH_ENABLED_CONFIGURATIONS = ( dict(_asm_enabled=True), - dict(_iast_enabled=True), dict(_asm_enabled=True, _iast_enabled=True), dict(_asm_enabled=True, _iast_enabled=False), - dict(_asm_enabled=False, _iast_enabled=True), dict(_bypass_instrumentation_for_waf=False, _asm_enabled=True, _iast_enabled=True), - dict(_bypass_instrumentation_for_waf=False, _asm_enabled=False, _iast_enabled=True), dict(_bypass_instrumentation_for_waf=False, _asm_enabled=True, _iast_enabled=False), ) @@ -32,14 +29,18 @@ PATCH_DISABLED_CONFIGURATIONS = ( dict(), dict(_asm_enabled=False), + dict(_iast_enabled=True), dict(_iast_enabled=False), + dict(_asm_enabled=False, _iast_enabled=True), dict(_remote_config_enabled=False), + dict(_remote_config_enabled=False, _iast_enabled=True), dict(_asm_enabled=False, _iast_enabled=False), dict(_bypass_instrumentation_for_waf=True, _asm_enabled=False, _iast_enabled=False), dict(_bypass_instrumentation_for_waf=True), dict(_bypass_instrumentation_for_waf=False, _asm_enabled=False, _iast_enabled=False), dict(_bypass_instrumentation_for_waf=True, _asm_enabled=True, _iast_enabled=False), dict(_bypass_instrumentation_for_waf=True, _asm_enabled=False, _iast_enabled=True), + dict(_bypass_instrumentation_for_waf=False, _asm_enabled=False, _iast_enabled=True), ) CONFIGURATIONS = PATCH_ENABLED_CONFIGURATIONS + PATCH_DISABLED_CONFIGURATIONS diff --git a/tests/contrib/suitespec.yml b/tests/contrib/suitespec.yml index 48080b1aa9a..f6ef1832570 100644 --- a/tests/contrib/suitespec.yml +++ b/tests/contrib/suitespec.yml @@ -357,10 +357,29 @@ suites: - '@azure_functions' - tests/contrib/azure_functions/* - tests/snapshots/tests.contrib.azure_functions.* + pattern: ^azure_functions$ runner: riot snapshot: true services: - azurite + azure_functions:servicebus: + parallelism: 4 + paths: + - '@bootstrap' + - '@core' + - '@contrib' + - '@tracing' + - '@azure_functions' + - '@azure_servicebus' + - tests/contrib/azure_functions_servicebus/* + - tests/snapshots/tests.contrib.azure_functions_servicebus.* + pattern: azure_functions:servicebus + runner: riot + snapshot: true + services: + - azurite + - azuresqledge + - azureservicebusemulator azure_servicebus: parallelism: 4 paths: @@ -374,8 +393,8 @@ suites: runner: riot snapshot: true services: - - azureservicebusemulator - azuresqledge + - azureservicebusemulator botocore: parallelism: 11 paths: diff --git a/tests/contrib/unittest/test_unittest_snapshot.py b/tests/contrib/unittest/test_unittest_snapshot.py index fb3ade56fb2..98a0fcbf2e2 100644 --- a/tests/contrib/unittest/test_unittest_snapshot.py +++ b/tests/contrib/unittest/test_unittest_snapshot.py @@ -401,6 +401,9 @@ def ret_false(): return False """ self.testdir.makepyfile(ret_false=ret_false) + # work around apparent issue with the coverage library in which it + # doesn't track repeated imports under python 3.14 + self.testdir.makepyfile(ret_false2=ret_false) lib_fn = """ def lib_fn(): return True @@ -447,7 +450,7 @@ def test_second(self): from ret_false import ret_false assert not ret_false() def test_third(self): - from ret_false import ret_false + from ret_false2 import ret_false assert not ret_false() """ ) diff --git a/tests/debugging/exploration/debugger.py b/tests/debugging/exploration/debugger.py index 64f3ae8768d..a1d7eed0305 100644 --- a/tests/debugging/exploration/debugger.py +++ b/tests/debugging/exploration/debugger.py @@ -22,7 +22,7 @@ from ddtrace.debugging._probe.remoteconfig import ProbePollerEvent from ddtrace.debugging._signal.collector import SignalCollector from 
ddtrace.debugging._signal.snapshot import Snapshot -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader from ddtrace.internal.remoteconfig.worker import RemoteConfigPoller @@ -157,7 +157,7 @@ def probes(self) -> t.List[t.Optional[Probe]]: return self._probes or [None] -class NoopLogsIntakeUploader(LogsIntakeUploaderV1): +class NoopSignalUploader(SignalUploader): __collector__ = ExplorationSignalCollector _count = 0 @@ -184,7 +184,7 @@ def set_emitting(self, probe: Probe) -> None: class ExplorationDebugger(Debugger): __rc__ = NoopDebuggerRC - __uploader__ = NoopLogsIntakeUploader + __uploader__ = NoopSignalUploader __watchdog__ = ModuleCollector __logger__ = NoopProbeStatusLogger diff --git a/tests/debugging/live/test_live_debugger.py b/tests/debugging/live/test_live_debugger.py index 6e9e1a4019a..4b97c6476b4 100644 --- a/tests/debugging/live/test_live_debugger.py +++ b/tests/debugging/live/test_live_debugger.py @@ -5,7 +5,7 @@ from ddtrace.debugging._origin.span import SpanCodeOriginProcessorExit from ddtrace.debugging._probe.model import ProbeEvalTiming from ddtrace.internal import core -from tests.debugging.mocking import MockLogsIntakeUploaderV1 +from tests.debugging.mocking import MockSignalUploader from tests.debugging.mocking import debugger from tests.debugging.utils import create_snapshot_function_probe from tests.debugging.utils import create_trigger_function_probe @@ -13,11 +13,11 @@ class MockSpanCodeOriginProcessor(SpanCodeOriginProcessorExit): - __uploader__ = MockLogsIntakeUploaderV1 + __uploader__ = MockSignalUploader @classmethod - def get_uploader(cls) -> MockLogsIntakeUploaderV1: - return t.cast(MockLogsIntakeUploaderV1, cls.__uploader__._instance) + def get_uploader(cls) -> MockSignalUploader: + return t.cast(MockSignalUploader, cls.__uploader__._instance) class SpanProbeTestCase(TracerTestCase): diff --git a/tests/debugging/mocking.py b/tests/debugging/mocking.py index 6d916c65c04..6b339a8512f 100644 --- a/tests/debugging/mocking.py +++ b/tests/debugging/mocking.py @@ -19,7 +19,7 @@ from ddtrace.debugging._redaction import redact from ddtrace.debugging._signal.collector import SignalCollector from ddtrace.debugging._signal.snapshot import Snapshot -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader from ddtrace.settings._core import DDConfig from tests.debugging.probe.test_status import DummyProbeStatusLogger @@ -89,12 +89,13 @@ def wait(self, cond=lambda q: q, timeout=1.0): raise PayloadWaitTimeout() -class MockLogsIntakeUploaderV1(LogsIntakeUploaderV1): +class MockSignalUploader(SignalUploader): __collector__ = TestSignalCollector def __init__(self, interval=0.0): - super(MockLogsIntakeUploaderV1, self).__init__(interval) + super(MockSignalUploader, self).__init__(interval) self.queue = [] + self._state = self._online def _write(self, payload, endpoint): self.queue.append(payload.decode()) @@ -126,7 +127,7 @@ def snapshots(self) -> List[Snapshot]: class TestDebugger(Debugger): __logger__ = MockProbeStatusLogger - __uploader__ = MockLogsIntakeUploaderV1 + __uploader__ = MockSignalUploader def add_probes(self, *probes: Probe) -> None: self._on_configuration(ProbePollerEvent.NEW_PROBES, probes) @@ -216,11 +217,11 @@ def debugger(**config_overrides: Any) -> Generator[TestDebugger, None, None]: class MockSpanExceptionHandler(SpanExceptionHandler): - __uploader__ = MockLogsIntakeUploaderV1 + __uploader__ = MockSignalUploader @contextmanager -def 
exception_replay(**config_overrides: Any) -> Generator[MockLogsIntakeUploaderV1, None, None]: +def exception_replay(**config_overrides: Any) -> Generator[MockSignalUploader, None, None]: MockSpanExceptionHandler.enable() handler = MockSpanExceptionHandler._instance diff --git a/tests/debugging/origin/test_span.py b/tests/debugging/origin/test_span.py index b9b3e8cb6ad..62267401480 100644 --- a/tests/debugging/origin/test_span.py +++ b/tests/debugging/origin/test_span.py @@ -7,24 +7,24 @@ from ddtrace.debugging._session import Session from ddtrace.ext import SpanTypes from ddtrace.internal import core -from tests.debugging.mocking import MockLogsIntakeUploaderV1 +from tests.debugging.mocking import MockSignalUploader from tests.utils import TracerTestCase class MockSpanCodeOriginProcessorEntry(SpanCodeOriginProcessorEntry): - __uploader__ = MockLogsIntakeUploaderV1 + __uploader__ = MockSignalUploader @classmethod - def get_uploader(cls) -> MockLogsIntakeUploaderV1: - return t.cast(MockLogsIntakeUploaderV1, cls.__uploader__._instance) + def get_uploader(cls) -> MockSignalUploader: + return t.cast(MockSignalUploader, cls.__uploader__._instance) class MockSpanCodeOriginProcessor(SpanCodeOriginProcessorExit): - __uploader__ = MockLogsIntakeUploaderV1 + __uploader__ = MockSignalUploader @classmethod - def get_uploader(cls) -> MockLogsIntakeUploaderV1: - return t.cast(MockLogsIntakeUploaderV1, cls.__uploader__._instance) + def get_uploader(cls) -> MockSignalUploader: + return t.cast(MockSignalUploader, cls.__uploader__._instance) class SpanProbeTestCase(TracerTestCase): diff --git a/tests/debugging/test_debugger.py b/tests/debugging/test_debugger.py index 0e9ad67698d..54335f7a972 100644 --- a/tests/debugging/test_debugger.py +++ b/tests/debugging/test_debugger.py @@ -553,6 +553,8 @@ def test_debugger_multiple_function_probes_on_same_lazy_module(): for i in range(3) ] + sys.modules.pop("tests.submod.stuff", None) + with debugger() as d: d.add_probes(*probes) @@ -560,6 +562,7 @@ def test_debugger_multiple_function_probes_on_same_lazy_module(): assert len(d._probe_registry) == len(probes) assert all(_.error_type is None for _ in d._probe_registry.values()) + assert len(d._probe_registry._pending) == 0 # DEV: The following tests are to ensure compatibility with the tracer diff --git a/tests/debugging/test_encoding.py b/tests/debugging/test_encoding.py index bae78362838..7604c98afe3 100644 --- a/tests/debugging/test_encoding.py +++ b/tests/debugging/test_encoding.py @@ -235,8 +235,9 @@ def test_batch_json_encoder(): for _ in range(2 * n_snapshots): queue.put(s) - count = queue.count - payload = queue.flush() + data = queue.flush() + assert data is not None + payload, count = data decoded = json.loads(payload.decode()) assert len(decoded) == count assert n_snapshots <= count + 1 # Allow for rounding errors @@ -261,13 +262,19 @@ def test_batch_flush_reencode(): queue = SignalQueue(LogSignalJsonEncoder(None)) snapshot_total_size = sum(queue.put(s) for _ in range(2)) - assert queue.count == 2 - assert len(queue.flush()) == snapshot_total_size + 3 + data = queue.flush() + assert data is not None + payload, count = data + assert count == 2 + assert len(payload) == snapshot_total_size + 3 a, b = queue.put(s), queue.put(s) assert abs(a - b) < 1024 - assert queue.count == 2 - assert len(queue.flush()) == a + b + 3 + data = queue.flush() + assert data is not None + payload, count = data + assert count == 2 + assert len(payload) == a + b + 3 # ---- Side effects ---- diff --git 
a/tests/debugging/test_expressions.py b/tests/debugging/test_expressions.py index a4ffe0a48bf..a6f7db1ff9f 100644 --- a/tests/debugging/test_expressions.py +++ b/tests/debugging/test_expressions.py @@ -1,4 +1,5 @@ from dis import dis +import re import pytest @@ -129,15 +130,98 @@ def __getitem__(self, name): {"bar": CustomObject("foo")}, True, ), + # Direct predicates + ({"not": True}, {}, False), + ({"not": False}, {}, True), + ({"isEmpty": {"ref": "empty_str"}}, {"empty_str": ""}, True), + ({"isEmpty": {"ref": "s"}}, {"s": "foo"}, False), + ({"isEmpty": {"ref": "empty_list"}}, {"empty_list": []}, True), + ({"isEmpty": {"ref": "l"}}, {"l": [1]}, False), + ({"isEmpty": {"ref": "empty_dict"}}, {"empty_dict": {}}, True), + ({"isEmpty": {"ref": "d"}}, {"d": {"a": 1}}, False), + # Arg predicates + ({"ne": [1, 2]}, {}, True), + ({"ne": [1, 1]}, {}, False), + ({"gt": [2, 1]}, {}, True), + ({"gt": [1, 2]}, {}, False), + ({"ge": [2, 1]}, {}, True), + ({"ge": [1, 1]}, {}, True), + ({"ge": [1, 2]}, {}, False), + ({"lt": [1, 2]}, {}, True), + ({"lt": [2, 1]}, {}, False), + ({"le": [1, 2]}, {}, True), + ({"le": [1, 1]}, {}, True), + ({"le": [2, 1]}, {}, False), + ({"all": [{"ref": "collection"}, {"not": {"isEmpty": {"ref": "@it"}}}]}, {"collection": ["foo", "bar"]}, True), + ( + {"all": [{"ref": "collection"}, {"not": {"isEmpty": {"ref": "@it"}}}]}, + {"collection": ["foo", "bar", ""]}, + False, + ), + ({"endsWith": [{"ref": "local_string"}, "world!"]}, {"local_string": "hello world!"}, True), + ({"endsWith": [{"ref": "local_string"}, "hello"]}, {"local_string": "hello world!"}, False), + # Nested expressions + ( + {"len": {"filter": [{"ref": "collection"}, {"gt": [{"ref": "@it"}, 1]}]}}, + {"collection": [1, 2, 3]}, + 2, + ), + ( + {"getmember": [{"getmember": [{"getmember": [{"ref": "self"}, "field1"]}, "field2"]}, "name"]}, + {"self": CustomObject("test-me")}, + "field2", + ), + ( + { + "any": [ + {"getmember": [{"ref": "self"}, "collectionField"]}, + {"startsWith": [{"getmember": [{"ref": "@it"}, "name"]}, "foo"]}, + ] + }, + {"self": CustomObject("test-me")}, + True, + ), + ( + {"and": [{"eq": [{"ref": "hits"}, 42]}, {"gt": [{"len": {"ref": "payload"}}, 5]}]}, + {"hits": 42, "payload": "hello world"}, + True, + ), + ( + {"and": [{"eq": [{"ref": "hits"}, 42]}, {"gt": [{"len": {"ref": "payload"}}, 20]}]}, + {"hits": 42, "payload": "hello world"}, + False, + ), + ( + {"index": [{"filter": [{"ref": "collection"}, {"gt": [{"ref": "@it"}, 2]}]}, 0]}, + {"collection": [1, 2, 3, 4]}, + 3, + ), + # Edge cases + ({"any": [{"ref": "empty_list"}, {"ref": "@it"}]}, {"empty_list": []}, False), + ({"all": [{"ref": "empty_list"}, {"ref": "@it"}]}, {"empty_list": []}, True), + ({"count": {"ref": "payload"}}, {"payload": "hello"}, 5), + ({"substring": [{"ref": "s"}, -5, -1]}, {"s": "hello world"}, "worl"), # codespell:ignore worl + ({"substring": [{"ref": "s"}, 0, 100]}, {"s": "hello"}, "hello"), + ({"matches": [{"ref": "s"}, "["]}, {"s": "a"}, re.error), + ( + {"or": [True, {"ref": "side_effect"}]}, + {"side_effect": SideEffect("or should short-circuit")}, + True, + ), + ({"ref": "@it"}, {}, ValueError), + ( + {"len": {"filter": [{"ref": "collection"}, {"any": [{"ref": "@it"}, {"eq": [{"ref": "@it"}, 1]}]}]}}, + {"collection": [[1, 2], [3, 4], [5]]}, + 1, + ), ], ) def test_parse_expressions(ast, _locals, value): - compiled = dd_compile(ast) - if isinstance(value, type) and issubclass(value, Exception): with pytest.raises(value): - compiled(_locals) + dd_compile(ast)(_locals) else: + compiled = dd_compile(ast) 
assert compiled(_locals) == value, dis(compiled) diff --git a/tests/debugging/test_uploader.py b/tests/debugging/test_uploader.py index a16bb8e7254..b99d7d4a698 100644 --- a/tests/debugging/test_uploader.py +++ b/tests/debugging/test_uploader.py @@ -5,7 +5,7 @@ from ddtrace.debugging._encoding import BufferFull from ddtrace.debugging._encoding import SignalQueue -from ddtrace.debugging._uploader import LogsIntakeUploaderV1 +from ddtrace.debugging._uploader import SignalUploader # DEV: Using float('inf') with lock wait intervals may cause an OverflowError @@ -13,10 +13,11 @@ LONG_INTERVAL = 2147483647.0 -class MockLogsIntakeUploaderV1(LogsIntakeUploaderV1): +class MockSignalUploader(SignalUploader): def __init__(self, *args, **kwargs): - super(MockLogsIntakeUploaderV1, self).__init__(*args, **kwargs) + super(MockSignalUploader, self).__init__(*args, **kwargs) self.queue = Queue() + self._state = self._online def _write(self, payload, endpoint): self.queue.put(payload.decode()) @@ -26,7 +27,7 @@ def payloads(self): return [json.loads(data) for data in self.queue] -class ActiveBatchJsonEncoder(MockLogsIntakeUploaderV1): +class ActiveBatchJsonEncoder(MockSignalUploader): def __init__(self, size=1 << 10, interval=1): super(ActiveBatchJsonEncoder, self).__init__(interval) diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py index 7d227fbc32a..d6a6c1db1ba 100644 --- a/tests/integration/test_integration.py +++ b/tests/integration/test_integration.py @@ -98,6 +98,7 @@ def test_uds_wrong_socket_path(): 1, "unix:///tmp/ddagent/nosockethere/{}/traces".format(encoding if encoding else "v0.5"), "client error (Connect)", + extra={"send_to_telemetry": False}, ) ] else: @@ -371,6 +372,7 @@ def test_trace_generates_error_logs_when_trace_agent_url_invalid(): 1, "http://localhost:8125/{}/traces".format(encoding if encoding else "v0.5"), "client error (Connect)", + extra={"send_to_telemetry": False}, ) ] else: @@ -530,6 +532,7 @@ def test_trace_with_invalid_payload_generates_error_log(): "http://localhost:8126/v0.5/traces", 400, "Bad Request", + extra={"send_to_telemetry": False}, ) ] ) @@ -565,6 +568,7 @@ def test_trace_with_invalid_payload_logs_payload_when_LOG_ERROR_PAYLOADS(): 400, "Bad Request", "6261645f7061796c6f6164", + extra={"send_to_telemetry": False}, ) ] ) @@ -595,6 +599,7 @@ def encode_traces(self, traces): 400, "Bad Request", "bad_payload", + extra={"send_to_telemetry": False}, ) ] ) diff --git a/tests/integration/test_integration_snapshots.py b/tests/integration/test_integration_snapshots.py index 7430296d660..8bb70cf70a6 100644 --- a/tests/integration/test_integration_snapshots.py +++ b/tests/integration/test_integration_snapshots.py @@ -4,6 +4,7 @@ import mock import pytest +from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.trace import tracer from tests.integration.utils import AGENT_VERSION from tests.utils import override_global_config @@ -127,6 +128,10 @@ def test_synchronous_writer(writer_class): pass +@pytest.mark.skipif( + PYTHON_VERSION_INFO >= (3, 14), + reason="The default multiprocessing start_method 'forkserver' causes this test to fail", +) @snapshot(async_mode=False) @pytest.mark.subprocess(ddtrace_run=True) def test_tracer_trace_across_popen(): @@ -153,6 +158,10 @@ def task(tracer): tracer.flush() +@pytest.mark.skipif( + PYTHON_VERSION_INFO >= (3, 14), + reason="The default multiprocessing start_method 'forkserver' causes this test to fail", +) @snapshot(async_mode=False) @pytest.mark.subprocess(ddtrace_run=True) def 
test_tracer_trace_across_multiple_popens(): diff --git a/tests/integration/test_settings.py b/tests/integration/test_settings.py index c2f1291b419..8d5aa263d9e 100644 --- a/tests/integration/test_settings.py +++ b/tests/integration/test_settings.py @@ -59,6 +59,7 @@ def test_setting_origin_code(test_agent_session, run_python_code_in_subprocess): "DD_TAGS": "team:apm,component:web", "DD_TRACE_ENABLED": "true", "DD_CIVISIBILITY_AGENTLESS_ENABLED": "false", + "_DD_INSTRUMENTATION_TELEMETRY_TESTS_FORCE_APP_STARTED": "true", } ) out, err, status, _ = run_python_code_in_subprocess( @@ -69,10 +70,6 @@ def test_setting_origin_code(test_agent_session, run_python_code_in_subprocess): config._trace_http_header_tags = {"header": "value"} config.tags = {"header": "value"} config._tracing_enabled = False - -from ddtrace.internal.telemetry import telemetry_writer -# simulate app start event, this occurs when the first span is sent to the datadog agent -telemetry_writer._app_started() """, env=env, ) diff --git a/tests/internal/crashtracker/test_crashtracker.py b/tests/internal/crashtracker/test_crashtracker.py index b9380d7c925..2047d39abec 100644 --- a/tests/internal/crashtracker/test_crashtracker.py +++ b/tests/internal/crashtracker/test_crashtracker.py @@ -129,7 +129,8 @@ def test_crashtracker_simple(): # 2. Listens on that port for new connections # 3. Starts the crashtracker with the URL set to the port # 4. Crashes the process - # 5. Verifies that the crashtracker sends a crash report to the server + # 5. Verifies that the crashtracker sends a crash ping to the server + # 6. Verifies that the crashtracker sends a crash report to the server import ctypes import os @@ -147,8 +148,10 @@ def test_crashtracker_simple(): ctypes.string_at(0) sys.exit(-1) - # Part 5 - # Check to see if the listening socket was triggered, if so accept the connection + # Part 5, Check for the crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, Check to see if the listening socket was triggered, if so accept the connection # then check to see if the resulting connection is readable report = utils.get_crash_report(client) # The crash came from string_at. 
Since the over-the-wire format is multipart, chunked HTTP, @@ -181,7 +184,10 @@ def test_crashtracker_simple_fork(): ctypes.string_at(0) sys.exit(-1) # just in case - # Part 5, check + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert b"string_at" in report["body"] @@ -235,7 +241,10 @@ def test_crashtracker_simple_sigbus(): arr[4095] = b"x" # sigbus sys.exit(-1) # just in case - # Part 5, check + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert report["body"] @@ -261,7 +270,10 @@ def test_crashtracker_raise_sigsegv(): os.kill(os.getpid(), signal.SIGSEGV.value) sys.exit(-1) - # Part 5, check + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert b"os_kill" in report["body"] @@ -287,7 +299,10 @@ def test_crashtracker_raise_sigbus(): os.kill(os.getpid(), signal.SIGBUS.value) sys.exit(-1) - # Part 5, check + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert b"os_kill" in report["body"] @@ -311,7 +326,10 @@ def test_crashtracker_preload_default(ddtrace_run_python_code_in_subprocess): assert not stderr assert exitcode == -11 # exit code for SIGSEGV - # Wait for the connection + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert b"string_at" in report["body"] @@ -330,7 +348,7 @@ def test_crashtracker_preload_disabled(ddtrace_run_python_code_in_subprocess): assert exitcode == -11 # No crash reports should be sent - assert client.crash_reports() == [] + assert client.crash_messages() == [] auto_code = """ @@ -352,7 +370,10 @@ def test_crashtracker_auto_default(run_python_code_in_subprocess): assert not stderr assert exitcode == -11 - # Wait for the connection + # Part 5, check for crash ping + _ping = utils.get_crash_ping(client) + + # Part 6, check for crash report report = utils.get_crash_report(client) assert b"string_at" in report["body"] @@ -370,6 +391,9 @@ def test_crashtracker_auto_nostack(run_python_code_in_subprocess): assert not stderr assert exitcode == -11 + # Check for crash ping + _ping = utils.get_crash_ping(client) + # Wait for the connection report = utils.get_crash_report(client) assert b"string_at" not in report["body"] @@ -389,7 +413,7 @@ def test_crashtracker_auto_disabled(run_python_code_in_subprocess): assert exitcode == -11 # No crash reports should be sent - assert client.crash_reports() == [] + assert client.crash_messages() == [] @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only") @@ -413,6 +437,10 @@ def test_crashtracker_tags_required(): ctypes.string_at(0) sys.exit(-1) + # Check for crash ping + _ping = utils.get_crash_ping(client) + + # Check for crash report report = utils.get_crash_report(client) assert b"string_at" in report["body"] @@ -446,7 +474,10 @@ def test_crashtracker_user_tags_envvar(run_python_code_in_subprocess): assert not stderr assert exitcode == -11 - # Wait for the connection + # Check for crash ping + _ping = utils.get_crash_ping(client) + + # Check for crash report report = utils.get_crash_report(client) # Now check for the tags @@ -456,6 +487,7 @@ def 
test_crashtracker_user_tags_envvar(run_python_code_in_subprocess):


 @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only")
+@pytest.mark.skipif(sys.version_info >= (3, 14), reason="Stack v2 not supported on 3.14")
 def test_crashtracker_set_tag_profiler_config(snapshot_context, run_python_code_in_subprocess):
     with utils.with_test_agent() as client:
         env = os.environ.copy()
@@ -466,6 +498,10 @@ def test_crashtracker_set_tag_profiler_config(snapshot_context, run_python_code_
         assert not stderr
         assert exitcode == -11

+        # Check for crash ping
+        _ping = utils.get_crash_ping(client)
+
+        # Check for crash report
         report = utils.get_crash_report(client)

         # Now check for the profiler_config tag
         assert b"profiler_config" in report["body"]
@@ -474,6 +510,7 @@ def test_crashtracker_set_tag_profiler_config(snapshot_context, run_python_code_

 @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only")
+@pytest.mark.skipif(sys.version_info >= (3, 14), reason="Stack v2 not supported on 3.14")
 @pytest.mark.subprocess()
 def test_crashtracker_user_tags_profiling():
     # Tests tag ingestion in the backend API (which is currently outside of profiling)
@@ -501,6 +538,10 @@ def test_crashtracker_user_tags_profiling():
             ctypes.string_at(0)
             sys.exit(-1)

+        # Check for crash ping
+        _ping = utils.get_crash_ping(client)
+
+        # Check for crash report
         report = utils.get_crash_report(client)
         assert b"string_at" in report["body"]
@@ -539,6 +580,10 @@ def test_crashtracker_user_tags_core():
             ctypes.string_at(0)
             sys.exit(-1)

+        # Check for crash ping
+        _ping = utils.get_crash_ping(client)
+
+        # Check for crash report
         report = utils.get_crash_report(client)
         assert b"string_at" in report["body"]
@@ -619,6 +664,91 @@ def test_crashtracker_echild_hang():
         pytest.fail("Unexpected exception: %s" % e)


+@pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only")
+@pytest.mark.subprocess()
+def test_crashtracker_runtime_callback():
+    import ctypes
+    import json
+    import os
+    import sys
+
+    import tests.internal.crashtracker.utils as utils
+
+    with utils.with_test_agent() as client:
+        pid = os.fork()
+
+        # A chain of trivial functions puts a recognizably deep Python stack on
+        # the crashing thread for the runtime callback to walk.
+        def func1():
+            return func2()
+
+        def func2():
+            return func3()
+
+        def func3():
+            return func4()
+
+        def func4():
+            return func5()
+
+        def func5():
+            return func6()
+
+        def func6():
+            return func7()
+
+        def func7():
+            return func8()
+
+        def func8():
+            return func9()
+
+        def func9():
+            return func10()
+
+        def func10():
+            return func11()
+
+        def func11():
+            return func12()
+
+        def func12():
+            return func13()
+
+        def func13():
+            return func14()
+
+        def func14():
+            return func15()
+
+        def func15():
+            return func16()
+
+        def func16():
+            ctypes.string_at(0)
+            sys.exit(-1)
+
+        # The child starts the crashtracker and crashes; the parent collects the report
+        if pid == 0:
+            ct = utils.CrashtrackerWrapper(base_name="runtime_runtime_callback")
+            assert ct.start()
+            stdout_msg, stderr_msg = ct.logs()
+            assert not stdout_msg, stdout_msg
+            assert not stderr_msg, stderr_msg
+
+            func1()
+
+        report = utils.get_crash_report(client)
+
+        try:
+            report_dict = json.loads(report["body"].decode("utf-8"))
+            message = report_dict["payload"][0]["message"]
+            message_dict = json.loads(message)
+            experimental = message_dict["experimental"]
+
+            # Dump the runtime callback's experimental payload for offline inspection
+            with open("experimental_debug_string_dump.json", "w") as f:
+                json.dump(experimental, f, indent=2)
+
+        except (json.JSONDecodeError, UnicodeDecodeError) as e:
+            print(f"Could not parse report as JSON: {e}")
+
+
 @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only")
 @pytest.mark.subprocess()
 def test_crashtracker_no_zombies():
diff --git
a/tests/internal/crashtracker/utils.py b/tests/internal/crashtracker/utils.py
index f3a966857e8..e26d05788dc 100644
--- a/tests/internal/crashtracker/utils.py
+++ b/tests/internal/crashtracker/utils.py
@@ -112,35 +112,65 @@ def logs(self):
         return read_files([self.stdout, self.stderr])


-def wait_for_crash_reports(test_agent_client: TestAgentClient) -> List[TestAgentRequest]:
-    crash_reports = []
-    for _ in range(10):  # 10 iterations * 0.1 second = 1 second total
-        incoming_reports = test_agent_client.crash_reports()
-        if incoming_reports:
-            crash_reports.extend(incoming_reports)
+def get_all_crash_messages(test_agent_client: TestAgentClient) -> List[TestAgentRequest]:
+    """
+    Collect *all* crash messages. A helper that gathers everything is necessary
+    because crash pings and crash reports are sent as separate async network
+    requests, so there is no guarantee of the order in which they arrive.
+    Callers differentiate between crash pings and crash reports downstream.
+    """
+    seen_report_ids = set()
+    crash_messages = []
+    # 5 iterations * 0.2 seconds = 1 second total, which should be enough to receive ping + report
+    for _ in range(5):
+        incoming_messages = test_agent_client.crash_messages()
+        for message in incoming_messages:
+            body = message.get("body", b"")
+            if isinstance(body, str):
+                body = body.encode("utf-8")
+            report_id = (hash(body), frozenset(message.get("headers", {}).items()))
+            if report_id not in seen_report_ids:
+                seen_report_ids.add(report_id)
+                crash_messages.append(message)
+
+        # If we have both the crash ping and the crash report (2 messages), we can return early
-        if len(crash_reports) >= 2:
-            return crash_reports
-        time.sleep(0.1)
+        if len(crash_messages) >= 2:
+            return crash_messages
+        time.sleep(0.2)

-    return crash_reports
+    return crash_messages


 def get_crash_report(test_agent_client: TestAgentClient) -> TestAgentRequest:
     """Wait for a crash report from the crashtracker listener socket."""
-    crash_reports = wait_for_crash_reports(test_agent_client)
+    crash_messages = get_all_crash_messages(test_agent_client)

     # We expect exactly two messages: one ping and one report
-    assert len(crash_reports) == 2, f"Expected at 2 messages; one ping and one report, got {len(crash_reports)}"
+    assert len(crash_messages) == 2, f"Expected 2 messages (one ping and one report); got {len(crash_messages)}"
+
+    # Find the crash report (the one tagged "is_crash:true")
+    crash_report = None
+    for message in crash_messages:
+        if b"is_crash:true" in message["body"]:
+            crash_report = message
+            break
+
+    assert crash_report is not None, "Could not find crash report with 'is_crash:true' tag"
+    return crash_report

-    # Find the actual crash report (the one with "is_crash":"true")
-    actual_crash_report = None
-    for report in crash_reports:
-        if b"is_crash:true" in report["body"]:
-            actual_crash_report = report
+
+def get_crash_ping(test_agent_client: TestAgentClient) -> TestAgentRequest:
+    """Wait for a crash ping from the crashtracker listener socket."""
+    crash_messages = get_all_crash_messages(test_agent_client)
+    assert len(crash_messages) == 2, f"Expected 2 messages (one ping and one report); got {len(crash_messages)}"
+
+    # Find the crash ping (the one tagged "is_crash_ping:true")
+    crash_ping = None
+    for message in crash_messages:
+        if b"is_crash_ping:true" in message["body"]:
+            crash_ping = message
             break

-    assert actual_crash_report is not None, "Could not find crash report with 'is_crash:true' tag"
-    return actual_crash_report
+    assert crash_ping is not None, "Could not find crash ping with 'is_crash_ping:true' tag"
+    return crash_ping


 @contextmanager
diff --git
a/tests/internal/test_context_events_api.py b/tests/internal/test_context_events_api.py index dba0cafadff..3beb15331ca 100644 --- a/tests/internal/test_context_events_api.py +++ b/tests/internal/test_context_events_api.py @@ -1,7 +1,6 @@ import threading from time import sleep from typing import Any -from typing import List import unittest import mock @@ -185,45 +184,14 @@ def target(): for listener in listeners: assert listener.calls == 1 - def test_core_dispatch_all_listeners(self): - class Listener: - calls: List[tuple] - - def __init__(self): - self.calls = [] - - def __call__(self, event_id: str, args: tuple) -> None: - self.calls.append((event_id, args)) - - l1 = Listener() - - core.event_hub.on_all(l1) - - core.dispatch("event.1", (1, 2)) - core.dispatch("event.2", ()) - - with core.context_with_data("my.cool.context") as ctx: - pass - - assert l1.calls == [ - ("event.1", (1, 2)), - ("event.2", ()), - ("context.started.my.cool.context", (ctx,)), - ("context.ended.my.cool.context", (ctx, (None, None, None))), - ] - @with_config_raise_value(raise_value=False) def test_core_dispatch_exceptions_no_raise(self): def on_exception(*_): raise RuntimeError("OH NO!") - def on_all_exception(*_): - raise TypeError("OH NO!") - core.on("my.cool.event", on_exception, "res") core.on("context.started.my.cool.context", on_exception) core.on("context.ended.my.cool.context", on_exception) - core.event_hub.on_all(on_all_exception) # Dispatch does not raise any exceptions, and returns nothing assert core.dispatch("my.cool.event", (1, 2, 3)) is None @@ -241,82 +209,32 @@ def on_all_exception(*_): # The default raise value for tests is True, but let's be explicit to be safe @with_config_raise_value(raise_value=True) def test_core_dispatch_exceptions_all_raise(self): - def on_exception(*_): + def on_runtime_error(*_): raise RuntimeError("OH NO!") - def on_all_exception(*_): + def on_type_error(*_): raise TypeError("OH NO!") - core.on("my.cool.event", on_exception) - core.on("context.started.my.cool.context", on_exception) - core.on("context.ended.my.cool.context", on_exception) - core.event_hub.on_all(on_all_exception) - - # We stop after the first exception is raised, on_all listeners get called first - with pytest.raises(TypeError): - core.dispatch("my.cool.event", (1, 2, 3)) + # Register 2 listeners for 1 event, the first one gets called first + core.on("my.cool.event", on_runtime_error) + core.on("my.cool.event", on_type_error) - # We stop after the first exception is raised, on_all listeners get called first - with pytest.raises(TypeError): - core.dispatch_with_results("my.cool.event", (1, 2, 3)) - - # We stop after the first exception is raised, on_all listeners get called first - with pytest.raises(TypeError): - with core.context_with_data("my.cool.context"): - pass - - # The default raise value for tests is True, but let's be explicit to be safe - @with_config_raise_value(raise_value=True) - def test_core_dispatch_exceptions_raise(self): - def on_exception(*_): - raise RuntimeError("OH NO!") - - def noop(*_): - pass - - core.on("my.cool.event", on_exception) - core.on("context.started.my.cool.context", noop) - core.on("context.ended.my.cool.context", on_exception) - core.event_hub.on_all(noop) + core.on("context.started.my.cool.context", on_type_error) + core.on("context.ended.my.cool.context", on_runtime_error) + # We stop after the first exception is raised, on_runtime_error listeners get called first with pytest.raises(RuntimeError): core.dispatch("my.cool.event", (1, 2, 3)) + # We stop after the 
first exception is raised, on_runtime_error listeners get called first with pytest.raises(RuntimeError): core.dispatch_with_results("my.cool.event", (1, 2, 3)) - with pytest.raises(RuntimeError): + # We stop after the first exception raised, which is the context started event + with pytest.raises(TypeError): with core.context_with_data("my.cool.context"): pass - def test_core_dispatch_with_results_all_listeners(self): - class Listener: - calls: List[tuple] - - def __init__(self): - self.calls = [] - - def __call__(self, event_id: str, args: tuple) -> None: - self.calls.append((event_id, args)) - - l1 = Listener() - - core.event_hub.on_all(l1) - - # The results/exceptions from all listeners don't get reported - assert core.dispatch_with_results("event.1", (1, 2)) is core.event_hub._MissingEventDict - assert core.dispatch_with_results("event.2", ()) is core.event_hub._MissingEventDict - - with core.context_with_data("my.cool.context") as ctx: - pass - - assert l1.calls == [ - ("event.1", (1, 2)), - ("event.2", ()), - ("context.started.my.cool.context", (ctx,)), - ("context.ended.my.cool.context", (ctx, (None, None, None))), - ] - def test_core_dispatch_context_ended(self): context_id = "my.cool.context" event_name = "context.ended.%s" % context_id diff --git a/tests/internal/test_tracer_flare.py b/tests/internal/test_tracer_flare.py index 2e2999bfd64..8b9e58ebcf6 100644 --- a/tests/internal/test_tracer_flare.py +++ b/tests/internal/test_tracer_flare.py @@ -15,6 +15,7 @@ from pyfakefs.fake_filesystem_unittest import TestCase import pytest +from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.internal.flare._subscribers import TracerFlareSubscriber from ddtrace.internal.flare.flare import TRACER_FLARE_FILE_HANDLER_NAME from ddtrace.internal.flare.flare import Flare @@ -628,6 +629,9 @@ def test_payload_field_order(self): self.flare.revert_configs() +@pytest.mark.skipif( + PYTHON_VERSION_INFO >= (3, 14), reason="pyfakefs seems not to fully work with multiprocessing under Python 3.14" +) class TracerFlareMultiprocessTests(TestCase): def setUp(self): self.setUpPyfakefs() diff --git a/tests/internal/test_utils_http.py b/tests/internal/test_utils_http.py index 5505b1c842c..950a16ed4b0 100644 --- a/tests/internal/test_utils_http.py +++ b/tests/internal/test_utils_http.py @@ -1,10 +1,17 @@ import httpretty import pytest +from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.internal.utils.http import connector -@pytest.mark.parametrize("scheme", ["http", "https"]) +parameters = ["http"] +# httpretty doesn't work with https/http.client/Python 3.14 and there is no apparent replacement +if PYTHON_VERSION_INFO < (3, 14): + parameters.append("https") + + +@pytest.mark.parametrize("scheme", parameters) def test_connector(scheme): with httpretty.enabled(): httpretty.register_uri(httpretty.GET, "%s://localhost:8181/api/test" % scheme, body='{"hello": "world"}') diff --git a/tests/lib_injection/test_denylist.py b/tests/lib_injection/test_denylist.py new file mode 100644 index 00000000000..0a191ee4920 --- /dev/null +++ b/tests/lib_injection/test_denylist.py @@ -0,0 +1,228 @@ +import os +import sys +from unittest.mock import patch + +import pytest + + +# Python interpreters for parametrized testing +PYTHON_INTERPRETERS = [ + "/usr/bin/python", + "/usr/bin/python3", + "/usr/bin/python3.8", + "/usr/bin/python3.9", + "/usr/bin/python3.10", + "/usr/bin/python3.11", + "/usr/bin/python3.12", + "/usr/local/bin/python", + "/usr/local/bin/python3", + "/opt/python/bin/python3.10", + 
"/home/user/.pyenv/versions/3.11.0/bin/python", + "python", + "python3", + "python3.10", + "./python", + "../bin/python3", +] + + +@pytest.fixture +def mock_sitecustomize(): + lib_injection_path = os.path.join(os.path.dirname(__file__), "../../lib-injection/sources") + if lib_injection_path not in sys.path: + sys.path.insert(0, lib_injection_path) + + import sitecustomize + + sitecustomize.EXECUTABLES_DENY_LIST = sitecustomize.build_denied_executables() + sitecustomize.EXECUTABLE_MODULES_DENY_LIST = sitecustomize.build_denied_executable_modules() + + return sitecustomize + + +@pytest.mark.parametrize("python_exe", PYTHON_INTERPRETERS) +def test_python_module_denylist_denied_basic(mock_sitecustomize, python_exe): + assert "py_compile" in mock_sitecustomize.EXECUTABLE_MODULES_DENY_LIST, "py_compile should be in modules deny list" + + with patch.object(sys, "argv", [python_exe, "-m", "py_compile", "test.py"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result == "-m py_compile", f"Expected '-m py_compile' for {python_exe}, got '{result}'" + + +@pytest.mark.parametrize( + "python_exe, argv_pattern, description", + [ + (PYTHON_INTERPRETERS[1], ["-v", "-m", "py_compile", "test.py"], "python -v -m py_compile"), + (PYTHON_INTERPRETERS[8], ["-u", "-m", "py_compile"], "python -u -m py_compile"), + (PYTHON_INTERPRETERS[12], ["-O", "-v", "-m", "py_compile"], "python -O -v -m py_compile"), + (PYTHON_INTERPRETERS[1], ["-W", "ignore", "-m", "py_compile"], "python -W ignore -m py_compile"), + (PYTHON_INTERPRETERS[8], ["-u", "-v", "-m", "py_compile"], "python -u -v -m py_compile"), + (PYTHON_INTERPRETERS[12], ["-O", "-m", "py_compile", "file.py"], "python -O -m py_compile"), + ], +) +def test_python_module_denylist_denied_with_flags(mock_sitecustomize, python_exe, argv_pattern, description): + assert "py_compile" in mock_sitecustomize.EXECUTABLE_MODULES_DENY_LIST, "py_compile should be in modules deny list" + + argv = [python_exe] + argv_pattern + with patch.object(sys, "argv", argv): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result == "-m py_compile", f"Expected '-m py_compile' for {description} ({python_exe}), got '{result}'" + + +@pytest.mark.parametrize("python_exe", [PYTHON_INTERPRETERS[4], PYTHON_INTERPRETERS[11], PYTHON_INTERPRETERS[1]]) +def test_regular_python_nondenied(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe, "script.py"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"Normal python execution should not be denied for {python_exe}, got '{result}'" + + +@pytest.mark.parametrize( + "python_exe, module_name, description", + [ + (PYTHON_INTERPRETERS[4], "json.tool", "python -m json.tool"), + (PYTHON_INTERPRETERS[11], "json.tool", "python -m json.tool"), + (PYTHON_INTERPRETERS[8], "json.tool", "python -m json.tool"), + (PYTHON_INTERPRETERS[4], "pip", "python -m pip"), + (PYTHON_INTERPRETERS[11], "pip", "python -m pip"), + (PYTHON_INTERPRETERS[8], "pip", "python -m pip"), + ], +) +def test_python_module_notdenylist_notdenied(mock_sitecustomize, python_exe, module_name, description): + argv = [python_exe, "-m", module_name] + (["install", "something"] if module_name == "pip" else []) + with patch.object(sys, "argv", argv): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"{description} should not be denied for {python_exe}, got '{result}'" + + +def test_binary_denylist_denied(mock_sitecustomize): + denied_binaries = 
["/usr/bin/py3compile", "/usr/bin/gcc", "/usr/bin/make", "/usr/sbin/chkrootkit"] + + for binary in denied_binaries: + assert binary in mock_sitecustomize.EXECUTABLES_DENY_LIST, f"{binary} should be in deny list" + with patch.object(sys, "argv", [binary, "some", "args"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result == binary, f"Expected '{binary}' to be denied, got '{result}'" + + with patch.object(sys, "argv", ["py3compile", "test.py"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result == "py3compile", f"Expected 'py3compile' (basename) to be denied, got '{result}'" + + +def test_binary_not_in_denylist_allowed(mock_sitecustomize): + candidate_allowed_binaries = [ + "/usr/bin/python3", + "/usr/bin/python3.10", + "/bin/bash", + "/usr/bin/cat", + "/usr/bin/ls", + "/usr/bin/echo", + "/usr/bin/node", + "/usr/bin/ruby", + "/usr/bin/java", + "/usr/bin/wget", + "/usr/bin/vim", + "/usr/bin/nano", + "/usr/local/bin/custom_app", + ] + + allowed_binaries = [] + for binary in candidate_allowed_binaries: + if ( + binary not in mock_sitecustomize.EXECUTABLES_DENY_LIST + and os.path.basename(binary) not in mock_sitecustomize.EXECUTABLES_DENY_LIST + ): + allowed_binaries.append(binary) + + for binary in allowed_binaries: + with patch.object(sys, "argv", [binary, "some", "args"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"Expected '{binary}' to be allowed, but got denied: '{result}'" + + safe_basenames = ["myapp", "custom_script", "user_program"] + for basename in safe_basenames: + assert basename not in mock_sitecustomize.EXECUTABLES_DENY_LIST, f"'{basename}' should not be in deny list" + + with patch.object(sys, "argv", [basename, "arg1", "arg2"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"Expected '{basename}' to be allowed, but got denied: '{result}'" + + +@pytest.mark.parametrize("python_exe", PYTHON_INTERPRETERS) +def test_single_argument_not_denied(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"Single argument should not be denied for {python_exe}, got '{result}'" + + +@pytest.mark.parametrize("python_exe", [PYTHON_INTERPRETERS[4], PYTHON_INTERPRETERS[11], PYTHON_INTERPRETERS[9]]) +def test_m_without_module_not_denied(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe, "-m"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"-m without module should not be denied for {python_exe}, got '{result}'" + + +@pytest.mark.parametrize("python_exe", [PYTHON_INTERPRETERS[1], PYTHON_INTERPRETERS[7], PYTHON_INTERPRETERS[10]]) +def test_m_as_last_argument_not_denied(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe, "-v", "-m"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"-m as last argument should not be denied for {python_exe}, got '{result}'" + + +@pytest.mark.parametrize("python_exe", [PYTHON_INTERPRETERS[4], PYTHON_INTERPRETERS[11], PYTHON_INTERPRETERS[8]]) +def test_multiple_m_flags_uses_first(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe, "-m", "json.tool", "-m", "py_compile"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"First -m should be used (json.tool is allowed) for {python_exe}, got '{result}'" + + 
+@pytest.mark.parametrize( + "python_exe", + [ + PYTHON_INTERPRETERS[11], + PYTHON_INTERPRETERS[1], + PYTHON_INTERPRETERS[2], + PYTHON_INTERPRETERS[9], + PYTHON_INTERPRETERS[14], + ], +) +def test_py_compile_denied_all_interpreters(mock_sitecustomize, python_exe): + with patch.object(sys, "argv", [python_exe, "-m", "py_compile", "test.py"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result == "-m py_compile", f"py_compile should be denied for {python_exe}, got '{result}'" + + +def test_missing_sys_argv_not_denied(mock_sitecustomize): + with patch("builtins.hasattr", return_value=False): + result = mock_sitecustomize.get_first_incompatible_sysarg() + assert result is None, f"Missing sys.argv should not be denied, got '{result}'" + + +def test_non_python_executable_with_m_flag_allowed(mock_sitecustomize): + assert "py_compile" in mock_sitecustomize.EXECUTABLE_MODULES_DENY_LIST + + non_python_executables = [ + "/bin/whatever", + "/usr/bin/some_tool", + "/usr/local/bin/custom_app", + "/usr/bin/gcc", # This is actually in deny list, but not for -m + "/bin/bash", + "/usr/bin/node", + "/usr/bin/java", + ] + + for executable in non_python_executables: + with patch.object(sys, "argv", [executable, "-m", "py_compile", "test.py"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + + if result is not None: + assert result == executable or result == os.path.basename( + executable + ), f"Expected '{executable}' itself to be denied (if at all), not '-m py_compile'. Got: '{result}'" + + with patch.object(sys, "argv", [executable, "-m", "some_other_module"]): + result = mock_sitecustomize.get_first_incompatible_sysarg() + + if result is not None: + assert result == executable or result == os.path.basename( + executable + ), f"Non-Python executable '{executable}' should not be denied for -m patterns. 
Got: '{result}'" diff --git a/tests/llmobs/_utils.py b/tests/llmobs/_utils.py index 00494a3eb0c..a12154f2c34 100644 --- a/tests/llmobs/_utils.py +++ b/tests/llmobs/_utils.py @@ -2,6 +2,10 @@ import mock +from ddtrace.llmobs.types import _ErrorField +from ddtrace.llmobs.types import _Meta +from ddtrace.llmobs.types import _SpanField + try: import vcr @@ -226,7 +230,7 @@ def _llmobs_base_span_event( "start_ns": span.start_ns, "duration": span.duration_ns, "status": "error" if error else "ok", - "meta": {"span.kind": span_kind}, + "meta": _Meta(span=_SpanField(kind=span_kind)), "metrics": {}, "tags": _expected_llmobs_tags(span, tags=tags, error=error, session_id=session_id), "_dd": { @@ -238,9 +242,7 @@ def _llmobs_base_span_event( if session_id: span_event["session_id"] = session_id if error: - span_event["meta"]["error.type"] = error - span_event["meta"]["error.message"] = error_message - span_event["meta"]["error.stack"] = error_stack + span_event["meta"]["error"] = _ErrorField(type=error, message=error_message or "", stack=error_stack or "") if span_links: span_event["span_links"] = mock.ANY return span_event @@ -322,7 +324,9 @@ def _completion_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "llm", + "span": { + "kind": "llm", + }, "model_name": "ada", "model_provider": "openai", "input": { @@ -353,7 +357,9 @@ def _chat_completion_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "llm", + "span": { + "kind": "llm", + }, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "input": { @@ -391,7 +397,9 @@ def _chat_completion_event_with_unserializable_field(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "llm", + "span": { + "kind": "llm", + }, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "metadata": {"unserializable": object()}, @@ -430,7 +438,9 @@ def _large_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "llm", + "span": { + "kind": "llm", + }, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "input": { @@ -468,7 +478,9 @@ def _oversized_llm_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "llm", + "span": { + "kind": "llm", + }, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "input": { @@ -506,7 +518,9 @@ def _oversized_workflow_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": "A" * 2_600_000}, "output": {"value": "A" * 2_600_000}, }, @@ -526,7 +540,9 @@ def _oversized_retrieval_event(): "duration": 12345678900, "status": "ok", "meta": { - "span.kind": "retrieval", + "span": { + "kind": "retrieval", + }, "input": {"documents": {"content": "A" * 2_600_000}}, "output": {"value": "A" * 2_600_000}, }, @@ -621,7 +637,9 @@ def _expected_ragas_context_precision_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": mock.ANY}, "output": {"value": "1.0"}, "metadata": {}, @@ -640,7 +658,9 @@ def _expected_ragas_context_precision_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, @@ -665,7 +685,9 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, 
"input": {"value": mock.ANY}, "output": {"value": "1.0"}, "metadata": { @@ -686,7 +708,9 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, @@ -704,7 +728,9 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, @@ -722,7 +748,7 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "start_ns": mock.ANY, "duration": mock.ANY, "status": "ok", - "meta": {"span.kind": "task", "metadata": {}}, + "meta": {"span": {"kind": "task"}, "metadata": {}}, "metrics": {}, "tags": expected_ragas_trace_tags(), "_dd": {"span_id": mock.ANY, "trace_id": mock.ANY, "apm_trace_id": mock.ANY}, @@ -736,7 +762,9 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": { + "kind": "workflow", + }, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, @@ -754,7 +782,7 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "start_ns": mock.ANY, "duration": mock.ANY, "status": "ok", - "meta": {"span.kind": "task", "metadata": {}}, + "meta": {"span": {"kind": "task"}, "metadata": {}}, "metrics": {}, "tags": expected_ragas_trace_tags(), "_dd": {"span_id": mock.ANY, "trace_id": mock.ANY, "apm_trace_id": mock.ANY}, @@ -768,7 +796,7 @@ def _expected_ragas_faithfulness_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "task", + "span": {"kind": "task"}, "output": {"value": "1.0"}, "metadata": {"faithful_statements": 1, "num_statements": 1}, }, @@ -792,7 +820,7 @@ def _expected_ragas_answer_relevancy_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": {"kind": "workflow"}, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {"answer_classifications": mock.ANY, "strictness": mock.ANY}, @@ -811,7 +839,7 @@ def _expected_ragas_answer_relevancy_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": {"kind": "workflow"}, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, @@ -829,7 +857,7 @@ def _expected_ragas_answer_relevancy_spans(ragas_inputs=None): "duration": mock.ANY, "status": "ok", "meta": { - "span.kind": "workflow", + "span": {"kind": "workflow"}, "input": {"value": mock.ANY}, "output": {"value": mock.ANY}, "metadata": {}, diff --git a/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_1edd9d51.yaml b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_1edd9d51.yaml new file mode 100644 index 00000000000..f013548843f --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_1edd9d51.yaml @@ -0,0 +1,49 @@ +interactions: +- request: + body: '{"data": {"type": "experiments", "attributes": {"scope": "experiments", + "metrics": [{"metric_source": "custom", "span_id": "123", "trace_id": "456", + "timestamp_ms": 1234, "metric_type": "score", "label": 
"dummy_evaluator", "score_value": + 0, "error": null, "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}], "tags": ["ddtrace.version:1.2.3", + "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"]}}}' + headers: + Accept: + - '*/*' + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - identity + Connection: + - keep-alive + Content-Length: + - '494' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://api.datadoghq.com/api/unstable/llm-obs/v1/experiments/9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8/events + response: + body: + string: '' + headers: + content-length: + - '0' + content-security-policy: + - frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pube4f163c23bbf91c16b8f57f56af9fc58&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatadoghq.com + content-type: + - application/vnd.api+json + date: + - Thu, 18 Sep 2025 15:03:41 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + status: + code: 202 + message: Accepted +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_977047ce.yaml b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_977047ce.yaml new file mode 100644 index 00000000000..18c184cf61d --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_977047ce.yaml @@ -0,0 +1,53 @@ +interactions: +- request: + body: '{"data": {"type": "experiments", "attributes": {"scope": "experiments", + "metrics": [{"metric_source": "custom", "span_id": "123", "trace_id": "456", + "timestamp_ms": 1234, "metric_type": "score", "label": "dummy_evaluator", "score_value": + 0, "error": null, "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}, {"metric_source": + "summary", "span_id": "", "trace_id": "", "timestamp_ms": 1234, "metric_type": + "score", "label": "dummy_summary_evaluator", "score_value": 4, "error": null, + "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}], "tags": ["ddtrace.version:1.2.3", + "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"]}}}' + headers: + Accept: + - '*/*' + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - identity + Connection: + - keep-alive + Content-Length: + - '816' + ? 
!!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://api.datadoghq.com/api/unstable/llm-obs/v1/experiments/9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8/events + response: + body: + string: '' + headers: + content-length: + - '0' + content-security-policy: + - frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pube4f163c23bbf91c16b8f57f56af9fc58&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatadoghq.com + content-type: + - application/vnd.api+json + date: + - Thu, 18 Sep 2025 21:00:41 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + status: + code: 202 + message: Accepted +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_ac74921f.yaml b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_ac74921f.yaml new file mode 100644 index 00000000000..16d25d51485 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_ac74921f.yaml @@ -0,0 +1,53 @@ +interactions: +- request: + body: '{"data": {"type": "experiments", "attributes": {"scope": "experiments", + "metrics": [{"metric_source": "custom", "span_id": "123", "trace_id": "456", + "timestamp_ms": 1234, "metric_type": "score", "label": "dummy_evaluator", "score_value": + 0, "error": null, "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}, {"metric_source": + "summary", "span_id": "", "trace_id": "", "timestamp_ms": 1758223545604, "metric_type": + "score", "label": "dummy_summary_evaluator", "score_value": 4, "error": null, + "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}], "tags": ["ddtrace.version:1.2.3", + "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"]}}}' + headers: + Accept: + - '*/*' + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - identity + Connection: + - keep-alive + Content-Length: + - '825' + ? 
!!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://api.datadoghq.com/api/unstable/llm-obs/v1/experiments/9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8/events + response: + body: + string: '' + headers: + content-length: + - '0' + content-security-policy: + - frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pube4f163c23bbf91c16b8f57f56af9fc58&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatadoghq.com + content-type: + - application/vnd.api+json + date: + - Thu, 18 Sep 2025 19:25:45 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + status: + code: 202 + message: Accepted +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_e476f464.yaml b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_e476f464.yaml new file mode 100644 index 00000000000..7c8f99c2fcb --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/datadog/datadog_api_unstable_llm-obs_v1_experiments_9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8_events_post_e476f464.yaml @@ -0,0 +1,53 @@ +interactions: +- request: + body: '{"data": {"type": "experiments", "attributes": {"scope": "experiments", + "metrics": [{"metric_source": "custom", "span_id": "123", "trace_id": "456", + "timestamp_ms": 1234, "metric_type": "score", "label": "dummy_evaluator", "score_value": + 0, "error": null, "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}, {"metric_source": + "summary", "span_id": "", "trace_id": "", "timestamp_ms": 1758223195113, "metric_type": + "score", "label": "dummy_summary_evaluator", "score_value": 4, "error": null, + "tags": ["ddtrace.version:1.2.3", "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"], + "experiment_id": "9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"}], "tags": ["ddtrace.version:1.2.3", + "experiment_id:9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8"]}}}' + headers: + Accept: + - '*/*' + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - identity + Connection: + - keep-alive + Content-Length: + - '825' + ? 
!!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + User-Agent: + - python-requests/2.32.3 + method: POST + uri: https://api.datadoghq.com/api/unstable/llm-obs/v1/experiments/9e046fc7-cf3f-4f01-b5ed-e5e7746fefa8/events + response: + body: + string: '' + headers: + content-length: + - '0' + content-security-policy: + - frame-ancestors 'self'; report-uri https://logs.browser-intake-datadoghq.com/api/v2/logs?dd-api-key=pube4f163c23bbf91c16b8f57f56af9fc58&dd-evp-origin=content-security-policy&ddsource=csp-report&ddtags=site%3Adatadoghq.com + content-type: + - application/vnd.api+json + date: + - Thu, 18 Sep 2025 19:19:55 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-frame-options: + - SAMEORIGIN + status: + code: 202 + message: Accepted +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_2eb535d8.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_2eb535d8.yaml new file mode 100644 index 00000000000..d9657f58734 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_2eb535d8.yaml @@ -0,0 +1,131 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in creative writing.", "role": "system"}, {"content": + "Additional context: focus on storytelling", "role": "system"}, {"content": + "I''m a user seeking help.", "role": "user"}, {"content": "Please help with + writing a short story", "role": "user"}, {"content": "Specifically, I need character + development", "role": "user"}, {"content": "I understand your request.", "role": + "assistant"}, {"content": "I''ll help you with writing a short story.", "role": + "assistant"}, {"content": "Let me provide guidance", "role": "assistant"}, {"content": + "Make it engaging and under 100 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '777' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? 
!!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIese1QDPr0XXSEgYvKbkeUZhfQFJ\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563084,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"To start developing your character, + think about their background, personality traits, motivations, and challenges + they may face. Consider what makes them unique and how they might evolve throughout + the story. Would you like to explore a particular type of character or setting + for your story?\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 112,\n \"completion_tokens\": + 52,\n \"total_tokens\": 164,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339cae6aff905c-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:45 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=ZFeMiDd6PEb_h_UZ2pG8Wvp9CBwXI.cDOavASojqbpo-1758563085-1.0.1.1-gYUZtbN_O5a1ndmJEF1UCwPJ54MFaWUM.gOB.y3XPx_5vWgvDcFFfoYYZbOqlnHi6Nq1LDfDK_g0VEaZbY5H9QlQuBLopYD2lt0I.wYnGSw; + path=/; expires=Mon, 22-Sep-25 18:14:45 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=YfXe1C9udYV5eHjDVkrLIl2_SutEyw2Sxy1B1SNSYvw-1758563085238-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '565' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '589' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999905' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_744f273b5ac44218a458a592ebe13b53 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_448095a4.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_448095a4.yaml new file mode 100644 index 00000000000..37122f72842 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_448095a4.yaml @@ -0,0 +1,140 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in software engineering.", "role": "system"}, + {"content": "Additional context: focus on best practices", "role": "system"}, + {"content": "I''m a user seeking help.", "role": "user"}, {"content": "Please + help with code review", "role": 
"user"}, {"content": "Specifically, I need performance + optimization", "role": "user"}, {"content": "I understand your request.", "role": + "assistant"}, {"content": "I''ll help you with code review.", "role": "assistant"}, + {"content": "Let me provide recommendations", "role": "assistant"}, {"content": + "Make it thorough and under 200 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "stream": false}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate, zstd + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '745' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - AsyncOpenAI/Python 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - async:asyncio + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIesnYp3ArbeZmlvm2lTqpctcCYrZ\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563093,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"When optimizing for performance, focus + on reducing unnecessary computations or I/O operations, minimizing memory + usage, and improving algorithm efficiency. Consider using data structures + like hash maps for quick lookups and minimizing nested loops to reduce time + complexity. Profile your code using tools like profilers to identify bottlenecks. + Utilize caching mechanisms where applicable to reduce redundant computations. + Use asynchronous processing for I/O-bound operations to improve responsiveness. + Remember to balance readability with performance optimizations and document + rationale behind any non-intuitive optimizations for future reference. 
Always + test performance improvements with realistic data and scenarios to validate + the effectiveness of the optimizations.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 109,\n \"completion_tokens\": + 118,\n \"total_tokens\": 227,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339ce46f8b4ce8-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:54 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=IK_Vjf0AvDicpP_jyKYa1Fnxkvw5GvGBXQQHcxK5k04-1758563094-1.0.1.1-9Qi8HJOL2Z6zHU58xhszX5yAunjAEVs4Q7Ler05IN2B2IgS5QNEUhnOgdizPWHTI_kXpyYYCgC9YbSQny4Ryi6j9PPCZz9NorlBcNNFDeIs; + path=/; expires=Mon, 22-Sep-25 18:14:54 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=LtuICazb21rxIhiZxZ72FV8xzAwsyzGaETrRwMBnW2o-1758563094420-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '1064' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1098' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999905' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_7fde04bd038e40aa989b221d8f96ffb8 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_5244958a.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_5244958a.yaml new file mode 100644 index 00000000000..3525a9aaa13 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_5244958a.yaml @@ -0,0 +1,134 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in data analysis.", "role": "system"}, {"content": + "Additional context: focus on accuracy", "role": "system"}, {"content": "I''m + a user seeking help.", "role": "user"}, {"content": "Please help with analyzing + datasets", "role": "user"}, {"content": "Specifically, I need statistical methods", + "role": "user"}, {"content": "I understand your request.", "role": "assistant"}, + {"content": "I''ll help you with analyzing datasets.", "role": "assistant"}, + {"content": "Let me provide insights", "role": "assistant"}, {"content": "Make + it detailed and under 150 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? 
!!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '762' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIesgrrGj7PzEPdG5OvPNZTzmC9Xw\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563086,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Certainly! I will assist you in analyzing + datasets using statistical methods. We can start by exploring the data with + descriptive statistics like mean, median, and standard deviation. Then, we + can apply inferential statistics such as hypothesis testing or regression + analysis to draw meaningful conclusions. It's important to ensure data accuracy + throughout the analysis process to maintain the reliability of the results. + Feel free to share more details about your specific dataset and analysis goals + for a more tailored approach.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 108,\n \"completion_tokens\": + 90,\n \"total_tokens\": 198,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339cbb7ecb4d19-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:47 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=euWjg3ikr4nZKHcbyuC.bYYnK.jXMyVDv3M4aT6XUzs-1758563087-1.0.1.1-7qSk1dqCq8_3JgUxMKFTaZb61GS3Enu13aoYBKeccB0N4kAz01tSFR_V1y_b9AVXq0kftzhrYul86M9PdfDKfZBIRIQ3CKZL5HvcL7vb8kI; + path=/; expires=Mon, 22-Sep-25 18:14:47 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=6l8XMsDdJN1mhrIcpj_nZ2Q2pARp460CR8cb9Teb56M-1758563087501-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '751' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '774' + x-openai-proxy-wasm: + - v0.1 + 
x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999907' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_4f13c054e468464b944056e3299b282c + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_8fdba4e4.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_8fdba4e4.yaml new file mode 100644 index 00000000000..86bda0b449c --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_8fdba4e4.yaml @@ -0,0 +1,136 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in software engineering.", "role": "system"}, + {"content": "Additional context: focus on best practices", "role": "system"}, + {"content": "I''m a user seeking help.", "role": "user"}, {"content": "Please + help with code review", "role": "user"}, {"content": "Specifically, I need performance + optimization", "role": "user"}, {"content": "I understand your request.", "role": + "assistant"}, {"content": "I''ll help you with code review.", "role": "assistant"}, + {"content": "Let me provide recommendations", "role": "assistant"}, {"content": + "Make it thorough and under 200 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "n": 1, "stream": false, "temperature": 0.7}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '773' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - AsyncOpenAI/Python 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - async:asyncio + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIesfljG3kdiFje8g6472JNaqQCgs\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563085,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"When optimizing for performance, consider + the following: \\n1. Use efficient data structures and algorithms.\\n2. Minimize + unnecessary operations and avoid nested loops.\\n3. Cache results of expensive + computations.\\n4. Optimize database queries and minimize network requests.\\n5. + Utilize asynchronous processing and parallel programming.\\n6. Profile your + code to identify bottlenecks.\\n7. Avoid premature optimization; focus on + critical areas first.\\n8. 
Consider using a profiler tool to measure performance + improvements.\\n9. Regularly review and refactor code for better performance.\\n10. + Test performance changes and monitor for regressions. Remember, optimizing + code is a balance between performance gains and code complexity.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 109,\n \"completion_tokens\": 131,\n \"total_tokens\": 240,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339cb37b67904d-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:46 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=oNqeFDsHxjp57DO51OQdISF3Rv7ullj_2sqWTb.S_30-1758563086-1.0.1.1-lQxQBATc6qZ6885iAbP4q4GcorpFkumyl4a3sbnw91vDcB2MVUB4tAFSRlpWmW3iX6s6a1Kn5auYHerQzjwqJvezKRtKrdFeKAlsTk1E3ek; + path=/; expires=Mon, 22-Sep-25 18:14:46 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=SlLwMUXOeHtVRpbotBOCe.2amWEGWxTLJrXtR.4yOoo-1758563086515-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '1073' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1094' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999905' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_1690ccbc68564cc1a3041ea020bbe6d6 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_ad7e4955.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_ad7e4955.yaml new file mode 100644 index 00000000000..88868db4d2d --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_ad7e4955.yaml @@ -0,0 +1,136 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in data analysis.", "role": "system"}, {"content": + "Additional context: focus on accuracy", "role": "system"}, {"content": "I''m + a user seeking help.", "role": "user"}, {"content": "Please help with analyzing + datasets", "role": "user"}, {"content": "Specifically, I need statistical methods", + "role": "user"}, {"content": "I understand your request.", "role": "assistant"}, + {"content": "I''ll help you with analyzing datasets.", "role": "assistant"}, + {"content": "Let me provide insights", "role": "assistant"}, {"content": "Make + it detailed and under 150 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "stream": false}' + headers: + ? 
!!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate, zstd + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '734' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIesmrTtUwlBMhSeGppWnLQachQUn\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563092,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I will apply statistical methods to + analyze your datasets accurately. This may involve descriptive statistics, + such as mean, median, and standard deviation, to summarize the data effectively. + I can also assist with inferential statistics, like hypothesis testing and + regression analysis, to draw meaningful conclusions from the data. 
By ensuring + the integrity and reliability of the analysis, I will help you make informed + decisions based on the insights gained from the datasets.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 108,\n \"completion_tokens\": 83,\n \"total_tokens\": 191,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339cde2cc08fbd-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:53 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=Zi4_F2uj9DS7B2RgYFRC3YxFs64CzIbXHd_ACKfAmRg-1758563093-1.0.1.1-3GqkteGETfHkM.rna3v2IoprVZ9QdZ5FlEp2RPnQKPWuKHvc3Bs0ibo0MWakyTrDaK5kS.wcnqtg.8YNaL9WtEtoxA9BNVixS_ZBh6KAm.k; + path=/; expires=Mon, 22-Sep-25 18:14:53 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=wwbsplLjFhV7b_O5Ow9.CkbrTZhXu2IDk2mMT_93KAk-1758563093057-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '781' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '800' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999908' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_ec55e7146e8c4e0997914a504609de06 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_fd55b179.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_fd55b179.yaml new file mode 100644 index 00000000000..19fff860955 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_chat_completions_post_fd55b179.yaml @@ -0,0 +1,139 @@ +interactions: +- request: + body: '{"messages": [{"content": "You are a {role} assistant.", "role": "system"}, + {"content": "Your expertise is in creative writing.", "role": "system"}, {"content": + "Additional context: focus on storytelling", "role": "system"}, {"content": + "I''m a user seeking help.", "role": "user"}, {"content": "Please help with + writing a short story", "role": "user"}, {"content": "Specifically, I need character + development", "role": "user"}, {"content": "I understand your request.", "role": + "assistant"}, {"content": "I''ll help you with writing a short story.", "role": + "assistant"}, {"content": "Let me provide guidance", "role": "assistant"}, {"content": + "Make it engaging and under 100 words", "role": "developer"}], "model": "gpt-3.5-turbo", + "stream": false}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? 
!!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate, zstd + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '749' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-CIesonu85AwwwMGmoCLsLk09RCn20\",\n \"object\": + \"chat.completion\",\n \"created\": 1758563094,\n \"model\": \"gpt-3.5-turbo-0125\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Great! Let's start with developing + a compelling character in under 100 words. \\n\\nCharacter name: Lily\\n\\nLily, + a young artist with a vibrant spirit, found inspiration in the simplest of + things. Her bright green eyes sparkled with curiosity, always seeking beauty + in the world around her. However, beneath her carefree facade lay a depth + of emotion, a heart scarred by loss and longing. She channeled her inner turmoil + into her art, creating mesmerizing pieces that spoke volumes without words. + Lily's infectious laughter masked a quiet strength, a determination to find + light in the darkest of moments. 
When her brush touched canvas, magic unfolded, + revealing a soul as vibrant as the colors she painted.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 112,\n \"completion_tokens\": 142,\n \"total_tokens\": 254,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - 98339ceccdbb8f9f-BOS + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:44:55 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=obggGc7ZBSKl84OpWJe4C9po_TJ4QBJlnvK8RbrBCwk-1758563095-1.0.1.1-IxvHeF_BNexes781_cwNqH9JH68A0GfObQKBvTZXWYZSgQK7Mtf0tVeG5ag1vzNuDyoUtE36b6ChQ_LJ1lljSUXDeeLXhMSZa1vUu1q0HEw; + path=/; expires=Mon, 22-Sep-25 18:14:55 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=aE.QgWcfts2w4plHdsxPoq06T9dYJ33zgXbchfiPTm8-1758563095792-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - datadog-staging + openai-processing-ms: + - '1120' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1138' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '50000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '49999905' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_da856b2154d54de6b846be34f9b14579 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_0fdd1e55.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_0fdd1e55.yaml new file mode 100644 index 00000000000..12f2ded5967 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_0fdd1e55.yaml @@ -0,0 +1,138 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo-instruct", "prompt": ["Write a creative story + about time travel."], "frequency_penalty": 0, "logit_bias": {}, "max_tokens": + 256, "n": 1, "presence_penalty": 0, "temperature": 0.7, "top_p": 1}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '216' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? 
!!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.30.3 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + method: POST + uri: https://api.openai.com/v1/completions + response: + body: + string: "{\n \"id\": \"cmpl-CIewkLZmfJsU1nplXAFDznjxKFW3Y\",\n \"object\": + \"text_completion\",\n \"created\": 1758563338,\n \"model\": \"gpt-3.5-turbo-instruct:20230824-v2\",\n + \ \"choices\": [\n {\n \"text\": \"\\n\\nOnce upon a time, in the + year 2045, a young scientist named Ava had just completed her time machine. + She had been working on it for years, fueled by her fascination with time + travel and her desire to change the course of history. Her machine was sleek + and modern, with flashing lights and advanced technology that she had designed + herself.\\n\\nAva had always been a bit of a rebel, and she had never been + content with the way things were in her time. The world was plagued by wars, + poverty, and environmental disasters. She dreamed of going back in time and + preventing these tragedies from ever happening. And now, with her time machine, + she finally had the chance to do so.\\n\\nWith a deep breath, Ava stepped + into her machine and set the date to 1920, a time she had always been drawn + to. As the machine whirred to life, she felt a rush of excitement and fear. + What if her plan didn't work? What if she caused more harm than good?\\n\\nBut + it was too late to turn back now. Before she knew it, she was transported + through time, landing in a bustling city street. As she stepped out of the + machine, she was immediately struck by the sights and sounds of the roaring + twenties. 
The fashion\",\n \"index\": 0,\n \"logprobs\": null,\n + \ \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 8,\n \"completion_tokens\": 256,\n \"total_tokens\": 264\n }\n}\n" + headers: + CF-RAY: + - 9833a2dd5efb8fcc-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:49:00 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=rHB1WT4cm36iY6hlJ8YrIYVdycicTQbkFBkWNoDOY9g-1758563340-1.0.1.1-dflE5EN9tBQz.NWy4o7.e6snRB6uSzOTsVG4ChFYgdFagrbmLIF1KzW9xTGCrLlJZJ24JlVWyJvf8KDj7BhJ113p5YV0kAodnvx9K3xj9.4; + path=/; expires=Mon, 22-Sep-25 18:19:00 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=epUYLQglxq8rJ1L9Wx4Wu27nkRnKVdngRy0cmHCS93A-1758563340287-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - gpt-3.5-turbo-instruct:20230824-v2 + openai-organization: + - datadog-staging + openai-processing-ms: + - '2238' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-6c4cfffd5b-r5whb + x-envoy-upstream-service-time: + - '2401' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3498' + x-ratelimit-remaining-tokens: + - '89989' + x-ratelimit-reset-requests: + - 17ms + x-ratelimit-reset-tokens: + - 7ms + x-request-id: + - req_6353e5a6d22645b181fda4c2fbd2d176 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_b77fd180.yaml b/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_b77fd180.yaml new file mode 100644 index 00000000000..c75bb2fa679 --- /dev/null +++ b/tests/llmobs/llmobs_cassettes/openai/openai_completions_post_b77fd180.yaml @@ -0,0 +1,143 @@ +interactions: +- request: + body: '{"model": "gpt-3.5-turbo-instruct", "prompt": ["Write a creative story + about time travel."], "frequency_penalty": 0, "logprobs": null, "max_tokens": + 256, "n": 1, "presence_penalty": 0, "seed": null, "temperature": 0.7, "top_p": + 1}' + headers: + ? !!python/object/apply:multidict._multidict.istr + - Accept + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - Accept-Encoding + : - gzip, deflate, zstd + ? !!python/object/apply:multidict._multidict.istr + - Connection + : - keep-alive + Content-Length: + - '230' + ? !!python/object/apply:multidict._multidict.istr + - Content-Type + : - application/json + ? !!python/object/apply:multidict._multidict.istr + - User-Agent + : - OpenAI/Python 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Arch + : - arm64 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Async + : - 'false' + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Lang + : - python + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-OS + : - MacOS + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Package-Version + : - 1.61.1 + ? !!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime + : - CPython + ? 
!!python/object/apply:multidict._multidict.istr + - X-Stainless-Runtime-Version + : - 3.10.13 + ? !!python/object/apply:multidict._multidict.istr + - x-stainless-retry-count + : - '0' + method: POST + uri: https://api.openai.com/v1/completions + response: + body: + string: "{\n \"id\": \"cmpl-CIewriWngbE1RFtJKNIRVOLbX9Q4S\",\n \"object\": + \"text_completion\",\n \"created\": 1758563345,\n \"model\": \"gpt-3.5-turbo-instruct:20230824-v2\",\n + \ \"choices\": [\n {\n \"text\": \"\\n\\nIt was a cold, stormy night + when Sarah stumbled upon a mysterious old bookshop in the heart of the city. + As she stepped inside, the musty smell of old books filled her nostrils. The + shop was dimly lit, and shelves upon shelves of ancient books lined the walls. + Sarah was immediately drawn to a particular book, its leather-bound cover + and golden lettering seemed to glow in the dim light.\\n\\nCuriosity getting + the best of her, Sarah reached out and opened the book. To her surprise, the + pages were blank except for one sentence written in elegant cursive, \\\"This + book holds the power of time travel.\\\" Sarah's heart raced with excitement + and fear. Could it be true? Was time travel really possible?\\n\\nWithout + hesitation, Sarah closed her eyes and wished with all her might to travel + back in time. Suddenly, a bright light enveloped her, and when she opened + her eyes, she found herself standing in a bustling market in ancient Rome. + She couldn't believe it, she had actually traveled through time!\\n\\nAs she + explored the city, Sarah couldn't help but think about all the possibilities + that time travel held. She could witness historical events, meet her ancestors, + and even change the course of history. But she also knew the dangers of altering\",\n + \ \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 8,\n \"completion_tokens\": + 256,\n \"total_tokens\": 264\n }\n}\n" + headers: + CF-RAY: + - 9833a309388b9027-BOS + Cache-Control: + - no-cache, must-revalidate + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Mon, 22 Sep 2025 17:49:07 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=Y5jbFZ.P.fPgRXhkV_s_xIwxa.dw4Qxk1esd.gWGFIY-1758563347-1.0.1.1-xVMeoAWjJ10fGJEgl1EfIUIB.NOdfS2Lgx1vDPcCeMBKr6KxCVAlSl2O_hHIKEnk5.mHJf26_4r6gjQPmeIImIULbzwCpdx_jKVC5aWfhNI; + path=/; expires=Mon, 22-Sep-25 18:19:07 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=BSFn8a4qYLV5csowZIlmQM_rvpaKLtsMPj4OiW6t8zg-1758563347404-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - gpt-3.5-turbo-instruct:20230824-v2 + openai-organization: + - datadog-staging + openai-processing-ms: + - '1978' + openai-project: + - proj_gt6TQZPRbZfoY2J9AQlEJMpd + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-7df74cfcfb-lwhf6 + x-envoy-upstream-service-time: + - '2023' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - '3500' + x-ratelimit-limit-tokens: + - '90000' + x-ratelimit-remaining-requests: + - '3499' + x-ratelimit-remaining-tokens: + - '89989' + x-ratelimit-reset-requests: + - 17ms + 
x-ratelimit-reset-tokens: + - 7ms + x-request-id: + - req_25f473ba84034159978e464d57b42e4c + status: + code: 200 + message: OK +version: 1 diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_chat_completion_event.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_chat_completion_event.yaml index 9fc04045726..8618855c52b 100644 --- a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_chat_completion_event.yaml +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_chat_completion_event.yaml @@ -5,7 +5,7 @@ interactions: "parent_id": "", "session_id": "98765432102", "name": "chat_completion_span", "tags": ["version:", "env:", "service:tests.llmobs", "source:integration"], "start_ns": 1707763310981223936, "duration": 12345678900, "status": "ok", "meta": - {"span.kind": "llm", "model_name": "gpt-3.5-turbo", "model_provider": "openai", + {"span": {"kind": "llm"}, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "input": {"messages": [{"role": "system", "content": "You are an evil dark lord looking for his one ring to rule them all"}, {"role": "user", "content": "I am a hobbit looking to go to Mordor"}]}, "output": {"messages": [{"content": diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_completion_event.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_completion_event.yaml index bf433ac810b..6232fbcd2e1 100644 --- a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_completion_event.yaml +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_completion_event.yaml @@ -5,7 +5,7 @@ interactions: "98765432101", "parent_id": "", "session_id": "98765432101", "name": "completion_span", "tags": ["version:", "env:", "service:tests.llmobs", "source:integration"], "start_ns": 1707763310981223236, "duration": 12345678900, "status": "ok", "meta": - {"span.kind": "llm", "model_name": "ada", "model_provider": "openai", "input": + {"span": {"kind": "llm"}, "model_name": "ada", "model_provider": "openai", "input": {"messages": [{"content": "who broke enigma?"}]}, "output": {"messages": [{"content": "\n\nThe Enigma code was broken by a team of codebreakers at Bletchley Park, led by mathematician Alan Turing."}]}, "metadata": {"temperature": 0, "max_tokens": diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_multiple_events.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_multiple_events.yaml index a2d8d2c67e8..0af247005d6 100644 --- a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_multiple_events.yaml +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_multiple_events.yaml @@ -5,7 +5,7 @@ interactions: "98765432101", "parent_id": "", "session_id": "98765432101", "name": "completion_span", "tags": ["version:", "env:", "service:tests.llmobs", "source:integration"], "start_ns": 1707763310981223236, "duration": 12345678900, "status": "ok", "meta": - {"span.kind": "llm", "model_name": "ada", "model_provider": "openai", "input": + {"span": {"kind": "llm"}, "model_name": "ada", "model_provider": "openai", "input": {"messages": [{"content": "who broke enigma?"}]}, "output": {"messages": [{"content": "\n\nThe 
Enigma code was broken by a team of codebreakers at Bletchley Park, led by mathematician Alan Turing."}]}, "metadata": {"temperature": 0, "max_tokens": @@ -15,7 +15,7 @@ interactions: "parent_id": "", "session_id": "98765432102", "name": "chat_completion_span", "tags": ["version:", "env:", "service:tests.llmobs", "source:integration"], "start_ns": 1707763310981223936, "duration": 12345678900, "status": "ok", "meta": - {"span.kind": "llm", "model_name": "gpt-3.5-turbo", "model_provider": "openai", + {"span": {"kind": "llm"}, "model_name": "gpt-3.5-turbo", "model_provider": "openai", "input": {"messages": [{"role": "system", "content": "You are an evil dark lord looking for his one ring to rule them all"}, {"role": "user", "content": "I am a hobbit looking to go to Mordor"}]}, "output": {"messages": [{"content": diff --git a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_timed_events.yaml b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_timed_events.yaml index ee9da5290fc..c91f44a0f5f 100644 --- a/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_timed_events.yaml +++ b/tests/llmobs/llmobs_cassettes/tests.llmobs.test_llmobs_span_agentless_writer.test_send_timed_events.yaml @@ -5,7 +5,7 @@ interactions: "98765432101", "parent_id": "", "session_id": "98765432101", "name": "completion_span", "tags": ["version:", "env:", "service:tests.llmobs", "source:integration"], "start_ns": 1707763310981223236, "duration": 12345678900, "status": "ok", "meta": - {"span.kind": "llm", "model_name": "ada", "model_provider": "openai", "input": + {"span": {"kind": "llm"}, "model_name": "ada", "model_provider": "openai", "input": {"messages": [{"content": "who broke enigma?"}]}, "output": {"messages": [{"content": "\n\nThe Enigma code was broken by a team of codebreakers at Bletchley Park, led by mathematician Alan Turing."}]}, "metadata": {"temperature": 0, "max_tokens": diff --git a/tests/llmobs/suitespec.yml b/tests/llmobs/suitespec.yml index 612822426c2..57e752e1ea7 100644 --- a/tests/llmobs/suitespec.yml +++ b/tests/llmobs/suitespec.yml @@ -2,6 +2,8 @@ components: anthropic: - ddtrace/contrib/internal/anthropic/* + google_adk: + - ddtrace/contrib/internal/google_adk/* google_generativeai: - ddtrace/contrib/internal/google_generativeai/* google_genai: @@ -41,17 +43,29 @@ suites: - tests/snapshots/tests.contrib.anthropic.* runner: riot snapshot: true + google_adk: + parallelism: 3 + paths: + - '@bootstrap' + - '@core' + - '@tracing' + - '@contrib' + - '@google_adk' + - '@llmobs' + - tests/contrib/google_adk/* + runner: riot + snapshot: true google_generativeai: parallelism: 1 paths: - - '@bootstrap' - - '@core' - - '@tracing' - - '@contrib' - - '@google_generativeai' - - '@llmobs' - - tests/contrib/google_generativeai/* - - tests/snapshots/tests.contrib.google_generativeai.* + - '@bootstrap' + - '@core' + - '@tracing' + - '@contrib' + - '@google_generativeai' + - '@llmobs' + - tests/contrib/google_generativeai/* + - tests/snapshots/tests.contrib.google_generativeai.* runner: riot snapshot: true google_genai: diff --git a/tests/llmobs/test_experiments.py b/tests/llmobs/test_experiments.py index 297c1169724..4d67476ead7 100644 --- a/tests/llmobs/test_experiments.py +++ b/tests/llmobs/test_experiments.py @@ -50,6 +50,18 @@ def faulty_evaluator(input_data, output_data, expected_output): raise ValueError("This is a test error in evaluator") +def faulty_summary_evaluator(inputs, outputs, 
expected_outputs, evaluators_results): + raise ValueError("This is a test error in a summary evaluator") + + +def dummy_summary_evaluator(inputs, outputs, expected_outputs, evaluators_results): + return len(inputs) + len(outputs) + len(expected_outputs) + len(evaluators_results["dummy_evaluator"]) + + +def dummy_summary_evaluator_using_missing_eval_results(inputs, outputs, expected_outputs, evaluators_results): + return len(inputs) + len(outputs) + len(expected_outputs) + len(evaluators_results["non_existent_evaluator"]) + + @pytest.fixture def test_dataset_records() -> List[DatasetRecord]: return [] @@ -1191,6 +1203,27 @@ def test_experiment_run_evaluators(llmobs, test_dataset_one_record): assert eval_results[0] == {"idx": 0, "evaluations": {"dummy_evaluator": {"value": False, "error": None}}} +def test_experiment_run_summary_evaluators(llmobs, test_dataset_one_record): + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[dummy_summary_evaluator], + ) + task_results = exp._run_task(1, raise_errors=False) + assert len(task_results) == 1 + eval_results = exp._run_evaluators(task_results, raise_errors=False) + assert len(eval_results) == 1 + assert eval_results[0] == {"idx": 0, "evaluations": {"dummy_evaluator": {"value": False, "error": None}}} + summary_eval_results = exp._run_summary_evaluators(task_results, eval_results, raise_errors=False) + assert len(summary_eval_results) == 1 + assert summary_eval_results[0] == { + "idx": 0, + "evaluations": {"dummy_summary_evaluator": {"value": 4, "error": None}}, + } + + def test_experiment_run_evaluators_error(llmobs, test_dataset_one_record): exp = llmobs.experiment("test_experiment", dummy_task, test_dataset_one_record, [faulty_evaluator]) task_results = exp._run_task(1, raise_errors=False) @@ -1204,6 +1237,54 @@ def test_experiment_run_evaluators_error(llmobs, test_dataset_one_record): assert err["stack"] is not None +def test_experiment_run_summary_evaluators_error(llmobs, test_dataset_one_record): + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[faulty_summary_evaluator], + ) + task_results = exp._run_task(1, raise_errors=False) + assert len(task_results) == 1 + eval_results = exp._run_evaluators(task_results, raise_errors=False) + assert len(eval_results) == 1 + assert eval_results[0] == {"idx": 0, "evaluations": {"dummy_evaluator": {"value": False, "error": None}}} + summary_eval_results = exp._run_summary_evaluators(task_results, eval_results, raise_errors=False) + assert summary_eval_results[0] == { + "idx": 0, + "evaluations": {"faulty_summary_evaluator": {"value": None, "error": mock.ANY}}, + } + err = summary_eval_results[0]["evaluations"]["faulty_summary_evaluator"]["error"] + assert err["message"] == "This is a test error in a summary evaluator" + assert err["type"] == "ValueError" + assert err["stack"] is not None + + +def test_experiment_summary_evaluators_missing_eval_error(llmobs, test_dataset_one_record): + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[dummy_summary_evaluator_using_missing_eval_results], + ) + task_results = exp._run_task(1, raise_errors=False) + assert len(task_results) == 1 + eval_results = exp._run_evaluators(task_results, raise_errors=False) + assert len(eval_results) == 1 + assert eval_results[0] == {"idx": 0, "evaluations": {"dummy_evaluator": {"value": False, 
"error": None}}} + summary_eval_results = exp._run_summary_evaluators(task_results, eval_results, raise_errors=False) + assert summary_eval_results[0] == { + "idx": 0, + "evaluations": {"dummy_summary_evaluator_using_missing_eval_results": {"value": None, "error": mock.ANY}}, + } + err = summary_eval_results[0]["evaluations"]["dummy_summary_evaluator_using_missing_eval_results"]["error"] + assert err["message"] == "'non_existent_evaluator'" + assert err["type"] == "KeyError" + assert err["stack"] is not None + + def test_experiment_run_evaluators_error_raises(llmobs, test_dataset_one_record): exp = llmobs.experiment("test_experiment", dummy_task, test_dataset_one_record, [faulty_evaluator]) task_results = exp._run_task(1, raise_errors=False) @@ -1212,14 +1293,46 @@ def test_experiment_run_evaluators_error_raises(llmobs, test_dataset_one_record) exp._run_evaluators(task_results, raise_errors=True) +def test_experiment_run_summary_evaluators_error_raises(llmobs, test_dataset_one_record): + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[faulty_summary_evaluator], + ) + task_results = exp._run_task(1, raise_errors=False) + assert len(task_results) == 1 + eval_results = exp._run_evaluators(task_results, raise_errors=False) + with pytest.raises(RuntimeError, match="Summary evaluator faulty_summary_evaluator failed"): + exp._run_summary_evaluators(task_results, eval_results, raise_errors=True) + + +def test_experiment_summary_eval_missing_results_raises(llmobs, test_dataset_one_record): + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[dummy_summary_evaluator_using_missing_eval_results], + ) + task_results = exp._run_task(1, raise_errors=False) + assert len(task_results) == 1 + eval_results = exp._run_evaluators(task_results, raise_errors=False) + with pytest.raises( + RuntimeError, match="Summary evaluator dummy_summary_evaluator_using_missing_eval_results failed" + ): + exp._run_summary_evaluators(task_results, eval_results, raise_errors=True) + + def test_experiment_merge_results(llmobs, test_dataset_one_record): exp = llmobs.experiment("test_experiment", dummy_task, test_dataset_one_record, [dummy_evaluator]) task_results = exp._run_task(1, raise_errors=False) eval_results = exp._run_evaluators(task_results, raise_errors=False) - merged_results = exp._merge_results(task_results, eval_results) + merged_results = exp._merge_results(task_results, eval_results, None) - assert len(merged_results) == 1 - exp_result = merged_results[0] + assert len(merged_results["rows"]) == 1 + exp_result = merged_results["rows"][0] assert exp_result["idx"] == 0 assert exp_result["record_id"] != "" assert exp_result["input"] == {"prompt": "What is the capital of France?"} @@ -1243,10 +1356,10 @@ def test_experiment_merge_err_results(llmobs, test_dataset_one_record): exp = llmobs.experiment("test_experiment", dummy_task, test_dataset_one_record, [faulty_evaluator]) task_results = exp._run_task(1, raise_errors=False) eval_results = exp._run_evaluators(task_results, raise_errors=False) - merged_results = exp._merge_results(task_results, eval_results) + merged_results = exp._merge_results(task_results, eval_results, None) - assert len(merged_results) == 1 - exp_result = merged_results[0] + assert len(merged_results["rows"]) == 1 + exp_result = merged_results["rows"][0] assert exp_result["idx"] == 0 assert exp_result["record_id"] != "" assert 
exp_result["input"] == {"prompt": "What is the capital of France?"} @@ -1290,8 +1403,49 @@ def test_experiment_run(llmobs, test_dataset_one_record): exp = llmobs.experiment("test_experiment", dummy_task, test_dataset_one_record, [dummy_evaluator]) exp._tags = {"ddtrace.version": "1.2.3"} # FIXME: this is a hack to set the tags for the experiment exp_results = exp.run() - assert len(exp_results) == 1 - exp_result = exp_results[0] + + assert len(exp_results["summary_evaluations"]) == 0 + assert len(exp_results["rows"]) == 1 + exp_result = exp_results["rows"][0] + assert exp_result["idx"] == 0 + assert exp_result["input"] == {"prompt": "What is the capital of France?"} + assert exp_result["output"] == {"prompt": "What is the capital of France?"} + assert exp_result["expected_output"] == {"answer": "Paris"} + assert exp.url == f"https://app.datadoghq.com/llm/experiments/{exp._id}" + + +def test_experiment_run_w_summary(llmobs, test_dataset_one_record): + with mock.patch("ddtrace.llmobs._experiment.Experiment._process_record") as mock_process_record: + # This is to ensure that the eval event post request contains the same span/trace IDs and timestamp. + mock_process_record.return_value = { + "idx": 0, + "span_id": "123", + "trace_id": "456", + "timestamp": 1234567890, + "output": {"prompt": "What is the capital of France?"}, + "metadata": { + "dataset_record_index": 0, + "experiment_name": "test_experiment", + "dataset_name": "test-dataset-123", + }, + "error": {"message": None, "type": None, "stack": None}, + } + exp = llmobs.experiment( + "test_experiment", + dummy_task, + test_dataset_one_record, + [dummy_evaluator], + summary_evaluators=[dummy_summary_evaluator], + ) + exp._tags = {"ddtrace.version": "1.2.3"} # FIXME: this is a hack to set the tags for the experiment + exp_results = exp.run() + + assert len(exp_results["summary_evaluations"]) == 1 + summary_eval = exp_results["summary_evaluations"]["dummy_summary_evaluator"] + assert summary_eval["value"] == 4 + assert summary_eval["error"] is None + assert len(exp_results["rows"]) == 1 + exp_result = exp_results["rows"][0] assert exp_result["idx"] == 0 assert exp_result["input"] == {"prompt": "What is the capital of France?"} assert exp_result["output"] == {"prompt": "What is the capital of France?"} diff --git a/tests/llmobs/test_llmobs.py b/tests/llmobs/test_llmobs.py index ffbdc7431ed..4d066eaae30 100644 --- a/tests/llmobs/test_llmobs.py +++ b/tests/llmobs/test_llmobs.py @@ -12,6 +12,7 @@ from ddtrace.llmobs._constants import PARENT_ID_KEY from ddtrace.llmobs._constants import ROOT_PARENT_ID from ddtrace.llmobs._utils import _get_session_id +from ddtrace.llmobs.types import Prompt from tests.llmobs._utils import _expected_llmobs_llm_span_event @@ -364,9 +365,9 @@ def test_error_is_set(tracer, llmobs_events): llm_span._set_ctx_item(const.SPAN_KIND, "llm") raise ValueError("error") span_event = llmobs_events[0] - assert span_event["meta"]["error.message"] == "error" - assert "ValueError" in span_event["meta"]["error.type"] - assert 'raise ValueError("error")' in span_event["meta"]["error.stack"] + assert span_event["meta"]["error"]["message"] == "error" + assert "ValueError" in span_event["meta"]["error"]["type"] + assert 'raise ValueError("error")' in span_event["meta"]["error"]["stack"] def test_model_provider_defaults_to_custom(tracer, llmobs_events): @@ -460,15 +461,42 @@ def test_structured_io_data(llmobs, llmobs_backend): def test_structured_prompt_data(llmobs, llmobs_backend): with llmobs.llm() as span: - llmobs.annotate(span, 
prompt={"template": "test {{value}}"}) + llmobs.annotate(span, input_data={"data": "test1"}, prompt={"template": "test {{value}}"}) + events = llmobs_backend.wait_for_num_events(num=1) + assert len(events) == 1 + assert events[0][0]["spans"][0]["meta"]["input"]["prompt"] == { + "id": "unnamed-ml-app_unnamed-prompt", + "template": "test {{value}}", + "_dd_context_variable_keys": ["context"], + "_dd_query_variable_keys": ["question"], + } + + +def test_structured_prompt_data_v2(llmobs, llmobs_backend): + prompt = Prompt( + id="test", + chat_template=[{"role": "user", "content": "test {{value}}"}], + variables={"value": "test", "context": "test", "question": "test"}, + tags={"env": "prod", "llm": "openai"}, + rag_context_variables=["context"], + rag_query_variables=["question"], + ) + with llmobs.llm() as span: + llmobs.annotate( + span, + prompt=prompt, + ) events = llmobs_backend.wait_for_num_events(num=1) assert len(events) == 1 assert events[0][0]["spans"][0]["meta"]["input"] == { "prompt": { - "template": "test {{value}}", + "id": "test", + "chat_template": [{"role": "user", "content": "test {{value}}"}], + "variables": {"value": "test", "context": "test", "question": "test"}, + "tags": {"env": "prod", "llm": "openai"}, "_dd_context_variable_keys": ["context"], "_dd_query_variable_keys": ["question"], - }, + } } @@ -628,3 +656,27 @@ def test_trace_id_propagation_with_non_llm_parent(llmobs, llmobs_events): # LLMObs trace IDs should be different from APM trace ID assert first_child_event["trace_id"] != first_child_event["_dd"]["apm_trace_id"] assert second_child_event["trace_id"] != second_child_event["_dd"]["apm_trace_id"] + + +@pytest.mark.parametrize("llmobs_env", [{"DD_APM_TRACING_ENABLED": "false"}]) +def test_apm_traces_dropped_when_disabled(llmobs, llmobs_events, tracer, llmobs_env): + from tests.utils import DummyWriter + + dummy_writer = DummyWriter() + tracer._span_aggregator.writer = dummy_writer + + with tracer.trace("apm_span") as apm_span: + apm_span.set_tag("operation", "test") + + # Create an LLMObs span (should be sent to LLMObs but not APM) + with llmobs.llm(model_name="test-model") as llm_span: + llmobs.annotate(llm_span, input_data="test input", output_data="test output") + + # Check that no APM traces were sent to the writer + assert len(dummy_writer.traces) == 0, "APM traces should be dropped when DD_APM_TRACING_ENABLED=false" + + # But LLMObs events should still be sent + assert len(llmobs_events) == 1 + llm_event = llmobs_events[0] + assert llm_event["meta"]["span"]["kind"] == "llm" + assert llm_event["meta"]["model_name"] == "test-model" diff --git a/tests/llmobs/test_llmobs_service.py b/tests/llmobs/test_llmobs_service.py index 536fe713949..3f080dd03ae 100644 --- a/tests/llmobs/test_llmobs_service.py +++ b/tests/llmobs/test_llmobs_service.py @@ -31,7 +31,7 @@ from ddtrace.llmobs._constants import SPAN_START_WHILE_DISABLED_WARNING from ddtrace.llmobs._constants import TAGS from ddtrace.llmobs._llmobs import SUPPORTED_LLMOBS_INTEGRATIONS -from ddtrace.llmobs.utils import Prompt +from ddtrace.llmobs.types import Prompt from ddtrace.trace import Context from tests.llmobs._utils import _expected_llmobs_eval_metric_event from tests.llmobs._utils import _expected_llmobs_llm_span_event @@ -839,11 +839,15 @@ def test_annotate_prompt_wrong_type(llmobs, mock_llmobs_logs): with llmobs.llm(model_name="test_model") as span: llmobs.annotate(span=span, prompt="prompt") assert span._get_ctx_item(INPUT_PROMPT) is None - mock_llmobs_logs.warning.assert_called_once_with("Failed to 
validate prompt with error: ", exc_info=True) + mock_llmobs_logs.warning.assert_called_once_with( + "Failed to validate prompt with error:", "Prompt must be a dictionary, received str.", exc_info=True + ) mock_llmobs_logs.reset_mock() llmobs.annotate(span=span, prompt={"template": 1}) - mock_llmobs_logs.warning.assert_called_once_with("Failed to validate prompt with error: ", exc_info=True) + mock_llmobs_logs.warning.assert_called_once_with( + "Failed to validate prompt with error:", "template: 1 must be a string, received int", exc_info=True + ) mock_llmobs_logs.reset_mock() @@ -1717,9 +1721,11 @@ def test_annotation_context_can_update_session_id(llmobs): def test_annotation_context_modifies_prompt(llmobs): - with llmobs.annotation_context(prompt={"template": "test_template"}): + prompt = {"template": "test_template"} + with llmobs.annotation_context(prompt=prompt): with llmobs.llm(name="test_agent", model_name="test") as span: assert span._get_ctx_item(INPUT_PROMPT) == { + "id": "unnamed-ml-app_unnamed-prompt", "template": "test_template", "_dd_context_variable_keys": ["context"], "_dd_query_variable_keys": ["question"], @@ -1862,9 +1868,11 @@ async def test_annotation_context_async_modifies_span_tags(llmobs): async def test_annotation_context_async_modifies_prompt(llmobs): - async with llmobs.annotation_context(prompt={"template": "test_template"}): + prompt = {"template": "test_template"} + async with llmobs.annotation_context(prompt=prompt): with llmobs.llm(name="test_agent", model_name="test") as span: assert span._get_ctx_item(INPUT_PROMPT) == { + "id": "unnamed-ml-app_unnamed-prompt", "template": "test_template", "_dd_context_variable_keys": ["context"], "_dd_query_variable_keys": ["question"], diff --git a/tests/llmobs/test_llmobs_span_agent_writer.py b/tests/llmobs/test_llmobs_span_agent_writer.py index 9b0470466ab..564db83e0b3 100644 --- a/tests/llmobs/test_llmobs_span_agent_writer.py +++ b/tests/llmobs/test_llmobs_span_agent_writer.py @@ -68,9 +68,9 @@ def test_truncating_oversized_events(mock_send_payload, mock_writer_logs): llmobs_span_writer.enqueue(_oversized_workflow_event()) mock_writer_logs.warning.assert_has_calls( [ - mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200724), - mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200464), - mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200445), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200729), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200469), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200450), ] ) diff --git a/tests/llmobs/test_llmobs_span_agentless_writer.py b/tests/llmobs/test_llmobs_span_agentless_writer.py index 0657b7aede9..f2b34519187 100644 --- a/tests/llmobs/test_llmobs_span_agentless_writer.py +++ b/tests/llmobs/test_llmobs_span_agentless_writer.py @@ -61,9 +61,9 @@ def test_truncating_oversized_events(mock_writer_logs): llmobs_span_writer.enqueue(_oversized_workflow_event()) mock_writer_logs.warning.assert_has_calls( [ - mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200724), - mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200464), - mock.call("dropping event input/output because its 
size (%d) exceeds the event size limit (5MB)", 5200445), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200729), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200469), + mock.call("dropping event input/output because its size (%d) exceeds the event size limit (5MB)", 5200450), ] ) diff --git a/tests/profiling_v2/test_uwsgi.py b/tests/profiling_v2/test_uwsgi.py index 20a0a92180b..6ab0da29b30 100644 --- a/tests/profiling_v2/test_uwsgi.py +++ b/tests/profiling_v2/test_uwsgi.py @@ -1,3 +1,4 @@ +from importlib.metadata import version import os import re import signal @@ -170,3 +171,48 @@ def test_uwsgi_threads_processes_no_primary_lazy_apps(uwsgi, tmp_path, monkeypat profile = pprof_utils.parse_newest_profile("%s.%d" % (filename, pid)) samples = pprof_utils.get_samples_with_value_type(profile, "wall-time") assert len(samples) > 0 + + +@pytest.mark.parametrize("lazy_flag", ["--lazy-apps", "--lazy"]) +@pytest.mark.skipif( + tuple(int(x) for x in version("uwsgi").split(".")) >= (2, 0, 30), + reason="uwsgi>=2.0.30 does not require --skip-atexit", +) +def test_uwsgi_require_skip_atexit_when_lazy_with_master(uwsgi, lazy_flag): + expected_warning = b"ddtrace.internal.uwsgi.uWSGIConfigDeprecationWarning: skip-atexit option must be set" + + proc = uwsgi("--enable-threads", "--master", "--processes", "2", lazy_flag) + time.sleep(1) + proc.terminate() + stdout, _ = proc.communicate() + assert expected_warning in stdout + + +@pytest.mark.parametrize("lazy_flag", ["--lazy-apps", "--lazy"]) +@pytest.mark.skipif( + tuple(int(x) for x in version("uwsgi").split(".")) >= (2, 0, 30), + reason="uwsgi>=2.0.30 does not require --skip-atexit", +) +def test_uwsgi_require_skip_atexit_when_lazy_without_master(uwsgi, lazy_flag): + expected_warning = b"ddtrace.internal.uwsgi.uWSGIConfigDeprecationWarning: skip-atexit option must be set" + num_workers = 2 + proc = uwsgi("--enable-threads", "--processes", str(num_workers), lazy_flag) + + worker_pids = [] + logged_warning = 0 + while True: + line = proc.stdout.readline() + if line == b"": + break + if expected_warning in line: + logged_warning += 1 + else: + m = re.match(r"^spawned uWSGI worker \d+ .*\(pid: (\d+),", line.decode()) + if m: + worker_pids.append(int(m.group(1))) + + if logged_warning == num_workers: + break + + for pid in worker_pids: + os.kill(pid, signal.SIGTERM) diff --git a/tests/smoke_test.py b/tests/smoke_test.py index 149d85ecd21..7f0c02b9b6c 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -73,7 +73,7 @@ def emit(self, record): print("Skipping test, 32-bit DDWAF not ready yet") # Profiling smoke test - if platform.system() in ("Linux", "Darwin") and sys.maxsize > (1 << 32): + if platform.system() in ("Linux", "Darwin") and sys.maxsize > (1 << 32) and sys.version_info[:2] < (3, 14): print("Running profiling smoke test...") profiling_cmd = [sys.executable, "-c", "import ddtrace.profiling.auto"] result = subprocess.run(profiling_cmd, capture_output=True, text=True) diff --git a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_all_itr_skip_suite_level.json b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_all_itr_skip_suite_level.json index 8de425a7afd..89fbf5d972a 100644 --- a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_all_itr_skip_suite_level.json +++ b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_all_itr_skip_suite_level.json @@ -214,7 +214,7 @@ 
"_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, "process_id": 32464, - "test.itr.tests_skipping.count": 6 + "test.itr.tests_skipping.count": 2 }, "duration": 16637167, "start": 1733391710055763179 @@ -265,7 +265,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 3 + "test.itr.tests_skipping.count": 1 }, "duration": 15873958, "start": 1733391710055843013 @@ -315,7 +315,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 3 + "test.itr.tests_skipping.count": 1 }, "duration": 15766500, "start": 1733391710055871846 @@ -366,7 +366,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 3 + "test.itr.tests_skipping.count": 1 }, "duration": 546708, "start": 1733391710071761221 @@ -416,7 +416,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 3 + "test.itr.tests_skipping.count": 1 }, "duration": 396958, "start": 1733391710071788096 diff --git a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_mix_fail_itr_suite_level.json b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_mix_fail_itr_suite_level.json index 0ffa04a53ad..4ef54a5f7c9 100644 --- a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_mix_fail_itr_suite_level.json +++ b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_runner_mix_fail_itr_suite_level.json @@ -501,7 +501,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "true", - "test.itr.tests_skipping.enabled": "false", + "test.itr.tests_skipping.enabled": "true", "test.itr.tests_skipping.tests_skipped": "true", "test.itr.tests_skipping.type": "suite", "test.itr.unskippable": "true", @@ -517,7 +517,7 @@ "_sampling_priority_v1": 1, "process_id": 32216, "test.code_coverage.lines_pct": 79.79, - "test.itr.tests_skipping.count": 7 + "test.itr.tests_skipping.count": 1 }, "duration": 25941084, "start": 1733391663023824379 @@ -534,7 +534,7 @@ "meta": { "_dd.base_service": "test_manual_api_fake_runner_mix_fail_itr_suite_level0", "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", - "_dd.ci.itr.tests_skipped": "true", + "_dd.ci.itr.tests_skipped": "false", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", "_dd.p.tid": "6751752f00000000", @@ -571,8 +571,8 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "false", - "test.itr.tests_skipping.enabled": "false", - "test.itr.tests_skipping.tests_skipped": "true", + "test.itr.tests_skipping.enabled": "true", + "test.itr.tests_skipping.tests_skipped": "false", "test.itr.tests_skipping.type": "suite", "test.itr.unskippable": "false", "test.module": "module_1", @@ -587,7 +587,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 1 + "test.itr.tests_skipping.count": 0 }, "duration": 21794750, "start": 1733391663024534963 @@ -639,7 +639,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "false", - "test.itr.tests_skipping.tests_skipped": "true", + "test.itr.tests_skipping.tests_skipped": "false", "test.itr.unskippable": "false", "test.module": "module_1", "test.module_path": "", @@ -655,7 +655,7 @@ "_dd.py.partial_flush": 1, 
"_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 1 + "test.itr.tests_skipping.count": 0 }, "duration": 21659000, "start": 1733391663024570463 @@ -672,7 +672,7 @@ "meta": { "_dd.base_service": "test_manual_api_fake_runner_mix_fail_itr_suite_level0", "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", - "_dd.ci.itr.tests_skipped": "true", + "_dd.ci.itr.tests_skipped": "false", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", "_dd.p.tid": "6751752f00000000", @@ -709,8 +709,8 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "false", - "test.itr.tests_skipping.enabled": "false", - "test.itr.tests_skipping.tests_skipped": "true", + "test.itr.tests_skipping.enabled": "true", + "test.itr.tests_skipping.tests_skipped": "false", "test.itr.tests_skipping.type": "suite", "test.itr.unskippable": "false", "test.module": "module_2", @@ -725,7 +725,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 1 + "test.itr.tests_skipping.count": 0 }, "duration": 1061417, "start": 1733391663046384504 @@ -777,7 +777,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "false", - "test.itr.tests_skipping.tests_skipped": "true", + "test.itr.tests_skipping.tests_skipped": "false", "test.itr.unskippable": "false", "test.module": "module_2", "test.module_path": "", @@ -793,7 +793,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 1 + "test.itr.tests_skipping.count": 0 }, "duration": 945708, "start": 1733391663046413046 @@ -847,7 +847,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "false", - "test.itr.tests_skipping.enabled": "false", + "test.itr.tests_skipping.enabled": "true", "test.itr.tests_skipping.tests_skipped": "false", "test.itr.tests_skipping.type": "suite", "test.itr.unskippable": "false", @@ -1053,7 +1053,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "true", - "test.itr.tests_skipping.enabled": "false", + "test.itr.tests_skipping.enabled": "true", "test.itr.tests_skipping.tests_skipped": "true", "test.itr.tests_skipping.type": "suite", "test.itr.unskippable": "true", @@ -1069,7 +1069,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 5 + "test.itr.tests_skipping.count": 1 }, "duration": 928208, "start": 1733391663048698171 @@ -1138,7 +1138,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 3 + "test.itr.tests_skipping.count": 1 }, "duration": 411791, "start": 1733391663048719213 @@ -1190,7 +1190,7 @@ "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", "test.itr.forced_run": "true", - "test.itr.tests_skipping.tests_skipped": "true", + "test.itr.tests_skipping.tests_skipped": "false", "test.itr.unskippable": "true", "test.module": "module_4", "test.module_path": "", @@ -1206,7 +1206,7 @@ "_dd.py.partial_flush": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "test.itr.tests_skipping.count": 2 + "test.itr.tests_skipping.count": 0 }, "duration": 353125, "start": 1733391663049183129 diff --git 
new file mode 100644
index 00000000000..e1774f90346
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_many_distributed_tracing_disabled].json
@@ -0,0 +1,140 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4448800000000",
+      "aas.function.name": "queue_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "c9093bcc39cf49438447885b9e88eeb6",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 24547
+    },
+    "duration": 1027379959,
+    "start": 1757693064664656877
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "4fe806b4-a7e8-4784-856c-8d51361ebcdb",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1471333,
+    "start": 1757693065176034377
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c4448800000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "f83ae125-a10a-437e-bab7-55d62d1ad48b",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 259208,
+    "start": 1757693065178186252
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c4448800000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 7187625,
+    "start": 1757693065178551794
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4448900000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "c9093bcc39cf49438447885b9e88eeb6",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 24547
+    },
+    "duration": 159750,
+    "start": 1757693065323525502
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_many_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_many_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..e6706d7d5ed
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_many_distributed_tracing_enabled].json
@@ -0,0 +1,172 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4447d00000000",
+      "aas.function.name": "queue_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "9994c89dd23f45c3a338314a1ada910e",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 24153
+    },
+    "duration": 1026084917,
+    "start": 1757693053712524928
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "f0849b12-776c-4fee-94f8-a8dd6ec47942",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 4526209,
+    "start": 1757693054216085844
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c4447d00000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "c95d409f-c354-446d-8215-0d55c0f59f74",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 681000,
+    "start": 1757693054222520219
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c4447d00000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c4447d00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234179041394688
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c4447d00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234179041394688
+      }
+    ],
+    "duration": 14731042,
+    "start": 1757693054223442386
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4447e00000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "9994c89dd23f45c3a338314a1ada910e",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 24153
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c4447d00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234179041394688
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c4447d00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234179041394688
+      }
+    ],
+    "duration": 393792,
+    "start": 1757693054374546636
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..f3427e4c6b5
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_disabled].json
@@ -0,0 +1,90 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4447200000000",
+      "aas.function.name": "queue_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "9186ba5464c44eaaa9d6781d31ada4d1",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 23755
+    },
+    "duration": 1025866250,
+    "start": 1757693042734824464
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "d5ffac2a-b39a-4039-840d-f6590ea0e1f5",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 8549708,
+    "start": 1757693043245823006
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4447300000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "d5ffac2a-b39a-4039-840d-f6590ea0e1f5",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "9186ba5464c44eaaa9d6781d31ada4d1",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 23755
+    },
+    "duration": 150458,
+    "start": 1757693043343096756
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..fdba68ffdf5
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_async_consume_one_distributed_tracing_enabled].json
@@ -0,0 +1,99 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4446700000000",
+      "aas.function.name": "queue_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "8f96ac6bc65540329e7b9bad89695f6d",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 23359
+    },
+    "duration": 1030611251,
+    "start": 1757693031621019500
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "9ede8e39-740a-453b-bd4c-7f41432d9ce6",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 7591917,
+    "start": 1757693032131878334
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4446800000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "9ede8e39-740a-453b-bd4c-7f41432d9ce6",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "8f96ac6bc65540329e7b9bad89695f6d",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 23359
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c4446700000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234084552114176
+      }
+    ],
+    "duration": 938375,
+    "start": 1757693032231033126
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..49c4376b3c5
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_disabled].json
@@ -0,0 +1,140 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332cc00000000",
+      "aas.function.name": "queue_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "cdda564dbbdd46eeb80690f69af5be5e",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 7094
+    },
+    "duration": 1025611292,
+    "start": 1757622988816079169
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "a2755684-08f2-45b8-94a0-ac41b0d28172",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1518542,
+    "start": 1757622989320127210
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c332cc00000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "14789e1b-8a6b-450f-9d06-3184a828fc1d",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 273042,
+    "start": 1757622989322330252
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c332cc00000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 9228666,
+    "start": 1757622989322707169
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332cd00000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "cdda564dbbdd46eeb80690f69af5be5e",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 7094
+    },
+    "duration": 180542,
+    "start": 1757622989513373419
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..9521daee4e6
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_many_distributed_tracing_enabled].json
@@ -0,0 +1,172 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332c100000000",
+      "aas.function.name": "queue_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "a32304239262435eaff3c61ac2259134",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 6699
+    },
+    "duration": 1025491875,
+    "start": 1757622977808263053
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "ad794064-f0c9-4ec6-901f-b62f98d66de8",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1494666,
+    "start": 1757622978323712303
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c332c100000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "2808dfe5-2ac5-4100-8c5a-0b04c66f8d57",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 320625,
+    "start": 1757622978325943803
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c332c100000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c332c100000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548933204913160192
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c332c100000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548933204913160192
+      }
+    ],
+    "duration": 7203333,
+    "start": 1757622978326402553
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332c200000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "a32304239262435eaff3c61ac2259134",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 6699
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c332c100000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548933204913160192
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c332c100000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548933204913160192
+      }
+    ],
+    "duration": 479833,
+    "start": 1757622978468174470
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..b472d8be732
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_disabled].json
@@ -0,0 +1,90 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332b600000000",
+      "aas.function.name": "queue_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "e62368bce3ba4067a3693958c82c91af",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 6301
+    },
+    "duration": 1029055958,
+    "start": 1757622966818108006
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "12f75303-8e51-4432-8d37-baba56f43abb",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 7160250,
+    "start": 1757622967335385631
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332b700000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "12f75303-8e51-4432-8d37-baba56f43abb",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "e62368bce3ba4067a3693958c82c91af",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 6301
+    },
+    "duration": 176125,
+    "start": 1757622967441064381
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..958d5108eed
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[queue_consume_one_distributed_tracing_enabled].json
@@ -0,0 +1,99 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/queuesendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332ab00000000",
+      "aas.function.name": "queue_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/queuesendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/queuesendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "81cca026b58743589887188896fc8db4",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 5905
+    },
+    "duration": 1025797334,
+    "start": 1757622955835397584
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "e5869af3-a906-4d80-833e-1957c537eef2",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 7967125,
+    "start": 1757622956349970084
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebusqueue",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c332ac00000000",
+      "aas.function.name": "servicebusqueue",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "e5869af3-a906-4d80-833e-1957c537eef2",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "81cca026b58743589887188896fc8db4",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 5905
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c332ab00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548933110423879680
+      }
+    ],
+    "duration": 1011375,
+    "start": 1757622956471232792
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..1d4bae6ddef
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_disabled].json
@@ -0,0 +1,140 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444de00000000",
+      "aas.function.name": "topic_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "7bc8c64168fd48038b44fa1a4f706754",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 27714
+    },
+    "duration": 1025169375,
+    "start": 1757693150593013917
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "261fd5c4-057f-40a3-b857-ac7ca736a581",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 2946834,
+    "start": 1757693151121473583
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c444de00000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "74664dd3-3711-41d7-9cba-5af434478090",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 526000,
+    "start": 1757693151125750083
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c444de00000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 10245334,
+    "start": 1757693151126463583
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444df00000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "7bc8c64168fd48038b44fa1a4f706754",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 27714
+    },
+    "duration": 173875,
+    "start": 1757693151297943250
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..285e3a62917
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_many_distributed_tracing_enabled].json
@@ -0,0 +1,172 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444d400000000",
+      "aas.function.name": "topic_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "b9b3623e4b4248d2840b679c68f31320",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 27319
+    },
+    "duration": 1025033042,
+    "start": 1757693140121650926
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "6686def7-beae-48f7-b705-32ec47ef1e2e",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 5319792,
+    "start": 1757693140629205134
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c444d400000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "36b7708f-7b8c-4160-81d2-a03378818efc",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1247750,
+    "start": 1757693140636104468
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c444d400000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c444d400000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234552703549440
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c444d400000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234552703549440
+      }
+    ],
+    "duration": 14555750,
+    "start": 1757693140637898343
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444d400000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "b9b3623e4b4248d2840b679c68f31320",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 27319
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c444d400000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234552703549440
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c444d400000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234552703549440
+      }
+    ],
+    "duration": 354209,
+    "start": 1757693140817381884
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..2b5dbc1d903
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_disabled].json
@@ -0,0 +1,90 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444c900000000",
+      "aas.function.name": "topic_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "90837f88efbd42c5aa2ca764e6310309",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 26923
+    },
+    "duration": 1024802251,
+    "start": 1757693129641820087
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "93b1001f-b855-4ea6-8b00-d59bf8ac4255",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 8823500,
+    "start": 1757693130160480213
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444ca00000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "93b1001f-b855-4ea6-8b00-d59bf8ac4255",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "90837f88efbd42c5aa2ca764e6310309",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 26923
+    },
+    "duration": 150292,
+    "start": 1757693130271051421
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..b2c8b831715
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_async_consume_one_distributed_tracing_enabled].json
@@ -0,0 +1,99 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444be00000000",
+      "aas.function.name": "topic_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "ab6e07e6f53e4142abc9149b26205f6b",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 26525
+    },
+    "duration": 1026852751,
+    "start": 1757693118684689638
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "837eed20-d733-4d77-a572-4c004e91f959",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 8689667,
+    "start": 1757693119199956180
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c444bf00000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "837eed20-d733-4d77-a572-4c004e91f959",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "ab6e07e6f53e4142abc9149b26205f6b",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 26525
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c444be00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7549234458214268928
+      }
+    ],
+    "duration": 1003541,
+    "start": 1757693119309663764
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..d5147d42c3a
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_disabled].json
@@ -0,0 +1,140 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c3360700000000",
+      "aas.function.name": "topic_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "d0ce6213a1aa41408792fd0211d69bcf",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 12372
+    },
+    "duration": 1032059667,
+    "start": 1757623815810601177
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "a27d08ce-ba5b-4e0d-8469-d422a6aa1d00",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1895708,
+    "start": 1757623816317982677
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c3360700000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "2bd37c2c-a6ff-4d91-987e-ee9ec1cf97a0",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 353417,
+    "start": 1757623816320689385
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c3360700000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 7470208,
+    "start": 1757623816321169427
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c3360800000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "d0ce6213a1aa41408792fd0211d69bcf",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 12372
+    },
+    "duration": 173250,
+    "start": 1757623816483666010
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..f14886ebb8d
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_many_distributed_tracing_enabled].json
@@ -0,0 +1,172 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagebatch",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335fc00000000",
+      "aas.function.name": "topic_send_message_batch",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagebatch",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagebatch",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "4ec1f90ca2cd45c28e8dbf9723b3c1b4",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11978
+    },
+    "duration": 1030990667,
+    "start": 1757623804813403546
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "6dcbf430-68f6-42f7-9f85-8d273e4bbafa",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 1846000,
+    "start": 1757623805315511380
+  },
+  {
+    "name": "azure.servicebus.create",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 3,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c335fc00000000",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "aa072078-5eec-401a-84a0-4f90dbbf7999",
+      "messaging.operation": "create",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 339583,
+    "start": 1757623805318220630
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 4,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "_dd.p.tid": "68c335fc00000000",
+      "component": "azure_servicebus",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c335fc00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548936756851113984
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c335fc00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548936756851113984
+      }
+    ],
+    "duration": 8163042,
+    "start": 1757623805318692880
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335fd00000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "topic.1",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "4ec1f90ca2cd45c28e8dbf9723b3c1b4",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11978
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c335fc00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548936756851113984
+      },
+      {
+        "trace_id": 0,
+        "span_id": 3,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c335fc00000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548936756851113984
+      }
+    ],
+    "duration": 431208,
+    "start": 1757623805482834422
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..b7d4976cb83
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_disabled].json
@@ -0,0 +1,90 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335f100000000",
+      "aas.function.name": "topic_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "0694dfdfc52147d182203e61cd98dab7",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11579
+    },
+    "duration": 1030532153,
+    "start": 1757623793331846763
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "85165552-7ae7-401f-9709-b2ecd6810f5f",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 11665625,
+    "start": 1757623793852543305
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335f100000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "85165552-7ae7-401f-9709-b2ecd6810f5f",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "0694dfdfc52147d182203e61cd98dab7",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11579
+    },
+    "duration": 182667,
+    "start": 1757623793976414722
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..af3faa37f4d
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_functions_servicebus.test_azure_functions_snapshot.test_service_bus_trigger[topic_consume_one_distributed_tracing_enabled].json
@@ -0,0 +1,99 @@
+[[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "POST /api/topicsendmessagesingle",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335e600000000",
+      "aas.function.name": "topic_send_single_message",
+      "aas.function.trigger": "Http",
+      "component": "azure_functions",
+      "http.method": "POST",
+      "http.route": "/api/topicsendmessagesingle",
+      "http.status_code": "200",
+      "http.url": "http://0.0.0.0:7071/api/topicsendmessagesingle",
+      "http.useragent": "python-httpx/x.xx.x",
+      "language": "python",
+      "runtime-id": "cfaf2c798e854cfb946379a60687ba05",
+      "span.kind": "server"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11181
+    },
+    "duration": 1027043376,
+    "start": 1757623782242818133
+  },
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "topic.1",
+    "trace_id": 0,
+    "span_id": 2,
+    "parent_id": 1,
+    "type": "worker",
+    "meta": {
+      "_dd.base_service": "test-func",
+      "component": "azure_servicebus",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "267837d3-0aa8-4585-bee6-e3a36d51452e",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1
+    },
+    "duration": 29630209,
+    "start": 1757623782749328633
+  }],
+[
+  {
+    "name": "azure.functions.invoke",
+    "service": "test-func",
+    "resource": "ServiceBus servicebustopic",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "serverless",
+    "meta": {
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c335e600000000",
+      "aas.function.name": "servicebustopic",
+      "aas.function.trigger": "ServiceBus",
+      "component": "azure_functions",
+      "language": "python",
+      "messaging.destination.name": "topic.1",
+      "messaging.message_id": "267837d3-0aa8-4585-bee6-e3a36d51452e",
+      "messaging.operation": "receive",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "cfaf2c798e854cfb946379a60687ba05",
+      "span.kind": "consumer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 11181
+    },
+    "span_links": [
+      {
+        "trace_id": 0,
+        "span_id": 2,
+        "tracestate": "dd=s:1;t.dm:-0;t.tid:68c335e600000000",
+        "flags": 2147483649,
+        "trace_id_high": 7548936662361833472
+      }
+    ],
+    "duration": 1049708,
+    "start": 1757623782932002342
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_disabled].json
new file mode 100644
index 00000000000..bfd6a3c4df6
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_disabled].json
@@ -0,0 +1,66 @@
+[[
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "worker",
+    "error": 0,
+    "meta": {
+      "_dd.base_service": "ddtrace_subprocess_dir",
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c1d7b600000000",
+      "component": "azure_servicebus",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "25be7898f5224e57b6c46a8cdc37b27a",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 16652
+    },
+    "duration": 543125000,
+    "start": 1757534134563309375
+  }],
+[
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "worker",
+    "error": 0,
+    "meta": {
+      "_dd.base_service": "ddtrace_subprocess_dir",
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c1d7b700000000",
+      "component": "azure_servicebus",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "25be7898f5224e57b6c46a8cdc37b27a",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 16652
+    },
+    "duration": 8201250,
+    "start": 1757534135113038084
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_enabled].json
new file mode 100644
index 00000000000..7d88105b4f0
--- /dev/null
+++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_list_distributed_tracing_enabled].json
@@ -0,0 +1,66 @@
+[[
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "worker",
+    "error": 0,
+    "meta": {
+      "_dd.base_service": "ddtrace_subprocess_dir",
+      "_dd.p.dm": "-0",
+
"_dd.p.tid": "68c1d7b200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "aa449f45aaf044bea1cb732b86e2ddbf", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16645 + }, + "duration": 545965791, + "start": 1757534130865697763 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7b300000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "aa449f45aaf044bea1cb732b86e2ddbf", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16645 + }, + "duration": 6207334, + "start": 1757534131416182679 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_disabled].json new file mode 100644 index 00000000000..27f760e8081 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_disabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7af00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "dd33e42e-52f3-4fe6-87d3-a6366bd022ba", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "797508c91a22429790b626ec13de80be", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16638 + }, + "duration": 540241917, + "start": 1757534127423649553 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7af00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "79afa9bd-d71d-4bce-95a4-2b9a293d7af1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "797508c91a22429790b626ec13de80be", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16638 + }, + 
"duration": 9730125, + "start": 1757534127968615845 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7af00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "797508c91a22429790b626ec13de80be", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16638 + }, + "duration": 7403917, + "start": 1757534127978884178 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7af00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "74ef737a-1c80-4e1c-9692-dccd9f6592db", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "797508c91a22429790b626ec13de80be", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16638 + }, + "duration": 5469458, + "start": 1757534127986591553 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_enabled].json new file mode 100644 index 00000000000..6ca5bfffd7d --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_async_single_distributed_tracing_enabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7ab00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "5eefb639-d76b-4a5c-a7c2-ac39d8dd7ed8", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "943363ec15ae4ed3982f2068cc3e6230", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16631 + }, + "duration": 541962458, + "start": 1757534123933092718 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7ac00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": 
"d91eaff5-01d0-4a06-a2ac-e31d552e8c85", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "943363ec15ae4ed3982f2068cc3e6230", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16631 + }, + "duration": 9909083, + "start": 1757534124481404843 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7ac00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "943363ec15ae4ed3982f2068cc3e6230", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16631 + }, + "duration": 7573625, + "start": 1757534124491884301 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7ac00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "ce12c58f-4514-4d9b-9cdd-bc30e0a8eb7f", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "943363ec15ae4ed3982f2068cc3e6230", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16631 + }, + "duration": 6929083, + "start": 1757534124499687968 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_disabled].json new file mode 100644 index 00000000000..9bca4f26bba --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a800000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "04d2aa74aa29471687efcc6b5af4bf33", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16624 + }, + "duration": 537728000, + "start": 1757534120399533716 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": 
"worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a800000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "04d2aa74aa29471687efcc6b5af4bf33", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16624 + }, + "duration": 7333000, + "start": 1757534120943349883 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_enabled].json new file mode 100644 index 00000000000..5043a126d3a --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_list_distributed_tracing_enabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a400000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "ea1d8e4095b64728a7ae8fc6dcc553f9", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16617 + }, + "duration": 537679834, + "start": 1757534116941075381 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a500000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "ea1d8e4095b64728a7ae8fc6dcc553f9", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16617 + }, + "duration": 7825875, + "start": 1757534117484987256 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_disabled].json new file mode 100644 index 00000000000..9ef3b88f5e4 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_disabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + 
"error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a100000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "2619e216-a66a-4335-b806-9eb889158ff9", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "9a81cf0e87bc414e8004d2919566b689", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16610 + }, + "duration": 544485917, + "start": 1757534113519534004 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "4a35c6a2-de73-49cc-9818-2451d9afa203", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "9a81cf0e87bc414e8004d2919566b689", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16610 + }, + "duration": 7305000, + "start": 1757534114070004213 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "9a81cf0e87bc414e8004d2919566b689", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16610 + }, + "duration": 5443208, + "start": 1757534114077671963 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d7a200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "c671e8b7-f2a1-4e75-89b5-cd40a37c2ddf", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "9a81cf0e87bc414e8004d2919566b689", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16610 + }, + "duration": 4784625, + "start": 1757534114083302255 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_enabled].json new file mode 100644 index 00000000000..a189e2cfa2e --- /dev/null +++ 
b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[schedule_messages_single_distributed_tracing_enabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d79e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "8da97d46-4c5c-44de-8bda-76737f3ef270", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "27e918920a434f088da3f06ae5a1c8f5", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16603 + }, + "duration": 544760625, + "start": 1757534110033129961 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d79e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "d49644d6-e715-49dd-a81f-f01a0750cd8b", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "27e918920a434f088da3f06ae5a1c8f5", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16603 + }, + "duration": 7860833, + "start": 1757534110583986045 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d79e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "27e918920a434f088da3f06ae5a1c8f5", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16603 + }, + "duration": 6060292, + "start": 1757534110592179211 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d79e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "6ee9bc36-7cc3-4d51-a6df-5ee75a2f8f78", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "27e918920a434f088da3f06ae5a1c8f5", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16603 + }, + "duration": 5126792, + "start": 1757534110598398628 + }]] diff --git 
a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_disabled].json new file mode 100644 index 00000000000..3a0938450d7 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4408e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "e7c5a9ceeb2f4eaf951a6881f676841f", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12918 + }, + "duration": 14898917, + "start": 1757692046870829253 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4408e00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "e7c5a9ceeb2f4eaf951a6881f676841f", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12918 + }, + "duration": 6968583, + "start": 1757692046890728378 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[default_config].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_enabled].json similarity index 65% rename from tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[default_config].json rename to tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_enabled].json index 6e33c9afdc0..3816cc69403 100644 --- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[default_config].json +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_disabled_batch_links_enabled].json @@ -1,8 +1,8 @@ [[ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 0, "span_id": 1, "parent_id": 0, @@ -11,28 +11,29 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": 
"6850906700000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.message_id": "b0fbf4b3-2074-4ba3-ab01-281bb5d34a7c", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 12903583, - "start": 1750110311409889927 + "duration": 2713542, + "start": 1757692043343313793 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 1, @@ -43,24 +44,25 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906600000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.message_id": "e368fe8a-010f-463a-afbc-86578e774171", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 26964375, - "start": 1750110310381765593 + "duration": 536833, + "start": 1757692043355031835 }], [ { @@ -75,28 +77,29 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906600000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", + "messaging.batch_count": "2", "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 8572041, - "start": 1750110310415752802 + "duration": 13082417, + "start": 1757692043355939668 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 3, @@ -107,30 +110,30 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906600000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 6635583, - "start": 1750110310424619510 + "duration": 864334, + "start": 1757692043369547251 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": 
"azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 4, "span_id": 1, "parent_id": 0, @@ -139,30 +142,31 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906700000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.message_id": "fb17949a-6f38-4ee3-9f4f-c1ef19228adc", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 21951875, - "start": 1750110311368422552 + "duration": 262417, + "start": 1757692043370621293 }], [ { "name": "azure.servicebus.send", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 5, "span_id": 1, "parent_id": 0, @@ -171,22 +175,23 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906700000000", + "_dd.p.tid": "68c4408b00000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "704bf68557d744cb989f318a455d281a", + "runtime-id": "43054341cd014443b82544007a5120f9", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1091 + "process_id": 12911 }, - "duration": 18494041, - "start": 1750110311391015344 + "duration": 7188833, + "start": 1757692043371020710 }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_disabled].json new file mode 100644 index 00000000000..63fa11fad98 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4408700000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "f4d6526ef2364f1c9d9876187d37882e", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12904 + }, + "duration": 15117250, + "start": 1757692039830470583 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + 
"parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4408700000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "f4d6526ef2364f1c9d9876187d37882e", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12904 + }, + "duration": 6735334, + "start": 1757692039852810291 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages_async.json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_enabled].json similarity index 52% rename from tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages_async.json rename to tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_enabled].json index 98897e1b928..5b4ec7b16c7 100644 --- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages_async.json +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_batch_distributed_tracing_enabled_batch_links_enabled].json @@ -1,6 +1,6 @@ [[ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 0, @@ -9,30 +9,31 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.message_id": "4b23aa3e-e5cd-4496-940d-5320a0c9fad9", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12897 }, - "duration": 26011500, - "start": 1750110335068599549 + "duration": 5401459, + "start": 1757692036266226470 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 1, @@ -41,26 +42,27 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.message_id": "d8cfdb20-7b52-4f72-a718-b78a4ec48b2d", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, 
"metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12897 }, - "duration": 7573833, - "start": 1750110335095067674 + "duration": 1001500, + "start": 1757692036279843429 }], [ { @@ -73,120 +75,125 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", + "_dd.span_links": "[{\"trace_id\": \"68c44084000000003a9196ea35ac2747\", \"span_id\": \"065ba32db2ba6bfe\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4408400000000\", \"flags\": 1}, {\"trace_id\": \"68c44084000000002bd91fcae6bbfb25\", \"span_id\": \"7287dc6b29eee3f0\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4408400000000\", \"flags\": 1}]", "component": "azure_servicebus", "language": "python", + "messaging.batch_count": "2", "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12897 }, - "duration": 7611833, - "start": 1750110335103109424 + "duration": 14283333, + "start": 1757692036281165762 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 3, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12897 }, - "duration": 8472792, - "start": 1750110335111254674 + "duration": 624875, + "start": 1757692036297625262 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 4, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.message_id": "a72cce87-bb3c-4dd1-8339-097785c11391", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 
897 + "process_id": 12897 }, - "duration": 8241917, - "start": 1750110335120044882 + "duration": 377834, + "start": 1757692036298410220 }], [ { "name": "azure.servicebus.send", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 5, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907f00000000", + "_dd.p.tid": "68c4408400000000", + "_dd.span_links": "[{\"trace_id\": \"68c440840000000005eb06f23f3c3816\", \"span_id\": \"4f336a92573c49e8\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4408400000000\", \"flags\": 1}, {\"trace_id\": \"68c4408400000000c8f49bbcfab14ab5\", \"span_id\": \"0814179dbe4f0e1b\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4408400000000\", \"flags\": 1}]", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "ab8ada9f14f24e1fa723367401f151a5", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12897 }, - "duration": 9245125, - "start": 1750110335128608716 + "duration": 5809292, + "start": 1757692036298907720 }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_disabled].json new file mode 100644 index 00000000000..afd77f47995 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78c00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "c841d58fdc724c5ab68ac87ecd040cd7", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16568 + }, + "duration": 545070501, + "start": 1757534092621417550 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78d00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "c841d58fdc724c5ab68ac87ecd040cd7", + "span.kind": "producer" + }, + "metrics": { + 
"_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16568 + }, + "duration": 8116584, + "start": 1757534093172579717 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_enabled].json new file mode 100644 index 00000000000..0760360ed00 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_list_distributed_tracing_enabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78900000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2a32d08d43d84de1a75713d1f883f7eb", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16561 + }, + "duration": 543648125, + "start": 1757534089144524049 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78900000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2a32d08d43d84de1a75713d1f883f7eb", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16561 + }, + "duration": 5128125, + "start": 1757534089691485466 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_disabled].json new file mode 100644 index 00000000000..c7f5144ec81 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_disabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78500000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "59fd18b7-9add-4a2b-8a81-6b622287aaf7", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2dc72a1361cf416889735ceafab2be50", + "span.kind": 
"producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16554 + }, + "duration": 540886583, + "start": 1757534085640484214 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "e30e4125-c5e3-49ff-9ffb-af3423912374", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2dc72a1361cf416889735ceafab2be50", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16554 + }, + "duration": 9716833, + "start": 1757534086188438464 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2dc72a1361cf416889735ceafab2be50", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16554 + }, + "duration": 7792500, + "start": 1757534086198866006 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "7611e0eb-c592-499a-bc37-1dadb8af49ac", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "2dc72a1361cf416889735ceafab2be50", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16554 + }, + "duration": 5778916, + "start": 1757534086206988256 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_enabled].json new file mode 100644 index 00000000000..9b44b67cb99 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_async_single_distributed_tracing_enabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78200000000", + "component": "azure_servicebus", + 
"language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "549b7502-e954-4001-891c-219c5816407b", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "1a3deac69c7249818b38c53d3b642639", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16547 + }, + "duration": 535752709, + "start": 1757534082116262462 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "00faf436-d1a9-4144-8c65-f0e8a9f53a84", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "1a3deac69c7249818b38c53d3b642639", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16547 + }, + "duration": 8635084, + "start": 1757534082657032212 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "1a3deac69c7249818b38c53d3b642639", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16547 + }, + "duration": 7362500, + "start": 1757534082666083004 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d78200000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "243928cb-5938-434a-a008-9b06fa0fb36e", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "1a3deac69c7249818b38c53d3b642639", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16547 + }, + "duration": 8388166, + "start": 1757534082673737171 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_disabled].json new file mode 100644 index 00000000000..65e4e9c173a --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_disabled].json 
@@ -0,0 +1,66 @@
+[[
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 0,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "worker",
+    "error": 0,
+    "meta": {
+      "_dd.base_service": "ddtrace_subprocess_dir",
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4402200000000",
+      "component": "azure_servicebus",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "f3c55db96b3d47bf96633e2d0aa56a94",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 12306
+    },
+    "duration": 14948292,
+    "start": 1757691938194506133
+  }],
+[
+  {
+    "name": "azure.servicebus.send",
+    "service": "azure_servicebus",
+    "resource": "queue.1",
+    "trace_id": 1,
+    "span_id": 1,
+    "parent_id": 0,
+    "type": "worker",
+    "error": 0,
+    "meta": {
+      "_dd.base_service": "ddtrace_subprocess_dir",
+      "_dd.p.dm": "-0",
+      "_dd.p.tid": "68c4402200000000",
+      "component": "azure_servicebus",
+      "language": "python",
+      "messaging.batch_count": "2",
+      "messaging.destination.name": "queue.1",
+      "messaging.operation": "send",
+      "messaging.system": "servicebus",
+      "network.destination.name": "localhost",
+      "runtime-id": "f3c55db96b3d47bf96633e2d0aa56a94",
+      "span.kind": "producer"
+    },
+    "metrics": {
+      "_dd.top_level": 1,
+      "_dd.tracer_kr": 1.0,
+      "_sampling_priority_v1": 1,
+      "process_id": 12306
+    },
+    "duration": 6576916,
+    "start": 1757691938217577092
+  }]]
diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_enabled].json
similarity index 65%
rename from tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[distributed_tracing_disabled].json
rename to tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_enabled].json
index fb133e38677..4c79b4cca2e 100644
--- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[distributed_tracing_disabled].json
+++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_disabled_batch_links_enabled].json
@@ -1,8 +1,8 @@
 [[
   {
-    "name": "azure.servicebus.send",
+    "name": "azure.servicebus.create",
     "service": "azure_servicebus",
-    "resource": "topic.1",
+    "resource": "queue.1",
     "trace_id": 0,
     "span_id": 1,
     "parent_id": 0,
@@ -11,28 +11,29 @@
     "meta": {
       "_dd.base_service": "ddtrace_subprocess_dir",
       "_dd.p.dm": "-0",
-      "_dd.p.tid": "6850906c00000000",
+      "_dd.p.tid": "68c4401e00000000",
       "component": "azure_servicebus",
       "language": "python",
-      "messaging.destination.name": "topic.1",
-      "messaging.operation": "send",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "78fa810e-b024-4c32-b624-737eacb2492d",
+      "messaging.operation": "create",
       "messaging.system": "servicebus",
       "network.destination.name": "localhost",
-      "runtime-id": "06488ac8750a44bf89e4dc21198721ef",
+      "runtime-id": "49191e0bf2694e0b9f5479717ed51720",
       "span.kind": "producer"
     },
     "metrics": {
       "_dd.top_level": 1,
       "_dd.tracer_kr": 1.0,
       "_sampling_priority_v1": 1,
-      "process_id": 1116
+      "process_id": 12298
     },
-    "duration": 12881041,
-    "start": 1750110316710563305
+    "duration": 1629417,
+    "start": 1757691934724831548
   }],
 [
   {
-    "name": "azure.servicebus.send",
+    "name": "azure.servicebus.create",
     "service": "azure_servicebus",
     "resource": "queue.1",
     "trace_id": 1,
@@ -43,24 +44,25 @@
     "meta": {
       "_dd.base_service": "ddtrace_subprocess_dir",
       "_dd.p.dm": "-0",
-      "_dd.p.tid": "6850906b00000000",
+      "_dd.p.tid": "68c4401e00000000",
       "component": "azure_servicebus",
       "language": "python",
       "messaging.destination.name": "queue.1",
-      "messaging.operation": "send",
+      "messaging.message_id": "ccd143c8-54fc-46f3-9cf1-5796e728286c",
+      "messaging.operation": "create",
       "messaging.system": "servicebus",
       "network.destination.name": "localhost",
-      "runtime-id": "06488ac8750a44bf89e4dc21198721ef",
+      "runtime-id": "49191e0bf2694e0b9f5479717ed51720",
       "span.kind": "producer"
     },
     "metrics": {
       "_dd.top_level": 1,
       "_dd.tracer_kr": 1.0,
       "_sampling_priority_v1": 1,
-      "process_id": 1116
+      "process_id": 12298
     },
-    "duration": 23669958,
-    "start": 1750110315684212304
+    "duration": 407708,
+    "start": 1757691934733772257
   }],
 [
   {
@@ -75,28 +77,29 @@
     "meta": {
       "_dd.base_service": "ddtrace_subprocess_dir",
       "_dd.p.dm": "-0",
-      "_dd.p.tid": "6850906b00000000",
+      "_dd.p.tid": "68c4401e00000000",
       "component": "azure_servicebus",
       "language": "python",
+      "messaging.batch_count": "2",
       "messaging.destination.name": "queue.1",
       "messaging.operation": "send",
       "messaging.system": "servicebus",
       "network.destination.name": "localhost",
-      "runtime-id": "06488ac8750a44bf89e4dc21198721ef",
+      "runtime-id": "49191e0bf2694e0b9f5479717ed51720",
       "span.kind": "producer"
     },
     "metrics": {
       "_dd.top_level": 1,
       "_dd.tracer_kr": 1.0,
       "_sampling_priority_v1": 1,
-      "process_id": 1116
+      "process_id": 12298
     },
-    "duration": 7918750,
-    "start": 1750110315714771012
+    "duration": 12486417,
+    "start": 1757691934734442465
   }],
 [
   {
-    "name": "azure.servicebus.send",
+    "name": "azure.servicebus.create",
     "service": "azure_servicebus",
     "resource": "queue.1",
     "trace_id": 3,
@@ -107,30 +110,30 @@
     "meta": {
       "_dd.base_service": "ddtrace_subprocess_dir",
       "_dd.p.dm": "-0",
-      "_dd.p.tid": "6850906b00000000",
+      "_dd.p.tid": "68c4401e00000000",
       "component": "azure_servicebus",
       "language": "python",
       "messaging.destination.name": "queue.1",
-      "messaging.operation": "send",
+      "messaging.operation": "create",
       "messaging.system": "servicebus",
       "network.destination.name": "localhost",
-      "runtime-id": "06488ac8750a44bf89e4dc21198721ef",
+      "runtime-id": "49191e0bf2694e0b9f5479717ed51720",
       "span.kind": "producer"
     },
     "metrics": {
       "_dd.top_level": 1,
       "_dd.tracer_kr": 1.0,
       "_sampling_priority_v1": 1,
-      "process_id": 1116
+      "process_id": 12298
     },
-    "duration": 8373500,
-    "start": 1750110315723141012
+    "duration": 853292,
+    "start": 1757691934747506423
   }],
 [
   {
-    "name": "azure.servicebus.send",
+    "name": "azure.servicebus.create",
     "service": "azure_servicebus",
-    "resource": "topic.1",
+    "resource": "queue.1",
     "trace_id": 4,
     "span_id": 1,
     "parent_id": 0,
@@ -139,30 +142,31 @@
     "meta": {
       "_dd.base_service": "ddtrace_subprocess_dir",
       "_dd.p.dm": "-0",
-      "_dd.p.tid": "6850906c00000000",
+      "_dd.p.tid": "68c4401e00000000",
       "component": "azure_servicebus",
       "language": "python",
-      "messaging.destination.name": "topic.1",
-      "messaging.operation": "send",
+      "messaging.destination.name": "queue.1",
+      "messaging.message_id": "05040d13-f53a-4dbb-b84e-4659a0b80027",
+      "messaging.operation": "create",
       "messaging.system": "servicebus",
"network.destination.name": "localhost", - "runtime-id": "06488ac8750a44bf89e4dc21198721ef", + "runtime-id": "49191e0bf2694e0b9f5479717ed51720", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1116 + "process_id": 12298 }, - "duration": 22651667, - "start": 1750110316670762721 + "duration": 381958, + "start": 1757691934748572257 }], [ { "name": "azure.servicebus.send", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 5, "span_id": 1, "parent_id": 0, @@ -171,22 +175,23 @@ "meta": { "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850906c00000000", + "_dd.p.tid": "68c4401e00000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "06488ac8750a44bf89e4dc21198721ef", + "runtime-id": "49191e0bf2694e0b9f5479717ed51720", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 1116 + "process_id": 12298 }, - "duration": 15597917, - "start": 1750110316694552721 + "duration": 6641417, + "start": 1757691934749181048 }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_disabled].json new file mode 100644 index 00000000000..589ceba2380 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4401b00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a6230f92b06f433bb939c559739e2297", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12290 + }, + "duration": 14289541, + "start": 1757691931311713297 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c4401b00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a6230f92b06f433bb939c559739e2297", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 12290 + }, 
+ "duration": 6198167, + "start": 1757691931333198130 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages.json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_enabled].json similarity index 52% rename from tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages.json rename to tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_enabled].json index cd084074fd1..423e59cf650 100644 --- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_schedule_messages.json +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_batch_distributed_tracing_enabled_batch_links_enabled].json @@ -1,6 +1,6 @@ [[ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 0, @@ -9,30 +9,31 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.message_id": "1886fab3-4023-4027-ad1a-93ca2a2cea39", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 18992750, - "start": 1750110330813074255 + "duration": 5089333, + "start": 1757691927756432212 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", "resource": "queue.1", "trace_id": 1, @@ -41,26 +42,27 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", "component": "azure_servicebus", "language": "python", "messaging.destination.name": "queue.1", - "messaging.operation": "send", + "messaging.message_id": "ec67f9e2-aafc-4013-b8cd-be5159a1e7d6", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 11015167, - "start": 1750110330832814422 + "duration": 808083, + "start": 1757691927769353670 }], [ { @@ -73,120 +75,125 @@ "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", + "_dd.span_links": "[{\"trace_id\": \"68c44017000000001bbd12f58c419ee5\", \"span_id\": \"8b4501284983f86c\", \"tracestate\": 
\"dd=s:1;t.dm:-0;t.tid:68c4401700000000\", \"flags\": 1}, {\"trace_id\": \"68c4401700000000185b7708e863d8e3\", \"span_id\": \"3e3805196f5c7486\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4401700000000\", \"flags\": 1}]", "component": "azure_servicebus", "language": "python", + "messaging.batch_count": "2", "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 8210791, - "start": 1750110330844454964 + "duration": 12197875, + "start": 1757691927770398337 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 3, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 9530041, - "start": 1750110330853229839 + "duration": 890083, + "start": 1757691927784499045 }], [ { - "name": "azure.servicebus.send", + "name": "azure.servicebus.create", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 4, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", + "messaging.destination.name": "queue.1", + "messaging.message_id": "52863d23-2e6d-4b72-a170-7cdba5eeae29", + "messaging.operation": "create", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 9027125, - "start": 1750110330863114880 + "duration": 400417, + "start": 1757691927785558253 }], [ { "name": "azure.servicebus.send", "service": "azure_servicebus", - "resource": "topic.1", + "resource": "queue.1", "trace_id": 5, "span_id": 1, "parent_id": 0, "type": "worker", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", + "_dd.base_service": "ddtrace_subprocess_dir", "_dd.p.dm": "-0", - "_dd.p.tid": "6850907a00000000", + "_dd.p.tid": "68c4401700000000", + "_dd.span_links": "[{\"trace_id\": \"68c4401700000000ac7d7984057128da\", \"span_id\": 
\"bc9e3de1271c9abe\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4401700000000\", \"flags\": 1}, {\"trace_id\": \"68c440170000000068381c0e0384f8ba\", \"span_id\": \"0c73e3391deda087\", \"tracestate\": \"dd=s:1;t.dm:-0;t.tid:68c4401700000000\", \"flags\": 1}]", "component": "azure_servicebus", "language": "python", - "messaging.destination.name": "topic.1", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", "messaging.operation": "send", "messaging.system": "servicebus", "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", + "runtime-id": "59f3c49fafba4c62879ad49dbccdafd2", "span.kind": "producer" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 897 + "process_id": 12282 }, - "duration": 9681291, - "start": 1750110330872561839 + "duration": 5903083, + "start": 1757691927786100170 }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_disabled].json new file mode 100644 index 00000000000..5838ed6d3c9 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_disabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d77000000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "523e462b378b4ecc851933bb5a7ae315", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16512 + }, + "duration": 546131542, + "start": 1757534064710902010 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d77100000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "523e462b378b4ecc851933bb5a7ae315", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16512 + }, + "duration": 7704042, + "start": 1757534065263791260 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_enabled].json new file mode 100644 index 00000000000..c877c2406b9 --- /dev/null +++ 
b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_list_distributed_tracing_enabled].json @@ -0,0 +1,66 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76d00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "63daef539cbc4629b4e21e42ffb7a8a9", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16505 + }, + "duration": 531543834, + "start": 1757534061253268716 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76d00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.batch_count": "2", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "63daef539cbc4629b4e21e42ffb7a8a9", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16505 + }, + "duration": 4978833, + "start": 1757534061788785217 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_disabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_disabled].json new file mode 100644 index 00000000000..7a1dc13da1d --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_disabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76900000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "6a46af62-0366-4f75-a3a2-1eebfdde4aea", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a27a425a24294ea0b36295730ab196d8", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16498 + }, + "duration": 552578875, + "start": 1757534057730533090 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76a00000000", + "component": "azure_servicebus", + "language": "python", + 
"messaging.destination.name": "queue.1", + "messaging.message_id": "71f81232-0d3c-4313-a04c-f280aab4882c", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a27a425a24294ea0b36295730ab196d8", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16498 + }, + "duration": 7029875, + "start": 1757534058289521340 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76a00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a27a425a24294ea0b36295730ab196d8", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16498 + }, + "duration": 5993583, + "start": 1757534058296938715 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76a00000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "b33b414e-7059-4c2e-a114-fb1d5563502b", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "a27a425a24294ea0b36295730ab196d8", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16498 + }, + "duration": 5056458, + "start": 1757534058303234382 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_enabled].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_enabled].json new file mode 100644 index 00000000000..233fa428fd8 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_producer[send_messages_single_distributed_tracing_enabled].json @@ -0,0 +1,131 @@ +[[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "061e2b65-4cc1-4138-939b-395ba20487e3", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "135121aa8da74f3ca97abaf9816b8289", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16491 + }, + "duration": 552660000, + "start": 1757534054134645130 + }], +[ + { + "name": "azure.servicebus.send", + "service": 
"azure_servicebus", + "resource": "queue.1", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "5eae45f5-7a86-45d6-beb9-c574e6bcd0fc", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "135121aa8da74f3ca97abaf9816b8289", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16491 + }, + "duration": 7807333, + "start": 1757534054693857880 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "135121aa8da74f3ca97abaf9816b8289", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16491 + }, + "duration": 5892291, + "start": 1757534054702405797 + }], +[ + { + "name": "azure.servicebus.send", + "service": "azure_servicebus", + "resource": "queue.1", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "worker", + "error": 0, + "meta": { + "_dd.base_service": "ddtrace_subprocess_dir", + "_dd.p.dm": "-0", + "_dd.p.tid": "68c1d76600000000", + "component": "azure_servicebus", + "language": "python", + "messaging.destination.name": "queue.1", + "messaging.message_id": "eaf39d71-071a-4e17-9ca1-46d00bfd1e7c", + "messaging.operation": "send", + "messaging.system": "servicebus", + "network.destination.name": "localhost", + "runtime-id": "135121aa8da74f3ca97abaf9816b8289", + "span.kind": "producer" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 16491 + }, + "duration": 4774334, + "start": 1757534054708523463 + }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[span_attribute_schema_v1].json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[span_attribute_schema_v1].json deleted file mode 100644 index 16ab11dfa60..00000000000 --- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages[span_attribute_schema_v1].json +++ /dev/null @@ -1,186 +0,0 @@ -[[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "topic.1", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81800000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - 
"process_id": 11217 - }, - "duration": 14596875, - "start": 1751304216767767791 - }], -[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "topic.1", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81800000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 11217 - }, - "duration": 11149708, - "start": 1751304216782794333 - }], -[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "queue.1", - "trace_id": 2, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 11217 - }, - "duration": 25120667, - "start": 1751304215746154055 - }], -[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "queue.1", - "trace_id": 3, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 11217 - }, - "duration": 9647417, - "start": 1751304215778923388 - }], -[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "queue.1", - "trace_id": 4, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 11217 - }, - "duration": 6508958, - "start": 1751304215788876055 - }], -[ - { - "name": "azure.servicebus.send", - "service": "ddtrace_subprocess_dir", - "resource": "topic.1", - "trace_id": 5, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "6862c81800000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "dccdcc0a39b44e9abdf3e7acb3048062", - 
"span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 11217 - }, - "duration": 27128666, - "start": 1751304216739395750 - }]] diff --git a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages_async.json b/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages_async.json deleted file mode 100644 index 5ad0bbc5524..00000000000 --- a/tests/snapshots/tests.contrib.azure_servicebus.test_azure_servicebus_snapshot.test_send_messages_async.json +++ /dev/null @@ -1,192 +0,0 @@ -[[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "queue.1", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907600000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 12258458, - "start": 1750110326982221004 - }], -[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "queue.1", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907600000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 7406250, - "start": 1750110326998736004 - }], -[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "queue.1", - "trace_id": 2, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "queue.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 15415000, - "start": 1750110327006494920 - }], -[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "topic.1", - "trace_id": 3, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": 
"620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 6235334, - "start": 1750110327022375170 - }], -[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "topic.1", - "trace_id": 4, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 8304833, - "start": 1750110327028924837 - }], -[ - { - "name": "azure.servicebus.send", - "service": "azure_servicebus", - "resource": "topic.1", - "trace_id": 5, - "span_id": 1, - "parent_id": 0, - "type": "worker", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.azure_servicebus", - "_dd.p.dm": "-0", - "_dd.p.tid": "6850907700000000", - "component": "azure_servicebus", - "language": "python", - "messaging.destination.name": "topic.1", - "messaging.operation": "send", - "messaging.system": "servicebus", - "network.destination.name": "localhost", - "runtime-id": "620b88eb625f4cebac13bf18fb7aaace", - "span.kind": "producer" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 897 - }, - "duration": 7278208, - "start": 1750110327037538087 - }]] diff --git a/tests/snapshots/tests.contrib.unittest.test_unittest_snapshot.test_unittest_will_force_run_multiple_unskippable_tests.json b/tests/snapshots/tests.contrib.unittest.test_unittest_snapshot.test_unittest_will_force_run_multiple_unskippable_tests.json index ba53971aa34..278a9bdfa6a 100644 --- a/tests/snapshots/tests.contrib.unittest.test_unittest_snapshot.test_unittest_will_force_run_multiple_unskippable_tests.json +++ b/tests/snapshots/tests.contrib.unittest.test_unittest_snapshot.test_unittest_will_force_run_multiple_unskippable_tests.json @@ -13,21 +13,21 @@ "_dd.ci.itr.tests_skipped": "false", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", - "runtime-id": "f4a27bef66d54ddcba621eb68a0c729c", + "os.version": "6.10.14-linuxkit", + "runtime-id": "14361ad28ac442718b54523afc220f35", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.code_coverage.enabled": "true", "test.command": "python -m unittest", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.itr.forced_run": "true", "test.itr.tests_skipping.enabled": "true", "test.itr.tests_skipping.tests_skipped": "false", @@ -35,7 +35,7 @@ "test.itr.unskippable": "true", "test.status": "pass", "test.type": "test", - "test_session_id": "14675548815629683126", + "test_session_id": "6259477891686298440", "type": "test_session_end" }, 
"metrics": { @@ -43,11 +43,11 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 61773, + "process_id": 5062, "test.itr.tests_skipping.count": 0 }, - "duration": 30886875, - "start": 1727165845353614551 + "duration": 21623417, + "start": 1758315087327090711 }, { "name": "unittest.test_module", @@ -63,20 +63,20 @@ "_dd.ci.itr.tests_skipped": "false", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", + "os.version": "6.10.14-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.code_coverage.enabled": "true", "test.command": "python -m unittest", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.itr.forced_run": "true", "test.itr.tests_skipping.enabled": "true", "test.itr.tests_skipping.tests_skipped": "false", @@ -86,8 +86,8 @@ "test.module_path": "test_my_coverage.py", "test.status": "pass", "test.type": "test", - "test_module_id": "5102364877024901825", - "test_session_id": "14675548815629683126", + "test_module_id": "8785991147711005033", + "test_session_id": "6259477891686298440", "type": "test_module_end" }, "metrics": { @@ -96,8 +96,8 @@ "_sampling_priority_v1": 1, "test.itr.tests_skipping.count": 0 }, - "duration": 30052416, - "start": 1727165845353972843 + "duration": 20970333, + "start": 1758315087327228503 }, { "name": "unittest.test_suite", @@ -112,27 +112,27 @@ "_dd.base_service": "", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", + "os.version": "6.10.14-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.command": "python -m unittest", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.module": "test_my_coverage", "test.module_path": "test_my_coverage.py", "test.status": "pass", "test.suite": "CoverageTestCase", "test.type": "test", - "test_module_id": "5102364877024901825", - "test_session_id": "14675548815629683126", - "test_suite_id": "7531210866410348623", + "test_module_id": "8785991147711005033", + "test_session_id": "6259477891686298440", + "test_suite_id": "9280460924517435842", "type": "test_suite_end" }, "metrics": { @@ -140,8 +140,8 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 29951625, - "start": 1727165845354030301 + "duration": 20894083, + "start": 1758315087327277420 }, { "name": "unittest.test", @@ -156,21 +156,21 @@ "_dd.base_service": "", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", + "os.version": 
"6.10.14-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.class_hierarchy": "CoverageTestCase", "test.command": "python -m unittest", "test.coverage": "{\"files\": [{\"filename\": \"test_my_coverage.py\", \"segments\": [[33, 0, 34, 0, -1]]}, {\"filename\": \"lib_fn.py\", \"segments\": [[1, 0, 2, 0, -1]]}]}", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.itr.forced_run": "true", "test.itr.unskippable": "true", "test.module": "test_my_coverage", @@ -180,9 +180,9 @@ "test.status": "pass", "test.suite": "CoverageTestCase", "test.type": "test", - "test_module_id": "5102364877024901825", - "test_session_id": "14675548815629683126", - "test_suite_id": "7531210866410348623", + "test_module_id": "8785991147711005033", + "test_session_id": "6259477891686298440", + "test_suite_id": "9280460924517435842", "type": "test" }, "metrics": { @@ -192,8 +192,8 @@ "test.source.end": 35, "test.source.start": 31 }, - "duration": 27417792, - "start": 1727165845354050634 + "duration": 18375959, + "start": 1758315087327296836 }, { "name": "unittest.test", @@ -208,21 +208,21 @@ "_dd.base_service": "", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", + "os.version": "6.10.14-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.class_hierarchy": "CoverageTestCase", "test.command": "python -m unittest", "test.coverage": "{\"files\": [{\"filename\": \"test_my_coverage.py\", \"segments\": [[37, 0, 38, 0, -1]]}, {\"filename\": \"ret_false.py\", \"segments\": [[1, 0, 2, 0, -1]]}]}", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.itr.forced_run": "true", "test.itr.unskippable": "true", "test.module": "test_my_coverage", @@ -232,9 +232,9 @@ "test.status": "pass", "test.suite": "CoverageTestCase", "test.type": "test", - "test_module_id": "5102364877024901825", - "test_session_id": "14675548815629683126", - "test_suite_id": "7531210866410348623", + "test_module_id": "8785991147711005033", + "test_session_id": "6259477891686298440", + "test_suite_id": "9280460924517435842", "type": "test" }, "metrics": { @@ -244,8 +244,8 @@ "test.source.end": 39, "test.source.start": 35 }, - "duration": 948291, - "start": 1727165845382541218 + "duration": 944833, + "start": 1758315087346664628 }, { "name": "unittest.test", @@ -260,21 +260,21 @@ "_dd.base_service": "", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "66f2759500000000", + "_dd.p.tid": "68cdc24f00000000", "component": "unittest", "language": "python", - "library_version": "2.14.0.dev72+g6df23bd02", + "library_version": "3.15.0.dev174+g91f12f74e.d20250919", "os.architecture": "aarch64", "os.platform": "Linux", - "os.version": "6.6.12-linuxkit", + "os.version": "6.10.14-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.14.0rc1", "span.kind": "test", "test.class_hierarchy": "CoverageTestCase", "test.command": "python -m unittest", - "test.coverage": "{\"files\": [{\"filename\": \"test_my_coverage.py\", \"segments\": [[40, 0, 41, 0, 
-1]]}, {\"filename\": \"ret_false.py\", \"segments\": [[2, 0, 2, 0, -1]]}]}", + "test.coverage": "{\"files\": [{\"filename\": \"test_my_coverage.py\", \"segments\": [[40, 0, 41, 0, -1]]}, {\"filename\": \"ret_false2.py\", \"segments\": [[1, 0, 2, 0, -1]]}]}", "test.framework": "unittest", - "test.framework_version": "3.9.19", + "test.framework_version": "3.14.0rc1", "test.module": "test_my_coverage", "test.module_path": "test_my_coverage.py", "test.name": "test_third", @@ -282,9 +282,9 @@ "test.status": "pass", "test.suite": "CoverageTestCase", "test.type": "test", - "test_module_id": "5102364877024901825", - "test_session_id": "14675548815629683126", - "test_suite_id": "7531210866410348623", + "test_module_id": "8785991147711005033", + "test_session_id": "6259477891686298440", + "test_suite_id": "9280460924517435842", "type": "test" }, "metrics": { @@ -294,6 +294,6 @@ "test.source.end": 42, "test.source.start": 39 }, - "duration": 323083, - "start": 1727165845383601176 + "duration": 419292, + "start": 1758315087347708628 }]] diff --git a/tests/sourcecode/test_source_code_env_vars.py b/tests/sourcecode/test_source_code_env_vars.py new file mode 100644 index 00000000000..92c4683700a --- /dev/null +++ b/tests/sourcecode/test_source_code_env_vars.py @@ -0,0 +1,55 @@ +import os +from unittest import mock + +from ddtrace.sourcecode._utils import get_commit_sha +from ddtrace.sourcecode._utils import get_repository_url + + +class TestSourceCodeEnvVars: + def test_get_commit_sha_uses_env_var_when_present(self): + test_sha = "abc123def456" + with mock.patch.dict(os.environ, {"DD_GIT_COMMIT_SHA": test_sha}): + with mock.patch("ddtrace.sourcecode._utils._query_git") as mock_git: + result = get_commit_sha() + assert result == test_sha + mock_git.assert_not_called() + + def test_get_commit_sha_calls_git_when_env_var_not_present(self): + test_sha = "git_result_sha" + with mock.patch.dict(os.environ, {}, clear=True): + with mock.patch("ddtrace.sourcecode._utils._query_git", return_value=test_sha) as mock_git: + result = get_commit_sha() + assert result == test_sha + mock_git.assert_called_once_with(["rev-parse", "HEAD"]) + + def test_get_commit_sha_calls_git_when_env_var_empty(self): + test_sha = "git_result_sha" + with mock.patch.dict(os.environ, {"DD_GIT_COMMIT_SHA": ""}): + with mock.patch("ddtrace.sourcecode._utils._query_git", return_value=test_sha) as mock_git: + result = get_commit_sha() + assert result == test_sha + mock_git.assert_called_once_with(["rev-parse", "HEAD"]) + + def test_get_repository_url_uses_env_var_when_present(self): + test_url = "https://github.com/user/repo.git" + with mock.patch.dict(os.environ, {"DD_GIT_REPOSITORY_URL": test_url}): + with mock.patch("ddtrace.sourcecode._utils._query_git") as mock_git: + result = get_repository_url() + assert result == test_url + mock_git.assert_not_called() + + def test_get_repository_url_calls_git_when_env_var_not_present(self): + test_url = "git_result_url" + with mock.patch.dict(os.environ, {}, clear=True): + with mock.patch("ddtrace.sourcecode._utils._query_git", return_value=test_url) as mock_git: + result = get_repository_url() + assert result == test_url + mock_git.assert_called_once_with(["config", "--get", "remote.origin.url"]) + + def test_get_repository_url_calls_git_when_env_var_empty(self): + test_url = "git_result_url" + with mock.patch.dict(os.environ, {"DD_GIT_REPOSITORY_URL": ""}): + with mock.patch("ddtrace.sourcecode._utils._query_git", return_value=test_url) as mock_git: + result = get_repository_url() + assert result == 
test_url + mock_git.assert_called_once_with(["config", "--get", "remote.origin.url"]) diff --git a/tests/suitespec.yml b/tests/suitespec.yml index f7a383f2cc9..977f6685e79 100644 --- a/tests/suitespec.yml +++ b/tests/suitespec.yml @@ -177,7 +177,7 @@ suites: pattern: integration-latest* runner: riot integration_testagent: - parallelism: 2 + parallelism: 3 paths: - '@tracing' - '@bootstrap' diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index 623a23c47d1..783f258cabb 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -1,5 +1,4 @@ import os -import re import pytest @@ -55,12 +54,12 @@ def test_enable_fork(test_agent_session, run_python_code_in_subprocess): runtime_id = stdout.strip().decode("utf-8") # Validate that one app-closing event was sent and it was queued in the parent process - app_closing = test_agent_session.get_events("app-closing", subprocess=True) + app_closing = test_agent_session.get_events("app-closing") assert len(app_closing) == 1 assert app_closing[0]["runtime_id"] == runtime_id # Validate that one app-started event was sent and it was queued in the parent process - app_started = test_agent_session.get_events("app-started", subprocess=True) + app_started = test_agent_session.get_events("app-started") assert len(app_started) == 1 assert app_started[0]["runtime_id"] == runtime_id @@ -93,7 +92,7 @@ def test_enable_fork_heartbeat(test_agent_session, run_python_code_in_subprocess assert stderr == b"", stderr # Allow test agent session to capture all heartbeat events - app_heartbeats = test_agent_session.get_events("app-heartbeat", filter_heartbeats=False, subprocess=True) + app_heartbeats = test_agent_session.get_events("app-heartbeat", filter_heartbeats=False) assert len(app_heartbeats) > 1 @@ -161,28 +160,25 @@ def process_trace(self, trace): # force app_started event (instead of waiting for 10 seconds) from ddtrace.internal.telemetry import telemetry_writer -telemetry_writer._app_started() +telemetry_writer.periodic(force_flush=True) """ _, stderr, status, _ = run_python_code_in_subprocess(code) assert status == 0, stderr assert b"Exception raised in trace filter" in stderr - events = test_agent_session.get_events("app-started", subprocess=True) + events = test_agent_session.get_events("app-started") assert len(events) == 1 app_started_events = [event for event in events if event["request_type"] == "app-started"] assert len(app_started_events) == 1 - assert app_started_events[0]["payload"]["error"]["code"] == 1 - assert ( - "error applying processor <__main__.FailingFilture object at" - not in app_started_events[0]["payload"]["error"]["message"] - ) - assert "error applying processor %r" in app_started_events[0]["payload"]["error"]["message"] - pattern = re.compile(".*ddtrace/_trace/processor/__init__.py/__init__.py:[0-9]+: " "error applying processor %r") - assert pattern.match(app_started_events[0]["payload"]["error"]["message"]), app_started_events[0]["payload"][ - "error" - ]["message"] + + logs_event = test_agent_session.get_events("logs") + error_log = logs_event[0]["payload"]["logs"][0] + assert error_log["message"] == "error applying processor %r to trace %d" + assert error_log["level"] == "ERROR" + assert "in on_span_finish" in error_log["stack_trace"] + assert "spans = tp.process_trace(spans) or []" in error_log["stack_trace"] def test_register_telemetry_excepthook_after_another_hook(test_agent_session, run_python_code_in_subprocess): @@ -209,13 +205,12 @@ def pre_ddtrace_exc_hook(exctype, 
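Note on the new tests above: they pin down a simple lookup order for source-code tagging, where a non-empty DD_GIT_COMMIT_SHA or DD_GIT_REPOSITORY_URL environment variable wins and git is only queried as a fallback (an empty string counts as unset). The following is a minimal sketch of that order, not the library source; `_run_git` is a hypothetical stand-in for the real `_query_git` helper imported in the tests.

import os
import subprocess
from typing import List, Optional


def _run_git(args: List[str]) -> Optional[str]:
    # Hypothetical equivalent of ddtrace.sourcecode._utils._query_git:
    # run a git subcommand and return its trimmed stdout, or None on failure.
    try:
        out = subprocess.check_output(["git"] + args, stderr=subprocess.DEVNULL)
        return out.decode("utf-8").strip() or None
    except (OSError, subprocess.CalledProcessError):
        return None


def get_commit_sha() -> Optional[str]:
    # An empty env var falls through to git, matching the *_env_var_empty cases.
    return os.environ.get("DD_GIT_COMMIT_SHA") or _run_git(["rev-parse", "HEAD"])


def get_repository_url() -> Optional[str]:
    return os.environ.get("DD_GIT_REPOSITORY_URL") or _run_git(["config", "--get", "remote.origin.url"])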
diff --git a/tests/suitespec.yml b/tests/suitespec.yml
index f7a383f2cc9..977f6685e79 100644
--- a/tests/suitespec.yml
+++ b/tests/suitespec.yml
@@ -177,7 +177,7 @@ suites:
     pattern: integration-latest*
     runner: riot
   integration_testagent:
-    parallelism: 2
+    parallelism: 3
     paths:
     - '@tracing'
     - '@bootstrap'
diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py
index 623a23c47d1..783f258cabb 100644
--- a/tests/telemetry/test_telemetry.py
+++ b/tests/telemetry/test_telemetry.py
@@ -1,5 +1,4 @@
 import os
-import re
 
 import pytest
 
@@ -55,12 +54,12 @@ def test_enable_fork(test_agent_session, run_python_code_in_subprocess):
     runtime_id = stdout.strip().decode("utf-8")
 
     # Validate that one app-closing event was sent and it was queued in the parent process
-    app_closing = test_agent_session.get_events("app-closing", subprocess=True)
+    app_closing = test_agent_session.get_events("app-closing")
     assert len(app_closing) == 1
     assert app_closing[0]["runtime_id"] == runtime_id
 
     # Validate that one app-started event was sent and it was queued in the parent process
-    app_started = test_agent_session.get_events("app-started", subprocess=True)
+    app_started = test_agent_session.get_events("app-started")
     assert len(app_started) == 1
     assert app_started[0]["runtime_id"] == runtime_id
 
@@ -93,7 +92,7 @@ def test_enable_fork_heartbeat(test_agent_session, run_python_code_in_subprocess
     assert stderr == b"", stderr
 
     # Allow test agent session to capture all heartbeat events
-    app_heartbeats = test_agent_session.get_events("app-heartbeat", filter_heartbeats=False, subprocess=True)
+    app_heartbeats = test_agent_session.get_events("app-heartbeat", filter_heartbeats=False)
     assert len(app_heartbeats) > 1
 
 
@@ -161,28 +160,25 @@ def process_trace(self, trace):
 # force app_started event (instead of waiting for 10 seconds)
 from ddtrace.internal.telemetry import telemetry_writer
-telemetry_writer._app_started()
+telemetry_writer.periodic(force_flush=True)
 """
     _, stderr, status, _ = run_python_code_in_subprocess(code)
     assert status == 0, stderr
     assert b"Exception raised in trace filter" in stderr
 
-    events = test_agent_session.get_events("app-started", subprocess=True)
+    events = test_agent_session.get_events("app-started")
     assert len(events) == 1
 
     app_started_events = [event for event in events if event["request_type"] == "app-started"]
     assert len(app_started_events) == 1
-    assert app_started_events[0]["payload"]["error"]["code"] == 1
-    assert (
-        "error applying processor <__main__.FailingFilture object at"
-        not in app_started_events[0]["payload"]["error"]["message"]
-    )
-    assert "error applying processor %r" in app_started_events[0]["payload"]["error"]["message"]
-    pattern = re.compile(".*ddtrace/_trace/processor/__init__.py/__init__.py:[0-9]+: " "error applying processor %r")
-    assert pattern.match(app_started_events[0]["payload"]["error"]["message"]), app_started_events[0]["payload"][
-        "error"
-    ]["message"]
+
+    logs_event = test_agent_session.get_events("logs")
+    error_log = logs_event[0]["payload"]["logs"][0]
+    assert error_log["message"] == "error applying processor %r to trace %d"
+    assert error_log["level"] == "ERROR"
+    assert "in on_span_finish" in error_log["stack_trace"]
+    assert "spans = tp.process_trace(spans) or []" in error_log["stack_trace"]
 
 
 def test_register_telemetry_excepthook_after_another_hook(test_agent_session, run_python_code_in_subprocess):
@@ -209,13 +205,12 @@ def pre_ddtrace_exc_hook(exctype, value, traceback):
 
     # Regression test for invalid number of arguments in wrapped exception hook
     assert b"3 positional arguments but 4 were given" not in stderr
-    app_starteds = test_agent_session.get_events("app-started", subprocess=True)
+    app_starteds = test_agent_session.get_events("app-started")
     assert len(app_starteds) == 1
-    # app-started captures unhandled exceptions raised in application code
-    assert app_starteds[0]["payload"]["error"]["code"] == 1
-    assert re.search(r"test\.py:\d+:\sbad_code$", app_starteds[0]["payload"]["error"]["message"]), app_starteds[0][
-        "payload"
-    ]["error"]["message"]
+
+    # the tracer does not capture non ddtrace related errors
+    logs_event = test_agent_session.get_events("logs")
+    assert len(logs_event) == 0
 
 
 def test_handled_integration_error(test_agent_session, run_python_code_in_subprocess):
@@ -239,7 +234,7 @@ def test_handled_integration_error(test_agent_session, run_python_code_in_subpro
     assert status == 0, stderr
     assert b"failed to enable ddtrace support for sqlite3" in stderr
 
-    integrations_events = test_agent_session.get_events("app-integrations-change", subprocess=True)
+    integrations_events = test_agent_session.get_events("app-integrations-change")
     assert len(integrations_events) == 1
     assert (
         integrations_events[0]["payload"]["integrations"][0]["error"] == "module 'sqlite3' has no attribute 'connect'"
@@ -269,13 +264,16 @@ def test_unhandled_integration_error(test_agent_session, ddtrace_run_python_code
 
     assert b"not enough values to unpack (expected 2, got 0)" in stderr, stderr
 
-    app_started_event = test_agent_session.get_events("app-started", subprocess=True)
+    app_started_event = test_agent_session.get_events("app-started")
     assert len(app_started_event) == 1
-    assert app_started_event[0]["payload"]["error"]["code"] == 1
-    assert "ddtrace/contrib/internal/flask/patch.py" in app_started_event[0]["payload"]["error"]["message"]
-    assert "not enough values to unpack (expected 2, got 0)" in app_started_event[0]["payload"]["error"]["message"]
-    integration_events = test_agent_session.get_events("app-integrations-change", subprocess=True)
+
+    logs_event = test_agent_session.get_events("logs")
+    error_log = logs_event[0]["payload"]["logs"][0]
+    assert error_log["message"] == "Unhandled exception from ddtrace code"
+    assert error_log["level"] == "ERROR"
+    assert "patched_wsgi_app" in error_log["stack_trace"]
+
+    integration_events = test_agent_session.get_events("app-integrations-change")
     integrations = integration_events[0]["payload"]["integrations"]
 
     (flask_integration,) = [integration for integration in integrations if integration["name"] == "flask"]
@@ -308,7 +306,7 @@ def test_app_started_with_install_metrics(test_agent_session, run_python_code_in
     _, stderr, status, _ = run_python_code_in_subprocess("import ddtrace", env=env)
     assert status == 0, stderr
 
-    app_started_event = test_agent_session.get_events("app-started", subprocess=True)
+    app_started_event = test_agent_session.get_events("app-started")
    assert len(app_started_event) == 1
     assert app_started_event[0]["payload"]["install_signature"] == {
         "install_id": "68e75c48-57ca-4a12-adfc-575c4b05fcbe",
@@ -331,7 +329,7 @@ def test_instrumentation_telemetry_disabled(test_agent_session, run_python_code_
     """
     _, stderr, status, _ = run_python_code_in_subprocess(code, env=env)
 
-    events = test_agent_session.get_events(subprocess=True)
+    events = test_agent_session.get_events()
     assert len(events) == 0
     assert status == 0, stderr
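A recurring pattern in the telemetry test changes above is triggering delivery explicitly rather than waiting out the periodic interval. A hedged sketch of that call pattern, using only the import path and method that appear verbatim in the diff (running it outside a test harness requires a reachable agent):

from ddtrace.internal.telemetry import telemetry_writer

# Flush any queued telemetry events (app-started, logs, ...) to the agent
# immediately instead of waiting for the next periodic heartbeat.
telemetry_writer.periodic(force_flush=True)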
b/tests/telemetry/test_telemetry_metrics_e2e.py index dcc9cad0cca..8eed0b55426 100644 --- a/tests/telemetry/test_telemetry_metrics_e2e.py +++ b/tests/telemetry/test_telemetry_metrics_e2e.py @@ -5,6 +5,9 @@ import subprocess import sys +import pytest + +from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.internal.utils.retry import RetryError from tests.utils import _build_env from tests.webclient import Client @@ -64,6 +67,7 @@ def parse_payload(data): return json.loads(data) +@pytest.mark.skipif(PYTHON_VERSION_INFO >= (3, 14), reason="Gunicorn doesn't yet work with Python 3.14") def test_telemetry_metrics_enabled_on_gunicorn_child_process(test_agent_session): token = "tests.telemetry.test_telemetry_metrics_e2e.test_telemetry_metrics_enabled_on_gunicorn_child_process" with gunicorn_server(telemetry_metrics_enabled="true", token=token) as context: diff --git a/tests/telemetry/test_writer.py b/tests/telemetry/test_writer.py index 74458d3edbe..55399012d5b 100644 --- a/tests/telemetry/test_writer.py +++ b/tests/telemetry/test_writer.py @@ -4,12 +4,14 @@ import time from typing import Any # noqa:F401 from typing import Dict # noqa:F401 +from typing import Optional # noqa:F401 import httpretty import mock import pytest from ddtrace import config +from ddtrace.internal.compat import PYTHON_VERSION_INFO import ddtrace.internal.telemetry from ddtrace.internal.telemetry.constants import TELEMETRY_APM_PRODUCT from ddtrace.internal.telemetry.constants import TELEMETRY_LOG_LEVEL @@ -18,7 +20,6 @@ from ddtrace.internal.telemetry.writer import TelemetryWriter from ddtrace.internal.telemetry.writer import get_runtime_id from ddtrace.internal.utils.version import _pep440_to_semver -from ddtrace.settings._config import DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP_DEFAULT from ddtrace.settings._telemetry import config as telemetry_config from tests.conftest import DEFAULT_DDTRACE_SUBPROCESS_TEST_SERVICE_NAME from tests.utils import call_program @@ -34,15 +35,18 @@ def test_add_event(telemetry_writer, test_agent_session, mock_time): # send request to the agent telemetry_writer.periodic(force_flush=True) - requests = test_agent_session.get_requests(payload_type) + requests = test_agent_session.get_requests() assert len(requests) == 1 assert requests[0]["headers"]["Content-Type"] == "application/json" assert requests[0]["headers"]["DD-Client-Library-Language"] == "python" assert requests[0]["headers"]["DD-Client-Library-Version"] == _pep440_to_semver() - assert requests[0]["headers"]["DD-Telemetry-Request-Type"] == payload_type + assert requests[0]["headers"]["DD-Telemetry-Request-Type"] == "message-batch" assert requests[0]["headers"]["DD-Telemetry-API-Version"] == "v2" assert requests[0]["headers"]["DD-Telemetry-Debug-Enabled"] == "False" - assert requests[0]["body"] == _get_request_body(payload, payload_type) + + events = test_agent_session.get_events(payload_type) + assert len(events) == 1 + validate_request_body(events[0], payload, payload_type) def test_add_event_disabled_writer(telemetry_writer, test_agent_session): @@ -54,7 +58,7 @@ def test_add_event_disabled_writer(telemetry_writer, test_agent_session): # ensure no request were sent telemetry_writer.periodic(force_flush=True) - assert len(test_agent_session.get_requests(payload_type)) == 1 + assert len(test_agent_session.get_events(payload_type)) == 1 @pytest.mark.parametrize( @@ -88,129 +92,21 @@ def test_app_started_event(telemetry_writer, test_agent_session, mock_time): """asserts that app_started() queues a valid telemetry request 
which is then sent by periodic()""" with override_global_config(dict(_telemetry_dependency_collection=False)): # queue an app started event - telemetry_writer._app_started() + event = telemetry_writer._app_started() + assert event is not None, "app_started() did not return an event" + telemetry_writer.add_event(event["payload"], "app-started") # force a flush telemetry_writer.periodic(force_flush=True) - requests = test_agent_session.get_requests("app-started") + requests = test_agent_session.get_requests() assert len(requests) == 1 - assert requests[0]["headers"]["DD-Telemetry-Request-Type"] == "app-started" - - payload = { - "configuration": sorted( - [ - {"name": "DD_AGENT_HOST", "origin": "unknown", "value": None}, - {"name": "DD_AGENT_PORT", "origin": "unknown", "value": None}, - {"name": "DD_DOGSTATSD_PORT", "origin": "unknown", "value": None}, - {"name": "DD_DOGSTATSD_URL", "origin": "unknown", "value": None}, - {"name": "DD_DYNAMIC_INSTRUMENTATION_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_EXCEPTION_REPLAY_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_FASTAPI_ASYNC_BODY_TIMEOUT_SECONDS", "origin": "default", "value": 0.1}, - {"name": "DD_INSTRUMENTATION_TELEMETRY_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_PROFILING_STACK_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_PROFILING_MEMORY_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_PROFILING_HEAP_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_PROFILING_LOCK_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_PROFILING_CAPTURE_PCT", "origin": "unknown", "value": 1.0}, - {"name": "DD_PROFILING_UPLOAD_INTERVAL", "origin": "unknown", "value": 60.0}, - {"name": "DD_PROFILING_MAX_FRAMES", "origin": "unknown", "value": 64}, - {"name": "DD_REMOTE_CONFIGURATION_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS", "origin": "unknown", "value": 5.0}, - {"name": "DD_RUNTIME_METRICS_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_SERVICE_MAPPING", "origin": "unknown", "value": ""}, - {"name": "DD_SPAN_SAMPLING_RULES", "origin": "unknown", "value": None}, - {"name": "DD_SPAN_SAMPLING_RULES_FILE", "origin": "unknown", "value": None}, - {"name": "DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_TRACE_AGENT_HOSTNAME", "origin": "default", "value": None}, - {"name": "DD_TRACE_AGENT_PORT", "origin": "default", "value": None}, - {"name": "DD_TRACE_AGENT_TIMEOUT_SECONDS", "origin": "unknown", "value": 2.0}, - {"name": "DD_TRACE_API_VERSION", "origin": "unknown", "value": None}, - {"name": "DD_TRACE_CLIENT_IP_ENABLED", "origin": "unknown", "value": None}, - {"name": "DD_TRACE_COMPUTE_STATS", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_DEBUG", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_HEALTH_METRICS_ENABLED", "origin": "unknown", "value": False}, - { - "name": "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP", - "origin": "unknown", - "value": DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP_DEFAULT, - }, - {"name": "DD_TRACE_OTEL_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_PARTIAL_FLUSH_ENABLED", "origin": "unknown", "value": True}, - {"name": "DD_TRACE_PARTIAL_FLUSH_MIN_SPANS", "origin": "unknown", "value": 300}, - { - "name": "DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED", - "origin": "default", - "value": False, - }, - { - "name": "DD_TRACE_PEER_SERVICE_MAPPING", - "origin": "env_var", - "value": 
"default_service:remapped_service", - }, - {"name": "DD_TRACE_PEER_SERVICE_DEFAULTS_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_PEER_SERVICE_MAPPING", "origin": "unknown", "value": ""}, - { - "name": "DD_TRACE_PROPAGATION_STYLE_EXTRACT", - "origin": "unknown", - "value": "datadog,tracecontext", - }, - {"name": "DD_TRACE_PROPAGATION_STYLE_INJECT", "origin": "unknown", "value": "datadog,tracecontext"}, - {"name": "DD_TRACE_RATE_LIMIT", "origin": "unknown", "value": 100}, - {"name": "DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_SPAN_ATTRIBUTE_SCHEMA", "origin": "unknown", "value": "v0"}, - {"name": "DD_TRACE_STARTUP_LOGS", "origin": "unknown", "value": False}, - {"name": "DD_TRACE_WRITER_BUFFER_SIZE_BYTES", "origin": "unknown", "value": 20 << 20}, - {"name": "DD_TRACE_WRITER_INTERVAL_SECONDS", "origin": "unknown", "value": 1.0}, - {"name": "DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES", "origin": "unknown", "value": 20 << 20}, - {"name": "DD_TRACE_WRITER_REUSE_CONNECTIONS", "origin": "unknown", "value": False}, - {"name": "instrumentation_source", "origin": "code", "value": "manual"}, - {"name": "profiling_enabled", "origin": "default", "value": "false"}, - {"name": "data_streams_enabled", "origin": "default", "value": "false"}, - {"name": "appsec_enabled", "origin": "default", "value": "false"}, - {"name": "crashtracking_create_alt_stack", "origin": "unknown", "value": True}, - {"name": "crashtracking_use_alt_stack", "origin": "unknown", "value": True}, - {"name": "crashtracking_available", "origin": "unknown", "value": sys.platform == "linux"}, - {"name": "crashtracking_debug_url", "origin": "unknown", "value": None}, - {"name": "crashtracking_enabled", "origin": "unknown", "value": sys.platform == "linux"}, - {"name": "crashtracking_stacktrace_resolver", "origin": "unknown", "value": "full"}, - {"name": "crashtracking_started", "origin": "unknown", "value": False}, - {"name": "crashtracking_stderr_filename", "origin": "unknown", "value": None}, - {"name": "crashtracking_stdout_filename", "origin": "unknown", "value": None}, - { - "name": "python_build_gnu_type", - "origin": "unknown", - "value": sysconfig.get_config_var("BUILD_GNU_TYPE"), - }, - { - "name": "python_host_gnu_type", - "origin": "unknown", - "value": sysconfig.get_config_var("HOST_GNU_TYPE"), - }, - {"name": "python_soabi", "origin": "unknown", "value": sysconfig.get_config_var("SOABI")}, - {"name": "trace_sample_rate", "origin": "default", "value": "1.0"}, - {"name": "trace_sampling_rules", "origin": "default", "value": ""}, - {"name": "trace_header_tags", "origin": "default", "value": ""}, - {"name": "logs_injection_enabled", "origin": "default", "value": True}, - {"name": "trace_tags", "origin": "default", "value": ""}, - {"name": "trace_enabled", "origin": "default", "value": "true"}, - {"name": "instrumentation_config_id", "origin": "default", "value": ""}, - {"name": "DD_INJECT_FORCE", "origin": "unknown", "value": True}, - {"name": "DD_LIB_INJECTED", "origin": "unknown", "value": False}, - {"name": "DD_LIB_INJECTION_ATTEMPTED", "origin": "unknown", "value": False}, - ], - key=lambda x: x["name"], - ), - "error": { - "code": 0, - "message": "", - }, - } - requests[0]["body"]["payload"]["configuration"].sort(key=lambda c: c["name"]) - result = _get_request_body(payload, "app-started") - result["payload"]["configuration"] = [ - a for a in result["payload"]["configuration"] if a["name"] != "DD_TRACE_AGENT_URL" - ] - assert payload == 
result["payload"] + assert requests[0]["headers"]["DD-Telemetry-Request-Type"] == "message-batch" + app_started_events = test_agent_session.get_events("app-started") + assert len(app_started_events) == 1 + validate_request_body(app_started_events[0], None, "app-started") + assert len(app_started_events[0]["payload"]) == 2 + assert app_started_events[0]["payload"].get("configuration") + assert app_started_events[0]["payload"].get("products") def test_app_started_event_configuration_override(test_agent_session, run_python_code_in_subprocess, tmpdir): @@ -288,9 +184,8 @@ def test_app_started_event_configuration_override(test_agent_session, run_python env["DD_INJECT_FORCE"] = "true" env["DD_INJECTION_ENABLED"] = "tracer" - # By default telemetry collection is enabled after 10 seconds, so we either need to - # to sleep for 10 seconds or manually call _app_started() to generate the app started event. - # This delay allows us to collect start up errors and dynamic configurations + # Ensures app-started event is queued immediately after ddtrace is imported + # instead of waiting for 10 seconds. env["_DD_INSTRUMENTATION_TELEMETRY_TESTS_FORCE_APP_STARTED"] = "true" _, stderr, status, _ = run_python_code_in_subprocess(code, env=env) @@ -433,7 +328,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_PROFILING_AGENTLESS", "origin": "default", "value": False}, {"name": "DD_PROFILING_API_TIMEOUT", "origin": "default", "value": 10.0}, {"name": "DD_PROFILING_CAPTURE_PCT", "origin": "env_var", "value": 5.0}, - {"name": "DD_PROFILING_ENABLED", "origin": "env_var", "value": True}, + {"name": "DD_PROFILING_ENABLED", "origin": "env_var", "value": PYTHON_VERSION_INFO < (3, 14)}, {"name": "DD_PROFILING_ENABLE_ASSERTS", "origin": "default", "value": False}, {"name": "DD_PROFILING_ENABLE_CODE_PROVENANCE", "origin": "default", "value": True}, {"name": "DD_PROFILING_ENDPOINT_COLLECTION_ENABLED", "origin": "default", "value": True}, @@ -451,7 +346,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_PROFILING_PYTORCH_EVENTS_LIMIT", "origin": "default", "value": 1000000}, {"name": "DD_PROFILING_SAMPLE_POOL_CAPACITY", "origin": "default", "value": 4}, {"name": "DD_PROFILING_STACK_ENABLED", "origin": "env_var", "value": False}, - {"name": "DD_PROFILING_STACK_V2_ENABLED", "origin": "default", "value": True}, + {"name": "DD_PROFILING_STACK_V2_ENABLED", "origin": "default", "value": PYTHON_VERSION_INFO < (3, 14)}, {"name": "DD_PROFILING_TAGS", "origin": "default", "value": ""}, {"name": "DD_PROFILING_TIMELINE_ENABLED", "origin": "default", "value": True}, {"name": "DD_PROFILING_UPLOAD_INTERVAL", "origin": "env_var", "value": 10.0}, @@ -519,6 +414,8 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_TRACE_PROPAGATION_STYLE_INJECT", "origin": "env_var", "value": "tracecontext"}, {"name": "DD_TRACE_RATE_LIMIT", "origin": "env_var", "value": 50}, {"name": "DD_TRACE_REPORT_HOSTNAME", "origin": "default", "value": False}, + {"name": "DD_TRACE_RESOURCE_RENAMING_ALWAYS_SIMPLIFIED_ENDPOINT", "origin": "default", "value": False}, + {"name": "DD_TRACE_RESOURCE_RENAMING_ENABLED", "origin": "default", "value": False}, {"name": "DD_TRACE_SAFE_INSTRUMENTATION_ENABLED", "origin": "default", "value": False}, { "name": "DD_TRACE_SAMPLING_RULES", @@ -706,7 +603,7 @@ def test_update_dependencies_event_when_disabled(test_agent_session, ddtrace_run # Import httppretty after ddtrace is imported, this ensures 
that the module is sent in a dependencies event # Imports httpretty twice and ensures only one dependency entry is sent _, stderr, status, _ = ddtrace_run_python_code_in_subprocess("import xmltodict", env=env) - events = test_agent_session.get_events("app-dependencies-loaded", subprocess=True) + events = test_agent_session.get_events("app-dependencies-loaded") assert len(events) == 0, events @@ -734,17 +631,17 @@ def test_update_dependencies_event_not_stdlib(test_agent_session, ddtrace_run_py def test_app_closing_event(telemetry_writer, test_agent_session, mock_time): """asserts that app_shutdown() queues and sends an app-closing telemetry request""" - # app started event must be queued before any other telemetry event - telemetry_writer._app_started(register_app_shutdown=False) - assert telemetry_writer.started + # Telemetry writer must start before app-closing event is queued + telemetry_writer.started = True # send app closed event telemetry_writer.app_shutdown() - requests = test_agent_session.get_requests("app-closing") - assert len(requests) == 1 + num_requests = len(test_agent_session.get_requests()) + assert num_requests == 1 # ensure a valid request body was sent - totel_events = len(test_agent_session.get_events()) - assert requests[0]["body"] == _get_request_body({}, "app-closing", totel_events) + events = test_agent_session.get_events("app-closing") + assert len(events) == 1 + validate_request_body(events[0], {}, "app-closing", num_requests) def test_add_integration(telemetry_writer, test_agent_session, mock_time): @@ -756,12 +653,11 @@ def test_add_integration(telemetry_writer, test_agent_session, mock_time): # send integrations to the agent telemetry_writer.periodic(force_flush=True) - requests = test_agent_session.get_requests("app-integrations-change") + events = test_agent_session.get_events("app-integrations-change") # assert integration change telemetry request was sent - assert len(requests) == 1 - + assert len(events) == 1 # assert that the request had a valid request body - requests[0]["body"]["payload"]["integrations"].sort(key=lambda x: x["name"]) + events[0]["payload"]["integrations"].sort(key=lambda x: x["name"]) expected_payload = { "integrations": [ { @@ -782,7 +678,7 @@ def test_add_integration(telemetry_writer, test_agent_session, mock_time): }, ] } - assert requests[0]["body"] == _get_request_body(expected_payload, "app-integrations-change", seq_id=2) + validate_request_body(events[0], expected_payload, "app-integrations-change") def test_app_client_configuration_changed_event(telemetry_writer, test_agent_session, mock_time): @@ -822,7 +718,7 @@ def test_add_integration_disabled_writer(telemetry_writer, test_agent_session): telemetry_writer.add_integration("integration-name", True, False, "") telemetry_writer.periodic(force_flush=True) - assert len(test_agent_session.get_requests("app-integrations-change")) == 0 + assert len(test_agent_session.get_events("app-integrations-change")) == 0 @pytest.mark.parametrize("mock_status", [300, 400, 401, 403, 500]) @@ -839,7 +735,7 @@ def test_send_failing_request(mock_status, telemetry_writer): telemetry_writer.periodic(force_flush=True) # asserts unsuccessful status code was logged log.debug.assert_called_with( - "Failed to send Instrumentation Telemetry to %s. response: %s", + "Failed to send Instrumentation Telemetry to %s. 
Response: %s", telemetry_writer._client.url, mock_status, ) @@ -888,7 +784,7 @@ def test_app_product_change_event(mock_time, telemetry_writer, test_agent_sessio telemetry_writer.product_activated(TELEMETRY_APM_PRODUCT.APPSEC, True) assert all(telemetry_writer._product_enablement.values()) - telemetry_writer._app_started() + telemetry_writer.periodic(force_flush=True) # Assert that there's only an app_started event (since product activation happened before the app started) events = test_agent_session.get_events("app-product-change") @@ -920,20 +816,21 @@ def test_app_product_change_event(mock_time, telemetry_writer, test_agent_sessio } -def _get_request_body(payload, payload_type, seq_id=1): - # type: (Dict, str, int) -> Dict +def validate_request_body(received_body, payload, payload_type, seq_id=None): + # type: (Dict, Dict, str, Optional[int]) -> Dict """used to test the body of requests received by the testagent""" - return { - "tracer_time": time.time(), - "runtime_id": get_runtime_id(), - "api_version": "v2", - "debug": False, - "seq_id": seq_id, - "application": get_application(config.service, config.version, config.env), - "host": get_host_info(), - "payload": payload, - "request_type": payload_type, - } + assert len(received_body) == 9 + assert received_body["tracer_time"] == time.time() + assert received_body["runtime_id"] == get_runtime_id() + assert received_body["api_version"] == "v2" + assert received_body["debug"] is False + if seq_id is not None: + assert received_body["seq_id"] == seq_id + assert received_body["application"] == get_application(config.service, config.version, config.env) + assert received_body["host"] == get_host_info() + if payload is not None: + assert received_body["payload"] == payload + assert received_body["request_type"] == payload_type def test_telemetry_writer_agent_setup(): @@ -1089,12 +986,14 @@ def test_otel_config_telemetry(test_agent_session, run_python_code_in_subprocess assert tags == [["config_opentelemetry:otel_logs_exporter"]] -def test_add_integration_error_log(mock_time, telemetry_writer, test_agent_session): +def test_add_error_log(mock_time, telemetry_writer, test_agent_session): """Test add_integration_error_log functionality with real stack trace""" try: - raise ValueError("Test exception") - except ValueError as e: - telemetry_writer.add_integration_error_log("Test error message", e) + import json + + json.loads("{invalid: json,}") + except Exception as e: + telemetry_writer.add_error_log("Test error message", e) telemetry_writer.periodic(force_flush=True) log_events = test_agent_session.get_events("logs") @@ -1110,9 +1009,14 @@ def test_add_integration_error_log(mock_time, telemetry_writer, test_agent_sessi stack_trace = log_entry["stack_trace"] expected_lines = [ "Traceback (most recent call last):", - " ", - " ", - "builtins.ValueError: Test exception", + "", # User code gets redacted + ' File "json/__init__.py', + " return _default_decoder.decode(s)", + ' File "json/decoder.py"', + " obj, end = self.raw_decode(s, idx=_w(s, 0).end())", + ' File "json/decoder.py"', + " obj, end = self.scan_once(s, idx)", + "json.decoder.JSONDecodeError: ", ] for expected_line in expected_lines: assert expected_line in stack_trace @@ -1127,27 +1031,32 @@ def test_add_integration_error_log_with_log_collection_disabled(mock_time, telem try: raise ValueError("Test exception") except ValueError as e: - telemetry_writer.add_integration_error_log("Test error message", e) + telemetry_writer.add_error_log("Test error message", e) 
telemetry_writer.periodic(force_flush=True) - log_events = test_agent_session.get_events("logs", subprocess=True) + log_events = test_agent_session.get_events("logs") assert len(log_events) == 0 finally: telemetry_config.LOG_COLLECTION_ENABLED = original_value @pytest.mark.parametrize( - "filename, is_redacted", + "filename, result", [ - ("/path/to/file.py", True), - ("/path/to/ddtrace/contrib/flask/file.py", False), - ("/path/to/dd-trace-something/file.py", True), + ("/path/to/file.py", ""), + ("/path/to/ddtrace/contrib/flask/file.py", ""), + ("/path/to/lib/python3.13/site-packages/ddtrace/_trace/tracer.py", "ddtrace/_trace/tracer.py"), + ("/path/to/lib/python3.13/site-packages/requests/api.py", "requests/api.py"), + ( + "/path/to/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/json/__init__.py", + "json/__init__.py", + ), ], ) -def test_redact_filename(filename, is_redacted): +def test_redact_filename(filename, result): """Test file redaction logic""" writer = TelemetryWriter(is_periodic=False) - assert writer._should_redact(filename) == is_redacted + assert writer._format_file_path(filename) == result def test_telemetry_writer_multiple_sources_config(telemetry_writer, test_agent_session): diff --git a/tests/tracer/test_encoders.py b/tests/tracer/test_encoders.py index 4684d7dadf8..468ce89a03a 100644 --- a/tests/tracer/test_encoders.py +++ b/tests/tracer/test_encoders.py @@ -404,6 +404,69 @@ def test_long_span_start(encoding): assert decode(encoder.encode()[0][0]) is not None +@allencodings +def test_span_encoding_large_resource(encoding): + encoder = MSGPACK_ENCODERS[encoding](1 << 20, 1 << 20) + postgres_query = ( + "INSERT INTO table VALUES " + '(\'x\' * 25001, \'{"top_left": {"x": 0.29411766, "y": 0.123786405}, ' + '"top_right": {"x": 0.38886696, "y": 0.123786405}, ' + '"bottom_right": {"x": 0.38886696, "y": 0.13288835}, ' + '"bottom_left": {"x": 0.29411766, "y": 0.13288835}}\'), ' + '(\'y\' * 25001, \'{"top_left": {"x": 0.12345678, "y": 0.23456789}, ' + '"top_right": {"x": 0.34567890, "y": 0.23456789}, ' + '"bottom_right": {"x": 0.34567890, "y": 0.45678901}, ' + '"bottom_left": {"x": 0.12345678, "y": 0.45678901}}\'), ' + '(\'z\' * 25001, \'{"top_left": {"x": 0.56789012, "y": 0.67890123}, ' + '"top_right": {"x": 0.78901234, "y": 0.67890123}, ' + '"bottom_right": {"x": 0.78901234, "y": 0.89012345}, ' + '"bottom_left": {"x": 0.56789012, "y": 0.89012345}}\'), ' + '(\'a\' * 25001, \'{"top_left": {"x": 0.11111111, "y": 0.22222222}, ' + '"top_right": {"x": 0.33333333, "y": 0.22222222}, ' + '"bottom_right": {"x": 0.33333333, "y": 0.44444444}, ' + '"bottom_left": {"x": 0.11111111, "y": 0.44444444}}\'), ' + '(\'b\' * 25001, \'{"top_left": {"x": 0.55555555, "y": 0.66666666}, ' + '"top_right": {"x": 0.77777777, "y": 0.66666666}, ' + '"bottom_right": {"x": 0.77777777, "y": 0.88888888}, ' + '"bottom_left": {"x": 0.55555555, "y": 0.88888888}}\'), ' + '(\'c\' * 25001, \'{"top_left": {"x": 0.99999999, "y": 0.10101010}, ' + '"top_right": {"x": 0.20202020, "y": 0.10101010}, ' + '"bottom_right": {"x": 0.20202020, "y": 0.30303030}, ' + '"bottom_left": {"x": 0.99999999, "y": 0.30303030}}\'), ' + '(\'d\' * 25001, \'{"top_left": {"x": 0.40404040, "y": 0.50505050}, ' + '"top_right": {"x": 0.60606060, "y": 0.50505050}, ' + '"bottom_right": {"x": 0.60606060, "y": 0.70707070}, ' + '"bottom_left": {"x": 0.40404040, "y": 0.70707070}}\'), ' + '(\'e\' * 25001, \'{"top_left": {"x": 0.80808080, "y": 0.90909090}, ' + '"top_right": {"x": 0.12121212, "y": 0.90909090}, ' + '"bottom_right": 
{"x": 0.12121212, "y": 0.13131313}, ' + '"bottom_left": {"x": 0.80808080, "y": 0.13131313}}\'), ' + '(\'f\' * 25001, \'{"top_left": {"x": 0.14141414, "y": 0.15151515}, ' + '"top_right": {"x": 0.16161616, "y": 0.15151515}, ' + '"bottom_right": {"x": 0.16161616, "y": 0.17171717}, ' + '"bottom_left": {"x": 0.14141414, "y": 0.17171717}}\'), ' + '(\'g\' * 25001, \'{"top_left": {"x": 0.18181818, "y": 0.19191919}, ' + '"top_right": {"x": 0.20202020, "y": 0.19191919}, ' + '"bottom_right": {"x": 0.20202020, "y": 0.21212121}, ' + '"bottom_left": {"x": 0.18181818, "y": 0.21212121}}\'), ' + '(\'h\' * 25001, \'{"top_left": {"x": 0.22222222, "y": 0.23232323}, ' + '"top_right": {"x": 0.24242424, "y": 0.23232323}, ' + '"bottom_right": {"x": 0.24242424, "y": 0.25252525}, ' + '"bottom_left": {"x": 0.22222222, "y": 0.25252525}}\'), ' + '(\'i\' * 25001, \'{"top_left": {"x": 0.26262626, "y": 0.27272727}, ' + '"top_right": {"x": 0.28282828, "y": 0.27272727}, ' + '"bottom_right": {"x": 0.28282828, "y": 0.29292929}, ' + '"bottom_left": {"x": 0.26262626, "y": 0.29292929}}\'), ' + '(\'j\' * 25001, \'{"top_left": {"x": 0.30303030, "y": 0.31313131}, ' + '"top_right": {"x": 0.32323232, "y": 0.31313131}, ' + '"bottom_right": {"x": 0.32323232, "y": 0.33333333}, ' + '"bottom_left": {"x": 0.30303030, "y": 0.33333333}}\')' * 10 + ) + span = Span(name="test-span", resource=postgres_query.encode("utf-8")) + trace = [span] + encoder.put(trace) + + class SubString(str): pass diff --git a/tests/tracer/test_native_logger.py b/tests/tracer/test_native_logger.py new file mode 100644 index 00000000000..3e01c7ad010 --- /dev/null +++ b/tests/tracer/test_native_logger.py @@ -0,0 +1,108 @@ +from contextlib import nullcontext +import os +import uuid + +import pytest + +from ddtrace.internal.native._native import logger + + +@pytest.mark.parametrize( + "output, expected", + [ + ("stdout", nullcontext()), + ("stderr", nullcontext()), + ("file", nullcontext()), + ], +) +def test_logger_disable(output, expected): + logger.configure() + with expected: + logger.disable(output) + + +@pytest.mark.parametrize( + "log_level, expected", + [ + ("trace", nullcontext()), + ("debug", nullcontext()), + ("info", nullcontext()), + ("warn", nullcontext()), + ("error", nullcontext()), + ("invalid", pytest.raises(ValueError)), + ], +) +def test_logger_set_log_level(log_level, expected): + logger.configure() + with expected as ex: + logger.set_log_level(log_level) + if log_level == "invalid": + assert "Invalid log level" in str(ex.value) + + +@pytest.mark.parametrize("output", [None, "stdout", "stderr", "file"]) +@pytest.mark.parametrize("path", [None, "/tmp/log.txt"]) +@pytest.mark.parametrize("files", [None, 2]) +@pytest.mark.parametrize("max_bytes", [None, 4096]) +def test_logger_configure(output, path, files, max_bytes): + if output is None: + kwargs = {} + else: + kwargs = {"output": output} + if path is not None: + kwargs["path"] = path + if files is not None: + kwargs["max_files"] = files + if max_bytes is not None: + kwargs["max_size_bytes"] = bytes + + if output == "file": + if path is None: + with pytest.raises(ValueError): + logger.configure(**kwargs) + else: + logger.configure(**kwargs) + + +LEVELS = ["trace", "debug", "info", "warn", "error"] +cases = [(config, msg, LEVELS.index(msg) >= LEVELS.index(config)) for config in LEVELS for msg in LEVELS] + + +@pytest.mark.parametrize("backend", ["stdout", "stderr", "file"]) +@pytest.mark.parametrize("configured_level, message_level, should_log", cases) +def test_logger_subprocess( + backend, 
configured_level, message_level, should_log, tmp_path, ddtrace_run_python_code_in_subprocess +): + log_path = tmp_path / f"{backend}_{configured_level}_{message_level}.log" + + env = os.environ.copy() + env["_DD_TRACE_WRITER_NATIVE"] = "1" + env["_DD_NATIVE_LOGGING_BACKEND"] = backend + env["_DD_NATIVE_LOGGING_FILE_PATH"] = log_path + env["_DD_NATIVE_LOGGING_LOG_LEVEL"] = configured_level + + message = f"msg_{uuid.uuid4().hex}" + code = """ +from ddtrace.internal.native._native import logger + +message_level = f"{}" +logger.log(message_level, f"{}") + """.format( + message_level, message + ) + out, err, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env) + + assert status == 0 + if backend == "stdout": + found = message in out.decode("utf8") + assert err == b"" + assert found == should_log + elif backend == "stderr": + found = message in err.decode("utf8") + assert out == b"" + assert found == should_log + else: # file + found = message in log_path.read_text() + assert out == b"" + assert err == b"" + assert found == should_log diff --git a/tests/tracer/test_resource_renaming.py b/tests/tracer/test_resource_renaming.py new file mode 100644 index 00000000000..840f5b76541 --- /dev/null +++ b/tests/tracer/test_resource_renaming.py @@ -0,0 +1,122 @@ +import pytest + +from ddtrace._trace.processor.resource_renaming import ResourceRenamingProcessor +from ddtrace.ext import SpanTypes +from ddtrace.ext import http +from ddtrace.trace import Context +from ddtrace.trace import Span +from tests.utils import override_global_config + + +class TestResourceRenaming: + @pytest.mark.parametrize( + "elem,expected", + [ + # Integer patterns + ("123", "{param:int}"), + ("10", "{param:int}"), + ("12345", "{param:int}"), + ("0", "0"), + ("01", "01"), + # Integer ID patterns + ("123.456", "{param:int_id}"), + ("123-456-789", "{param:int_id}"), + ("0123", "{param:int_id}"), + # Hex patterns (require at least one digit) + ("123ABC", "{param:hex}"), + ("a1b2c3", "{param:hex}"), + ("abcdef", "abcdef"), + ("ABCDEF", "ABCDEF"), + ("abcde", "abcde"), + # Hex ID patterns + ("123.ABC", "{param:hex_id}"), + ("a1b2-c3d4", "{param:hex_id}"), + ("abc-def", "abc-def"), + # String patterns + ("this_is_a_very_long_string", "{param:str}"), + ("with%special&chars", "{param:str}"), + ("email@domain.com", "{param:str}"), + ("file.with.dots", "file.with.dots"), + # No match cases + ("users", "users"), + ("short", "short"), + ("xyz123", "xyz123"), + ], + ) + def test_compute_simplified_endpoint_path_element(self, elem, expected): + processor = ResourceRenamingProcessor() + result = processor._compute_simplified_endpoint_path_element(elem) + assert result == expected + + @pytest.mark.parametrize( + "url,expected", + [ + # Basic cases + ("", "/"), + ("http://example.com", "/"), + ("http://example.com/", "/"), + ("/users", "/users"), + ("https://example.com/users", "/users"), + # Query and fragment handling + ("http://example.com/api/users?id=123", "/api/users"), + ("https://example.com/users/123#section", "/users/{param:int}"), + ("https://example.com/users/123?filter=active#top", "/users/{param:int}"), + # Parameter replacement + ("/users/123", "/users/{param:int}"), + ("/users/5", "/users/5"), + ("/users/0123", "/users/{param:int_id}"), + ("/items/123-456", "/items/{param:int_id}"), + ("/commits/abc123", "/commits/{param:hex}"), + ("/sessions/deadbeef", "/sessions/deadbeef"), + ("/items/abc123-def", "/items/{param:hex_id}"), + ("/files/verylongfilename12345", "/files/{param:str}"), + ("/users/user@example", 
"/users/{param:str}"), + # Path limits and edge cases + ("/a/b/c/d/e/f/g/h/i/j/k", "/a/b/c/d/e/f/g/h"), + ("/api//v1///users//123", "/api/v1/users/{param:int}"), + ("///////////////////////", "/"), + # Complex mixed cases + ( + "/api/v2/users/123/posts/abc123/comments/hello%20world", + "/api/v2/users/{param:int}/posts/{param:hex}/comments/{param:str}", + ), + ( + "/12/123-456/abc123/abc-def-123/longstringthathastoomanycharacters", + "/{param:int}/{param:int_id}/{param:hex}/{param:hex_id}/{param:str}", + ), + # Error cases + (None, "/"), + ], + ) + def test_compute_simplified_endpoint(self, url, expected): + processor = ResourceRenamingProcessor() + result = processor._compute_simplified_endpoint(url) + assert result == expected + + def test_processor_with_route(self): + processor = ResourceRenamingProcessor() + span = Span("test", context=Context(), span_type=SpanTypes.WEB) + span.set_tag(http.ROUTE, "/api/users/{id}") + span.set_tag(http.URL, "https://example.com/api/users/123") + + processor.on_span_finish(span) + assert span.get_tag(http.ENDPOINT) is None + + def test_processor_without_route(self): + processor = ResourceRenamingProcessor() + span = Span("test", context=Context(), span_type=SpanTypes.WEB) + span.set_tag(http.URL, "https://example.com/api/users/123") + + processor.on_span_finish(span) + assert span.get_tag(http.ENDPOINT) == "/api/users/{param:int}" + + def test_processor_always_simplified_endpoint(self): + processor = ResourceRenamingProcessor() + with override_global_config(dict(_trace_resource_renaming_always_simplified_endpoint=True)): + span = Span("test", context=Context(), span_type=SpanTypes.WEB) + span.set_tag(http.ROUTE, "/api/users/{id}") + span.set_tag(http.URL, "https://example.com/api/users/123") + + processor.on_span_finish(span) + # Should use simplified endpoint even when route exists + assert span.get_tag(http.ENDPOINT) == "/api/users/{param:int}" diff --git a/tests/tracer/test_writer.py b/tests/tracer/test_writer.py index 371568594cd..75de6dba81f 100644 --- a/tests/tracer/test_writer.py +++ b/tests/tracer/test_writer.py @@ -1171,6 +1171,7 @@ def test_writer_telemetry_enabled_on_linux( for method_name in [ "set_url", + "set_hostname", "set_language", "set_language_version", "set_language_interpreter", diff --git a/tests/utils.py b/tests/utils.py index 3af52545e1c..c4958739be0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -142,6 +142,7 @@ def override_global_config(values): "service", "_raise", "_trace_compute_stats", + "_trace_resource_renaming_always_simplified_endpoint", "_obfuscation_query_string_pattern", "_global_query_string_obfuscation_disabled", "_ci_visibility_agentless_url", @@ -1162,7 +1163,7 @@ def telemetry_requests(self, telemetry_type: Optional[str] = None) -> List[TestA reqs.append(req) return reqs - def crash_reports(self) -> List[TestAgentRequest]: + def crash_messages(self) -> List[TestAgentRequest]: reqs = [] for req in self.telemetry_requests(telemetry_type="logs"): # Parse the json data in order to filter based on "origin" key,