From beea361d74a4e0106271f0acccbd8cc9d3f16ee3 Mon Sep 17 00:00:00 2001
From: "red-hat-konflux[bot]"
 <126015336+red-hat-konflux[bot]@users.noreply.github.com>
Date: Sun, 30 Mar 2025 12:41:48 +0000
Subject: [PATCH] fix(deps): update module github.com/containers/image/v5 to
 v5.34.3

Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com>
---
 go.mod                                        |   55 +-
 go.sum                                        |  138 +-
 vendor/dario.cat/mergo/.gitignore             |    3 +
 vendor/dario.cat/mergo/README.md              |  102 +-
 vendor/dario.cat/mergo/map.go                 |    2 +-
 vendor/dario.cat/mergo/merge.go               |    2 +-
 .../stargz-snapshotter/estargz/testutil.go    |   15 +-
 .../containerd/typeurl/v2/README.md           |    6 +
 .../github.com/containerd/typeurl/v2/types.go |   89 +-
 .../containerd/typeurl/v2/types_gogo.go       |   68 +
 .../image/v5/directory/directory_dest.go      |   12 +-
 .../image/v5/docker/archive/dest.go           |   26 +-
 .../containers/image/v5/docker/body_reader.go |    6 +-
 .../image/v5/docker/daemon/client.go          |   14 +
 .../image/v5/docker/daemon/daemon_dest.go     |   20 +-
 .../image/v5/docker/distribution_error.go     |    8 +-
 .../image/v5/docker/docker_client.go          |  118 +-
 .../image/v5/docker/docker_image.go           |    6 +
 .../image/v5/docker/docker_image_dest.go      |   36 +-
 .../image/v5/docker/docker_image_src.go       |  108 +-
 .../containers/image/v5/docker/errors.go      |    1 +
 .../image/v5/docker/internal/tarfile/dest.go  |   29 +-
 .../image/v5/docker/registries_d.go           |    6 +
 .../internal/blobinfocache/blobinfocache.go   |    9 +-
 .../image/v5/internal/blobinfocache/types.go  |   42 +-
 .../internal/imagedestination/impl/compat.go  |   13 +
 .../stubs/original_oci_config.go              |   16 +
 .../stubs/put_blob_partial.go                 |    5 +-
 .../v5/internal/imagedestination/wrapper.go   |   12 +
 .../internal/manifest/docker_schema2_list.go  |   37 +-
 .../image/v5/internal/manifest/manifest.go    |    5 -
 .../image/v5/internal/manifest/oci_index.go   |   60 +-
 .../internal/pkg/platform/platform_matcher.go |    4 +-
 .../image/v5/internal/private/private.go      |   55 +-
 .../v5/internal/reflink/reflink_linux.go      |   22 +
 .../internal/reflink/reflink_unsupported.go   |   15 +
 .../image/v5/manifest/docker_schema1.go       |    6 +-
 .../image/v5/manifest/docker_schema2.go       |    2 +-
 .../containers/image/v5/manifest/oci.go       |    4 +-
 .../image/v5/oci/archive/oci_dest.go          |   27 +-
 .../image/v5/oci/internal/oci_util.go         |   31 +-
 .../image/v5/oci/layout/oci_delete.go         |  121 +-
 .../image/v5/oci/layout/oci_dest.go           |  108 +-
 .../image/v5/oci/layout/oci_transport.go      |   75 +-
 .../containers/image/v5/oci/layout/reader.go  |   52 +
 .../image/v5/openshift/openshift-copies.go    |    6 +-
 .../image/v5/openshift/openshift_dest.go      |   27 +-
 .../containers/image/v5/ostree/ostree_dest.go |    6 +-
 .../containers/image/v5/ostree/ostree_src.go  |    6 +-
 .../image/v5/pkg/blobinfocache/none/none.go   |   13 +
 .../image/v5/pkg/compression/compression.go   |   14 +-
 .../v5/pkg/compression/internal/types.go      |    9 +
 .../sysregistriesv2/system_registries_v2.go   |   26 +
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go |   10 +-
 .../image/v5/storage/storage_dest.go          |  944 +-
 .../image/v5/storage/storage_reference.go     |    6 +-
 .../image/v5/storage/storage_src.go           |   28 +-
 .../image/v5/tarball/tarball_src.go           |   65 +-
 .../image/v5/tarball/tarball_transport.go     |    4 +-
 .../containers/image/v5/types/types.go        |    1 +
 .../containers/image/v5/version/version.go    |    4 +-
 .../github.com/containers/storage/.cirrus.yml |   23 +-
 .../containers/storage/.codespellrc           |    3 +
 .../containers/storage/.golangci.yml          |    6 +-
 vendor/github.com/containers/storage/Makefile |   11 +-
 vendor/github.com/containers/storage/OWNERS   |   20 +-
 vendor/github.com/containers/storage/VERSION  |    2 +-
 vendor/github.com/containers/storage/check.go |   41 +-
 .../containers/storage/containers.go          |   52 +-
 .../containers/storage/drivers/aufs/aufs.go   |   49 +-
 .../containers/storage/drivers/aufs/dirs.go   |    1 -
 .../containers/storage/drivers/aufs/mount.go  |    1 -
 .../containers/storage/drivers/btrfs/btrfs.go |   32 +-
 .../drivers/btrfs/dummy_unsupported.go        |    1 -
 .../storage/drivers/btrfs/version.go          |    1 -
 .../storage/drivers/btrfs/version_none.go     |    1 -
 .../containers/storage/drivers/chown.go       |   10 +-
 .../storage/drivers/chown_darwin.go           |    3 +-
 .../containers/storage/drivers/chown_unix.go  |    3 +-
 .../storage/drivers/chown_windows.go          |    1 -
 .../containers/storage/drivers/chroot_unix.go |    3 +-
 .../storage/drivers/copy/copy_linux.go        |   16 +-
 .../storage/drivers/copy/copy_unsupported.go  |    1 -
 .../containers/storage/drivers/driver.go      |   50 +-
 .../storage/drivers/driver_linux.go           |    1 -
 .../storage/drivers/driver_solaris.go         |    1 -
 .../storage/drivers/driver_unsupported.go     |    1 -
 .../containers/storage/drivers/fsdiff.go      |    5 +-
 .../storage/drivers/overlay/check.go          |    7 +-
 .../storage/drivers/overlay/check_116.go      |    4 +-
 .../storage/drivers/overlay/composefs.go      |  135 +-
 .../storage/drivers/overlay/jsoniter.go       |    1 -
 .../storage/drivers/overlay/mount.go          |   13 +-
 .../storage/drivers/overlay/overlay.go        |  555 +-
 .../{overlay_cgo.go => overlay_disk_quota.go} |    3 +-
 .../overlay/overlay_disk_quota_unsupported.go |   16 +
 .../storage/drivers/overlay/overlay_nocgo.go  |   30 -
 .../drivers/overlay/overlay_unsupported.go    |    1 -
 .../storage/drivers/overlay/randomid.go       |    1 -
 .../drivers/overlayutils/overlayutils.go      |    1 -
 .../drivers/quota/projectquota_supported.go   |   38 +-
 .../drivers/quota/projectquota_unsupported.go |    1 -
 .../storage/drivers/register/register_aufs.go |    1 -
 .../drivers/register/register_btrfs.go        |    1 -
 .../drivers/register/register_overlay.go      |    3 +-
 .../storage/drivers/register/register_zfs.go  |    1 -
 .../storage/drivers/vfs/copy_unsupported.go   |    1 -
 .../containers/storage/drivers/vfs/driver.go  |   48 +-
 .../storage/drivers/windows/windows.go        |    9 +-
 .../containers/storage/drivers/zfs/zfs.go     |   42 +-
 .../storage/drivers/zfs/zfs_unsupported.go    |    1 -
 .../github.com/containers/storage/images.go   |   72 +-
 .../storage/internal/dedup/dedup.go           |  163 +
 .../storage/internal/dedup/dedup_linux.go     |  139 +
 .../internal/dedup/dedup_unsupported.go       |   27 +
 .../github.com/containers/storage/layers.go   |  198 +-
 .../containers/storage/lockfile_compat.go     |    2 +-
 .../containers/storage/pkg/archive/archive.go |  129 +-
 .../storage/pkg/archive/archive_110.go        |    1 -
 .../storage/pkg/archive/archive_19.go         |    1 -
 .../storage/pkg/archive/archive_bsd.go        |    3 +-
 .../storage/pkg/archive/archive_linux.go      |    7 +-
 .../storage/pkg/archive/archive_other.go      |    1 -
 .../storage/pkg/archive/archive_unix.go       |    1 -
 .../storage/pkg/archive/archive_windows.go    |    1 -
 .../containers/storage/pkg/archive/changes.go |   11 +-
 .../storage/pkg/archive/changes_linux.go      |   19 +-
 .../storage/pkg/archive/changes_other.go      |    3 +-
 .../storage/pkg/archive/changes_unix.go       |    5 +-
 .../storage/pkg/archive/copy_unix.go          |    1 -
 .../storage/pkg/archive/fflags_bsd.go         |    5 +-
 .../storage/pkg/archive/fflags_unsupported.go |    1 -
 .../containers/storage/pkg/archive/filter.go  |   73 +
 .../storage/pkg/archive/time_unsupported.go   |    1 -
 .../storage/pkg/chrootarchive/archive.go      |   10 +-
 .../pkg/chrootarchive/archive_darwin.go       |   22 +-
 .../storage/pkg/chrootarchive/archive_unix.go |   97 +-
 .../pkg/chrootarchive/archive_windows.go      |   21 +-
 .../storage/pkg/chrootarchive/chroot_linux.go |    7 +-
 .../storage/pkg/chrootarchive/chroot_unix.go  |    1 -
 .../storage/pkg/chrootarchive/diff_unix.go    |    7 +-
 .../storage/pkg/chrootarchive/init_unix.go    |    1 -
 .../storage/pkg/chrootarchive/jsoniter.go     |    1 -
 ...{bloom_filter.go => bloom_filter_linux.go} |    9 +
 .../storage/pkg/chunked/cache_linux.go        |   83 +-
 .../storage/pkg/chunked/compression.go        |   18 +-
 .../storage/pkg/chunked/compression_linux.go  |  358 +-
 .../pkg/chunked/compressor/compressor.go      |   51 +-
 .../storage/pkg/chunked/dump/dump.go          |  133 +-
 .../storage/pkg/chunked/filesystem_linux.go   |  646 ++
 .../internal/{ => minimal}/compression.go     |  118 +-
 .../storage/pkg/chunked/internal/path/path.go |   27 +
 .../containers/storage/pkg/chunked/storage.go |   19 +
 .../storage/pkg/chunked/storage_linux.go      | 1283 +--
 .../pkg/chunked/storage_unsupported.go        |    3 +-
 .../containers/storage/pkg/chunked/toc/toc.go |    4 +-
 .../containers/storage/pkg/config/config.go   |   11 -
 .../storage/pkg/directory/directory_unix.go   |   10 +-
 .../pkg/directory/directory_windows.go        |    8 +-
 .../storage/pkg/fileutils/exists_freebsd.go   |   38 +
 .../storage/pkg/fileutils/exists_unix.go      |    3 +-
 .../storage/pkg/fileutils/fileutils_unix.go   |    1 -
 .../storage/pkg/fsutils/fsutils_linux.go      |    1 -
 .../pkg/fsverity/fsverity_unsupported.go      |    1 -
 .../storage/pkg/homedir/homedir_others.go     |   38 -
 .../storage/pkg/homedir/homedir_unix.go       |    1 -
 .../storage/pkg/idmap/idmapped_utils.go       |   34 +-
 .../pkg/idmap/idmapped_utils_unsupported.go   |    1 -
 .../containers/storage/pkg/idtools/idtools.go |  199 +-
 .../storage/pkg/idtools/idtools_supported.go  |    1 -
 .../storage/pkg/idtools/idtools_unix.go       |    1 -
 .../pkg/idtools/idtools_unsupported.go        |    1 -
 .../storage/pkg/idtools/idtools_windows.go    |    1 -
 .../pkg/idtools/usergroupadd_unsupported.go   |    1 -
 .../storage/pkg/idtools/utils_unix.go         |    1 -
 .../storage/pkg/ioutils/fswriters.go          |    5 +-
 .../storage/pkg/ioutils/fswriters_other.go    |    1 -
 .../storage/pkg/ioutils/temp_unix.go          |    1 -
 .../storage/pkg/ioutils/temp_windows.go       |    1 -
 .../containers/storage/pkg/ioutils/writers.go |    4 +-
 .../storage/pkg/lockfile/lockfile.go          |   17 +-
 .../storage/pkg/lockfile/lockfile_unix.go     |    3 +-
 .../storage/pkg/lockfile/lockfile_windows.go  |    1 -
 .../storage/pkg/loopback/attach_loopback.go   |   83 +-
 .../containers/storage/pkg/loopback/ioctl.go  |    3 +-
 .../storage/pkg/loopback/loop_wrapper.go      |   39 +-
 .../storage/pkg/loopback/loopback.go          |    3 +-
 .../containers/storage/pkg/mount/flags.go     |   12 +-
 .../storage/pkg/mount/flags_unsupported.go    |    1 -
 .../storage/pkg/mount/mounter_freebsd.go      |   11 +-
 .../storage/pkg/mount/mounter_unsupported.go  |    2 -
 .../storage/pkg/mount/mountinfo_linux.go      |   17 +-
 .../storage/pkg/mount/unmount_unix.go         |    3 +-
 .../storage/pkg/mount/unmount_unsupported.go  |    1 -
 .../containers/storage/pkg/parsers/parsers.go |   14 +-
 .../storage/pkg/reexec/command_freebsd.go     |    1 -
 .../storage/pkg/reexec/command_linux.go       |    1 -
 .../storage/pkg/reexec/command_unix.go        |    1 -
 .../storage/pkg/reexec/command_unsupported.go |    1 -
 .../storage/pkg/reexec/command_windows.go     |    1 -
 .../pkg/regexp/regexp_dontprecompile.go       |    1 -
 .../storage/pkg/regexp/regexp_precompile.go   |    1 -
 .../storage/pkg/stringutils/stringutils.go    |    6 +-
 .../storage/pkg/system/chtimes_unix.go        |    1 -
 .../storage/pkg/system/chtimes_windows.go     |    1 -
 .../storage/pkg/system/extattr_freebsd.go     |   93 +
 .../storage/pkg/system/extattr_unsupported.go |   24 +
 .../storage/pkg/system/lchflags_bsd.go        |    1 -
 .../storage/pkg/system/lcow_unix.go           |    1 -
 .../storage/pkg/system/lstat_unix.go          |    1 -
 .../storage/pkg/system/meminfo_freebsd.go     |    1 -
 .../storage/pkg/system/meminfo_solaris.go     |    1 -
 .../storage/pkg/system/meminfo_unsupported.go |    4 -
 .../containers/storage/pkg/system/mknod.go    |    1 -
 .../storage/pkg/system/mknod_freebsd.go       |    1 -
 .../storage/pkg/system/mknod_windows.go       |    1 -
 .../storage/pkg/system/path_unix.go           |    1 -
 .../storage/pkg/system/path_windows.go        |    1 -
 .../storage/pkg/system/process_unix.go        |    1 -
 .../storage/pkg/system/rm_common.go           |    1 -
 .../storage/pkg/system/stat_common.go         |    1 -
 .../storage/pkg/system/stat_netbsd.go         |   13 +
 .../storage/pkg/system/stat_unix.go           |    1 -
 .../storage/pkg/system/syscall_unix.go        |    3 +-
 .../containers/storage/pkg/system/umask.go    |    1 -
 .../storage/pkg/system/umask_windows.go       |    1 -
 .../storage/pkg/system/utimes_unsupported.go  |    1 -
 .../storage/pkg/system/xattrs_darwin.go       |    2 +-
 .../storage/pkg/system/xattrs_freebsd.go      |   85 +
 .../storage/pkg/system/xattrs_linux.go        |    2 +-
 .../storage/pkg/system/xattrs_unsupported.go  |    5 +-
 .../storage/pkg/unshare/getenv_linux_cgo.go   |    1 -
 .../storage/pkg/unshare/getenv_linux_nocgo.go |    1 -
 .../storage/pkg/unshare/unshare_cgo.go        |    1 -
 .../storage/pkg/unshare/unshare_darwin.go     |    1 -
 .../storage/pkg/unshare/unshare_freebsd.go    |    1 -
 .../storage/pkg/unshare/unshare_gccgo.go      |    1 -
 .../storage/pkg/unshare/unshare_linux.go      |   15 +-
 .../pkg/unshare/unshare_unsupported.go        |    1 -
 .../pkg/unshare/unshare_unsupported_cgo.go    |    1 -
 .../containers/storage/storage.conf           |  105 +-
 .../containers/storage/storage.conf-freebsd   |   21 -
 vendor/github.com/containers/storage/store.go |  454 +-
 .../containers/storage/types/options.go       |   33 +-
 .../{options_freebsd.go => options_bsd.go}    |    4 +-
 .../storage/types/options_darwin.go           |    2 +-
 .../containers/storage/types/options_linux.go |    2 +-
 .../storage/types/options_windows.go          |    2 +-
 .../storage/types/storage_test.conf           |   10 -
 .../containers/storage/types/utils.go         |    5 +-
 .../github.com/containers/storage/userns.go   |   87 +-
 .../containers/storage/userns_unsupported.go  |   14 +
 vendor/github.com/containers/storage/utils.go |   15 +-
 .../cyphar/filepath-securejoin/CHANGELOG.md   |   73 +-
 .../cyphar/filepath-securejoin/README.md      |    3 +-
 .../cyphar/filepath-securejoin/VERSION        |    2 +-
 .../cyphar/filepath-securejoin/doc.go         |   39 +
 .../gocompat_errors_go120.go                  |   18 +
 .../gocompat_errors_unsupported.go            |   38 +
 .../gocompat_generics_go121.go                |   32 +
 .../gocompat_generics_unsupported.go          |  124 +
 .../cyphar/filepath-securejoin/join.go        |   21 +-
 .../filepath-securejoin/lookup_linux.go       |    3 +-
 .../cyphar/filepath-securejoin/mkdir_linux.go |  120 +-
 .../cyphar/filepath-securejoin/open_linux.go  |   14 +-
 .../filepath-securejoin/openat2_linux.go      |   34 +-
 .../filepath-securejoin/procfs_linux.go       |  168 +-
 .../testing_mocks_linux.go                    |   68 -
 .../cyphar/filepath-securejoin/vfs.go         |   24 +-
 .../registry/client/auth/challenge/addr.go    |   27 -
 .../client/auth/challenge/authchallenge.go    |  237 -
 .../github.com/docker/docker/api/swagger.yaml |   32 +-
 .../docker/api/types/container/hostconfig.go  |    2 +-
 .../docker/docker/api/types/types.go          |    2 +
 .../github.com/docker/docker/client/client.go |   10 +-
 .../github.com/docker/docker/client/ping.go   |    4 +-
 .../docker/pkg/jsonmessage/jsonmessage.go     |    2 +-
 .../klauspost/compress/.goreleaser.yml        |    6 +-
 .../github.com/klauspost/compress/README.md   |    7 +
 .../klauspost/compress/zstd/encoder.go        |   26 +-
 .../klauspost/compress/zstd/zstd.go           |    4 +
 .../moby/sys/capability/CHANGELOG.md          |  124 +
 .../sys/capability}/LICENSE                   |    1 +
 .../github.com/moby/sys/capability/README.md  |   13 +
 .../moby/sys/capability/capability.go         |  176 +
 .../sys}/capability/capability_linux.go       |  339 +-
 .../moby/sys/capability/capability_noop.go    |   46 +
 .../sys}/capability/enum.go                   |   37 +-
 .../sys}/capability/enum_gen.go               |    5 +-
 .../sys}/capability/syscall_linux.go          |   31 +-
 .../selinux/go-selinux/label/label_linux.go   |    5 +-
 .../selinux/go-selinux/label/label_stub.go    |   16 +-
 .../selinux/go-selinux/selinux_linux.go       |   15 +-
 .../selinux/go-selinux/selinux_stub.go        |   46 +-
 .../selinux/go-selinux/xattrs_linux.go        |    4 +-
 .../selinux/pkg/pwalk/README.md               |   48 -
 .../opencontainers/selinux/pkg/pwalk/pwalk.go |  123 -
 .../selinux/pkg/pwalkdir/README.md            |    6 +-
 .../selinux/pkg/pwalkdir/pwalkdir.go          |    7 +
 .../sylabs/sif/v2/pkg/sif/create.go           |   42 +-
 .../gocapability/capability/capability.go     |  133 -
 .../capability/capability_noop.go             |   19 -
 .../vbatts/tar-split/archive/tar/format.go    |    4 +
 .../vbatts/tar-split/archive/tar/reader.go    |   14 +-
 .../net/http/otelhttp/common.go               |    7 -
 .../net/http/otelhttp/config.go               |   15 +-
 .../net/http/otelhttp/handler.go              |   93 +-
 .../otelhttp/internal/request/body_wrapper.go |   75 +
 .../internal/request/resp_writer_wrapper.go   |  112 +
 .../net/http/otelhttp/internal/semconv/env.go |   97 +-
 .../semconv/{v1.24.0.go => httpconv.go}       |  153 +-
 .../http/otelhttp/internal/semconv/util.go    |   11 +-
 .../http/otelhttp/internal/semconv/v1.20.0.go |  118 +
 .../otelhttp/internal/semconvutil/netconv.go  |    2 +-
 .../net/http/otelhttp/transport.go            |   64 +-
 .../net/http/otelhttp/version.go              |    2 +-
 .../instrumentation/net/http/otelhttp/wrap.go |   99 -
 vendor/go.opentelemetry.io/otel/.golangci.yml |   13 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md  |  121 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS    |    6 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md  |   18 +-
 vendor/go.opentelemetry.io/otel/Makefile      |   22 +-
 vendor/go.opentelemetry.io/otel/README.md     |   34 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md  |   12 +-
 .../go.opentelemetry.io/otel/attribute/set.go |   40 +-
 .../otel/baggage/baggage.go                   |  150 +-
 .../go.opentelemetry.io/otel/codes/codes.go   |    2 +-
 vendor/go.opentelemetry.io/otel/doc.go        |    2 +
 .../otel/internal/global/meter.go             |  317 +-
 .../otel/internal/rawhelpers.go               |   12 +-
 .../otel/metric/asyncfloat64.go               |    2 +-
 .../otel/metric/asyncint64.go                 |    2 +-
 .../otel/metric/instrument.go                 |    2 +-
 .../go.opentelemetry.io/otel/metric/meter.go  |   13 +
 .../otel/metric/noop/README.md                |    3 +
 .../otel/metric/noop/noop.go                  |  281 +
 vendor/go.opentelemetry.io/otel/renovate.json |    8 +
 .../otel/semconv/v1.24.0/README.md            |    3 -
 .../otel/semconv/v1.24.0/attribute_group.go   | 4387 --------
 .../otel/semconv/v1.24.0/event.go             |  200 -
 .../otel/semconv/v1.24.0/resource.go          | 2545 -----
 .../otel/semconv/v1.24.0/trace.go             | 1323 ---
 .../otel/semconv/v1.26.0/README.md            |    3 +
 .../otel/semconv/v1.26.0/attribute_group.go   | 8996 +++++++++++++++++
 .../otel/semconv/{v1.24.0 => v1.26.0}/doc.go  |    4 +-
 .../semconv/{v1.24.0 => v1.26.0}/exception.go |    2 +-
 .../semconv/{v1.24.0 => v1.26.0}/metric.go    |  466 +-
 .../semconv/{v1.24.0 => v1.26.0}/schema.go    |    4 +-
 .../go.opentelemetry.io/otel/trace/context.go |    2 +-
 vendor/go.opentelemetry.io/otel/trace/doc.go  |    2 +-
 .../otel/trace/provider.go                    |   59 +
 vendor/go.opentelemetry.io/otel/trace/span.go |  177 +
 .../go.opentelemetry.io/otel/trace/trace.go   |  249 -
 .../go.opentelemetry.io/otel/trace/tracer.go  |   37 +
 .../otel/trace/tracestate.go                  |   10 +
 .../otel/verify_examples.sh                   |   74 -
 .../otel/verify_released_changelog.sh         |   42 +
 vendor/go.opentelemetry.io/otel/version.go    |    2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml |   10 +-
 vendor/golang.org/x/exp/LICENSE               |    4 +-
 vendor/golang.org/x/exp/maps/maps.go          |   58 +-
 vendor/golang.org/x/mod/LICENSE               |    4 +-
 vendor/golang.org/x/mod/modfile/read.go       |    7 +-
 vendor/golang.org/x/mod/modfile/rule.go       |   80 +-
 vendor/golang.org/x/mod/modfile/work.go       |    2 +-
 vendor/golang.org/x/mod/module/module.go      |    2 -
 .../grpc/grpclog/internal/loggerv2.go         |  107 +-
 .../grpc/internal/internal.go                 |   30 +-
 .../grpc/internal/status/status.go            |   35 +-
 .../protobuf/encoding/protojson/decode.go     |    2 +-
 .../protobuf/encoding/protojson/encode.go     |    4 +-
 .../encoding/protojson/well_known_types.go    |    6 +-
 .../protobuf/internal/descopts/options.go     |   20 +-
 .../editiondefaults/editions_defaults.binpb   |  Bin 93 -> 138 bytes
 .../protobuf/internal/errors/is_go112.go      |   40 -
 .../protobuf/internal/errors/is_go113.go      |   13 -
 .../protobuf/internal/filedesc/desc.go        |   22 +
 .../protobuf/internal/filedesc/desc_init.go   |    2 +
 .../protobuf/internal/filedesc/desc_lazy.go   |    2 +
 .../protobuf/internal/filedesc/editions.go    |   10 +-
 .../protobuf/internal/genid/doc.go            |    2 +-
 .../internal/genid/go_features_gen.go         |   49 +-
 .../protobuf/internal/genid/map_entry.go      |    2 +-
 .../protobuf/internal/genid/name.go           |   12 +
 .../protobuf/internal/genid/wrappers.go       |    2 +-
 .../internal/impl/api_export_opaque.go        |  128 +
 .../protobuf/internal/impl/bitmap.go          |   34 +
 .../protobuf/internal/impl/bitmap_race.go     |  126 +
 .../protobuf/internal/impl/checkinit.go       |   33 +
 .../protobuf/internal/impl/codec_extension.go |   11 +-
 .../protobuf/internal/impl/codec_field.go     |    3 +
 .../internal/impl/codec_field_opaque.go       |  264 +
 .../protobuf/internal/impl/codec_message.go   |   16 +
 .../internal/impl/codec_message_opaque.go     |  156 +
 .../protobuf/internal/impl/codec_reflect.go   |  210 -
 .../protobuf/internal/impl/codec_unsafe.go    |    3 -
 .../protobuf/internal/impl/convert.go         |    2 +-
 .../protobuf/internal/impl/decode.go          |   56 +-
 .../protobuf/internal/impl/encode.go          |   80 +-
 .../protobuf/internal/impl/equal.go           |  224 +
 .../protobuf/internal/impl/lazy.go            |  433 +
 .../internal/impl/legacy_extension.go         |    1 +
 .../protobuf/internal/impl/merge.go           |   27 +
 .../protobuf/internal/impl/message.go         |   16 +-
 .../protobuf/internal/impl/message_opaque.go  |  632 ++
 .../internal/impl/message_opaque_gen.go       |  132 +
 .../protobuf/internal/impl/message_reflect.go |    5 +
 .../internal/impl/message_reflect_field.go    |   32 +-
 .../impl/message_reflect_field_gen.go         |  273 +
 .../protobuf/internal/impl/pointer_reflect.go |  215 -
 .../protobuf/internal/impl/pointer_unsafe.go  |   12 +-
 .../internal/impl/pointer_unsafe_opaque.go    |   42 +
 .../protobuf/internal/impl/presence.go        |  142 +
 .../protobuf/internal/impl/validate.go        |   16 +
 .../internal/protolazy/bufferreader.go        |  364 +
 .../protobuf/internal/protolazy/lazy.go       |  359 +
 .../internal/protolazy/pointer_unsafe.go      |   17 +
 .../protobuf/internal/strs/strings_pure.go    |   28 -
 .../internal/strs/strings_unsafe_go120.go     |    3 +-
 .../internal/strs/strings_unsafe_go121.go     |    3 +-
 .../protobuf/internal/version/version.go      |    2 +-
 .../protobuf/proto/decode.go                  |   16 +
 .../protobuf/proto/encode.go                  |    3 +-
 .../google.golang.org/protobuf/proto/equal.go |    9 +
 .../protobuf/proto/extension.go               |   71 +
 .../google.golang.org/protobuf/proto/size.go  |    8 +
 .../protobuf/proto/wrapperopaque.go           |   80 +
 .../protobuf/reflect/protoreflect/methods.go  |   10 +
 .../protobuf/reflect/protoreflect/value.go    |    2 +-
 .../reflect/protoreflect/value_pure.go        |   60 -
 .../protoreflect/value_unsafe_go120.go        |    3 +-
 .../protoreflect/value_unsafe_go121.go        |    3 +-
 .../protobuf/runtime/protoiface/methods.go    |   34 +
 .../protobuf/runtime/protoimpl/impl.go        |    4 +
 .../protobuf/types/known/anypb/any.pb.go      |   33 +-
 vendor/modules.txt                            |   96 +-
 436 files changed, 22883 insertions(+), 14474 deletions(-)
 create mode 100644 vendor/github.com/containerd/typeurl/v2/types_gogo.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go
 create mode 100644 vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go
 create mode 100644 vendor/github.com/containers/image/v5/oci/layout/reader.go
 create mode 100644 vendor/github.com/containers/storage/.codespellrc
 rename vendor/github.com/containers/storage/drivers/overlay/{overlay_cgo.go => overlay_disk_quota.go} (92%)
 create mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go
 delete mode 100644 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
 create mode 100644 vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/archive/filter.go
 rename vendor/github.com/containers/storage/pkg/chunked/{bloom_filter.go => bloom_filter_linux.go} (87%)
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
 rename vendor/github.com/containers/storage/pkg/chunked/internal/{ => minimal}/compression.go (59%)
 create mode 100644 vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go
 delete mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_netbsd.go
 create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go
 rename vendor/github.com/containers/storage/types/{options_freebsd.go => options_bsd.go} (90%)
 create mode 100644 vendor/github.com/containers/storage/userns_unsupported.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/doc.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go
 create mode 100644 vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go
 delete mode 100644 vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
 create mode 100644 vendor/github.com/moby/sys/capability/CHANGELOG.md
 rename vendor/github.com/{syndtr/gocapability => moby/sys/capability}/LICENSE (97%)
 create mode 100644 vendor/github.com/moby/sys/capability/README.md
 create mode 100644 vendor/github.com/moby/sys/capability/capability.go
 rename vendor/github.com/{syndtr/gocapability => moby/sys}/capability/capability_linux.go (65%)
 create mode 100644 vendor/github.com/moby/sys/capability/capability_noop.go
 rename vendor/github.com/{syndtr/gocapability => moby/sys}/capability/enum.go (91%)
 rename vendor/github.com/{syndtr/gocapability => moby/sys}/capability/enum_gen.go (94%)
 rename vendor/github.com/{syndtr/gocapability => moby/sys}/capability/syscall_linux.go (68%)
 delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
 delete mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
 delete mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go
 delete mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
 rename vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/{v1.24.0.go => httpconv.go} (57%)
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/doc.go (96%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/exception.go (98%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/metric.go (77%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/schema.go (85%)
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
 delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/genid/name.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/presence.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
 create mode 100644 vendor/google.golang.org/protobuf/proto/wrapperopaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go

diff --git a/go.mod b/go.mod
index 10082a6cb..f95ea164e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,11 +1,13 @@
 module github.com/openshift/source-to-image
 
-go 1.22.0
+go 1.22.8
+
+toolchain go1.22.9
 
 require (
-	github.com/containers/image/v5 v5.31.1
+	github.com/containers/image/v5 v5.34.3
 	github.com/distribution/reference v0.6.0
-	github.com/docker/docker v27.3.1+incompatible
+	github.com/docker/docker v27.5.1+incompatible
 	github.com/docker/go-connections v0.5.0
 	github.com/go-imports-organizer/goio v1.3.3
 	github.com/moby/buildkit v0.16.0
@@ -17,7 +19,7 @@ require (
 )
 
 require (
-	dario.cat/mergo v1.0.0 // indirect
+	dario.cat/mergo v1.0.1 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -25,12 +27,12 @@ require (
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/errdefs v0.3.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
-	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
+	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.2.0 // indirect
-	github.com/containers/storage v1.54.0 // indirect
-	github.com/cyphar/filepath-securejoin v0.3.1 // indirect
+	github.com/containers/ocicrypt v1.2.1 // indirect
+	github.com/containers/storage v1.57.2 // indirect
+	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
@@ -39,7 +41,7 @@ require (
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/go-containerregistry v0.20.1 // indirect
+	github.com/google/go-containerregistry v0.20.2 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
@@ -47,11 +49,12 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.10 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/moby/sys/capability v0.4.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.2 // indirect
 	github.com/moby/sys/user v0.3.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
@@ -60,27 +63,25 @@ require (
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/runtime-spec v1.2.0 // indirect
-	github.com/opencontainers/selinux v1.11.0 // indirect
+	github.com/opencontainers/selinux v1.11.1 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rogpeppe/go-internal v1.11.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/sylabs/sif/v2 v2.18.0 // indirect
-	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
-	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
+	github.com/sylabs/sif/v2 v2.20.2 // indirect
+	github.com/tchap/go-patricia/v2 v2.3.2 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
-	github.com/vbatts/tar-split v0.11.6 // indirect
+	github.com/vbatts/tar-split v0.11.7 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
-	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
-	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+	go.opentelemetry.io/otel v1.31.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
+	go.opentelemetry.io/otel/trace v1.31.0 // indirect
+	golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect
+	golang.org/x/mod v0.22.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f // indirect
-	google.golang.org/grpc v1.67.0 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
+	google.golang.org/grpc v1.69.4 // indirect
+	google.golang.org/protobuf v1.36.2 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
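
The go.mod hunk above is the only hand-visible part of this bump; everything under
vendor/ in the diffstat is regenerated output. A minimal sketch of how such an update
is typically produced, assuming the standard Go toolchain drives the change (this
particular patch was generated automatically by Renovate, so the exact invocation is
an assumption):

    # resolve the new version and rewrite go.mod/go.sum
    go get github.com/containers/image/v5@v5.34.3
    go mod tidy
    # regenerate vendor/ and vendor/modules.txt, which account for the bulk of this patch
    go mod vendor

The added "toolchain go1.22.9" directive is recorded automatically when the module
graph is resolved with a Go toolchain newer than the version named by the "go"
directive.
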
diff --git a/go.sum b/go.sum
index 23483ed88..71f1ca11f 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,6 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
@@ -29,36 +29,36 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X
 github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
-github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
-github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
-github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
-github.com/containers/image/v5 v5.31.1 h1:3x9soI6Biml/GiDLpkSmKrkRSwVGctxu/vONpoUdklA=
-github.com/containers/image/v5 v5.31.1/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
+github.com/containers/image/v5 v5.34.3 h1:/cMgfyA4Y7ILH7nzWP/kqpkE5Df35Ek4bp5ZPvJOVmI=
+github.com/containers/image/v5 v5.34.3/go.mod h1:MG++slvQSZVq5ejAcLdu4APGsKGMb0YHHnAo7X28fdE=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
-github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
-github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I=
-github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
+github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
+github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
+github.com/containers/storage v1.57.2 h1:2roCtTyE9pzIaBDHibK72DTnYkPmwWaq5uXxZdaWK4U=
+github.com/containers/storage v1.57.2/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE=
-github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc=
+github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
+github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.2.1+incompatible h1:U5BPtiD0viUzjGAjV1p0MGB8eVA3L3cbIrnyWmSJI70=
-github.com/docker/cli v27.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
+github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
-github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
+github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
 github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -96,6 +96,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -104,8 +106,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
-github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -128,8 +130,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@@ -142,6 +144,8 @@ github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE=
 github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
+github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
 github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
@@ -163,8 +167,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw
 github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
-github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
+github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -172,20 +176,20 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
-github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
-github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
+github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
 github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
 github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -202,51 +206,49 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
-github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
-github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
+github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
+github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
+github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
 github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
 github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
-github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
+github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
+github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
-golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
+golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo=
+golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -264,8 +266,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -298,17 +300,17 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f h1:cUMEy+8oS78BWIH9OWazBkzbr090Od9tWBNtZHkOhf0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
+google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
-google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
+google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -318,8 +320,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
+google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
index 529c3412b..45ad0f1ae 100644
--- a/vendor/dario.cat/mergo/.gitignore
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -13,6 +13,9 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 
+# Golang/IntelliJ
+.idea
+
 # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
 .glide/
 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
index 7d0cf9f32..0b3c48889 100644
--- a/vendor/dario.cat/mergo/README.md
+++ b/vendor/dario.cat/mergo/README.md
@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
 
 ## Status
 
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted; they will be considered for a future v2 that improves the implementation and fixes corner-case bugs.
 
 ### Important notes
 
 #### 1.0.0
 
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a transitive dependency pulls in Mergo (it isn't a direct dependency in your project), it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
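+
+The `replace` directive goes in your project's `go.mod`; it keeps any transitive `github.com/imdario/mergo` requirement pinned to the last release published under the old import path.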
 
 #### 0.3.9
 
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
 
 If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
 
-<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
 <a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
 <a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
 
 ### Mergo in the wild
 
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli](https://github.com/docker/cli)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [Masterminds/sprig](https://github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
 
 ## Install
 
@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
 }
 ```
 
+If you need to override pointers, so that the source pointer itself is assigned to the destination field instead of being dereferenced and merged, use `WithoutDereference`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Foo struct {
+	A *string
+	B int64
+}
+
+func main() {
+	first := "first"
+	second := "second"
+	src := Foo{
+		A: &first,
+		B: 2,
+	}
+
+	dest := Foo{
+		A: &second,
+		B: 1,
+	}
+
+	if err := mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference); err != nil {
+		fmt.Println(err)
+	}
+
+	fmt.Println(*dest.A) // "first": the source pointer itself was assigned
+}
+```
+
 Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
 
 ```go
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
index b50d5c2a4..759b4f74f 100644
--- a/vendor/dario.cat/mergo/map.go
+++ b/vendor/dario.cat/mergo/map.go
@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 			}
 			fieldName := field.Name
 			fieldName = changeInitialCase(fieldName, unicode.ToLower)
-			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+			if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
 				dstMap[fieldName] = src.Field(i).Interface()
 			}
 		}
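The rewritten condition only overwrites an existing map key when the corresponding source field is non-empty (or `overwriteWithEmptyValue` is set). A minimal sketch of the resulting `Map` behavior, assuming mergo v1.0.x defaults; the `Config` type and the printed output are illustrative, not taken from the patch:

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Config struct {
	Name string
	Port int
}

func main() {
	dst := map[string]interface{}{"name": "old", "port": 8080}
	src := Config{Name: "new"} // Port is the zero value, i.e. "empty"

	// With WithOverride, only non-empty source fields replace existing keys;
	// the empty Port leaves dst["port"] untouched.
	if err := mergo.Map(&dst, src, mergo.WithOverride); err != nil {
		fmt.Println(err)
	}
	fmt.Println(dst) // expected: map[name:new port:8080]
}
```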
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
index 0ef9b2138..fd47c95b2 100644
--- a/vendor/dario.cat/mergo/merge.go
+++ b/vendor/dario.cat/mergo/merge.go
@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 					if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
 						return
 					}
-				} else {
+				} else if src.Elem().Kind() != reflect.Struct {
 					if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
 						dst.Set(src)
 					}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index 0ca6fd75f..ba650b4d1 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -26,12 +26,13 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
+	"crypto/rand"
 	"crypto/sha256"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-	"math/rand"
+	"math/big"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -45,10 +46,6 @@ import (
 	digest "github.com/opencontainers/go-digest"
 )
 
-func init() {
-	rand.Seed(time.Now().UnixNano())
-}
-
 // TestingController is Compression with some helper methods necessary for testing.
 type TestingController interface {
 	Compression
@@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 				}
 				if sampleEntry == nil {
 					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+					return
 				}
 				if targetEntry == nil {
 					t.Fatalf("rewrite target not found")
+					return
 				}
 				targetEntry.Offset = sampleEntry.Offset
 			},
@@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
 func randomContents(n int) string {
 	b := make([]rune, n)
 	for i := range b {
-		b[i] = runes[rand.Intn(len(runes))]
+		bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
+		if err != nil {
+			panic(err)
+		}
+		b[i] = runes[int(bi.Int64())]
 	}
 	return string(b)
 }
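For reference, the same `crypto/rand` pattern as a self-contained program; the shortened rune set and the `main` wrapper are illustrative, not part of the vendored test helper:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyz")

func randomContents(n int) string {
	b := make([]rune, n)
	for i := range b {
		// rand.Int draws uniformly from [0, len(runes)) and needs no seeding,
		// which is why the rand.Seed init() above could be deleted.
		bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
		if err != nil {
			panic(err) // only fails if the platform entropy source is unavailable
		}
		b[i] = runes[bi.Int64()]
	}
	return string(b)
}

func main() {
	fmt.Println(randomContents(16))
}
```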
diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md
index 8d86600a4..3098526ab 100644
--- a/vendor/github.com/containerd/typeurl/v2/README.md
+++ b/vendor/github.com/containerd/typeurl/v2/README.md
@@ -18,3 +18,9 @@ As a containerd sub-project, you will find the:
  * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Optional
+
+By default, support for gogoproto is available alongside the standard Google
+protobuf types.
+You can leave gogo support out by building with the `no_gogo` build tag.
diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go
index 78817b701..9bf781041 100644
--- a/vendor/github.com/containerd/typeurl/v2/types.go
+++ b/vendor/github.com/containerd/typeurl/v2/types.go
@@ -24,7 +24,6 @@ import (
 	"reflect"
 	"sync"
 
-	gogoproto "github.com/gogo/protobuf/proto"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/types/known/anypb"
@@ -33,8 +32,16 @@ import (
 var (
 	mu       sync.RWMutex
 	registry = make(map[reflect.Type]string)
+	handlers []handler
 )
 
+type handler interface {
+	Marshaller(interface{}) func() ([]byte, error)
+	Unmarshaller(interface{}) func([]byte) error
+	TypeURL(interface{}) string
+	GetType(url string) (reflect.Type, bool)
+}
+
 // Definitions of common error types used throughout typeurl.
 //
 // These error types are used with errors.Wrap and errors.Wrapf to add context
@@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
 		switch t := v.(type) {
 		case proto.Message:
 			return string(t.ProtoReflect().Descriptor().FullName()), nil
-		case gogoproto.Message:
-			return gogoproto.MessageName(t), nil
 		default:
+			for _, h := range handlers {
+				if u := h.TypeURL(v); u != "" {
+					return u, nil
+				}
+			}
 			return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
 		}
 	}
@@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) {
 		marshal = func(v interface{}) ([]byte, error) {
 			return proto.Marshal(t)
 		}
-	case gogoproto.Message:
-		marshal = func(v interface{}) ([]byte, error) {
-			return gogoproto.Marshal(t)
-		}
 	default:
-		marshal = json.Marshal
+		for _, h := range handlers {
+			if m := h.Marshaller(v); m != nil {
+				marshal = func(v interface{}) ([]byte, error) {
+					return m()
+				}
+				break
+			}
+		}
+
+		if marshal == nil {
+			marshal = json.Marshal
+		}
 	}
 
 	url, err := TypeURL(v)
@@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
 }
 
 func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
-	t, err := getTypeByUrl(typeURL)
+	t, isProto, err := getTypeByUrl(typeURL)
 	if err != nil {
 		return nil, err
 	}
 
 	if v == nil {
-		v = reflect.New(t.t).Interface()
+		v = reflect.New(t).Interface()
 	} else {
 		// Validate interface type provided by client
 		vURL, err := TypeURL(v)
@@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
 		}
 	}
 
-	if t.isProto {
-		switch t := v.(type) {
-		case proto.Message:
-			err = proto.Unmarshal(value, t)
-		case gogoproto.Message:
-			err = gogoproto.Unmarshal(value, t)
+	if isProto {
+		pm, ok := v.(proto.Message)
+		if ok {
+			return v, proto.Unmarshal(value, pm)
 		}
-	} else {
-		err = json.Unmarshal(value, v)
-	}
 
-	return v, err
-}
+		for _, h := range handlers {
+			if unmarshal := h.Unmarshaller(v); unmarshal != nil {
+				return v, unmarshal(value)
+			}
+		}
+	}
 
-type urlType struct {
-	t       reflect.Type
-	isProto bool
+	// fallback to json unmarshaller
+	return v, json.Unmarshal(value, v)
 }
 
-func getTypeByUrl(url string) (urlType, error) {
+func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
 	mu.RLock()
 	for t, u := range registry {
 		if u == url {
 			mu.RUnlock()
-			return urlType{
-				t: t,
-			}, nil
+			return t, false, nil
 		}
 	}
 	mu.RUnlock()
-	// fallback to proto registry
-	t := gogoproto.MessageType(url)
-	if t != nil {
-		return urlType{
-			// get the underlying Elem because proto returns a pointer to the type
-			t:       t.Elem(),
-			isProto: true,
-		}, nil
-	}
 	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
 	if err != nil {
-		return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
+		if errors.Is(err, protoregistry.NotFound) {
+			for _, h := range handlers {
+				if t, isProto := h.GetType(url); t != nil {
+					return t, isProto, nil
+				}
+			}
+		}
+		return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
 	}
 	empty := mt.New().Interface()
-	return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
+	return reflect.TypeOf(empty).Elem(), true, nil
 }
 
 func tryDereference(v interface{}) reflect.Type {
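The refactor keeps the public typeurl API unchanged: registered non-proto types still round-trip through the JSON fallback, which is now reached when no handler claims the value. A usage sketch under that assumption; the `Spec` type and URL are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/containerd/typeurl/v2"
)

type Spec struct {
	Field string `json:"field"`
}

func main() {
	// Spec is not a proto.Message and no handler claims it, so MarshalAny
	// falls back to json.Marshal and unmarshal() takes the JSON path.
	typeurl.Register(&Spec{}, "types.example.com/Spec")

	a, err := typeurl.MarshalAny(&Spec{Field: "value"})
	if err != nil {
		panic(err)
	}
	v, err := typeurl.UnmarshalAny(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(*Spec).Field) // "value"
}
```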
diff --git a/vendor/github.com/containerd/typeurl/v2/types_gogo.go b/vendor/github.com/containerd/typeurl/v2/types_gogo.go
new file mode 100644
index 000000000..adb892ec6
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/v2/types_gogo.go
@@ -0,0 +1,68 @@
+//go:build !no_gogo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package typeurl
+
+import (
+	"reflect"
+
+	gogoproto "github.com/gogo/protobuf/proto"
+)
+
+func init() {
+	handlers = append(handlers, gogoHandler{})
+}
+
+type gogoHandler struct{}
+
+func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return nil
+	}
+	return func() ([]byte, error) {
+		return gogoproto.Marshal(pm)
+	}
+}
+
+func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return nil
+	}
+
+	return func(dt []byte) error {
+		return gogoproto.Unmarshal(dt, pm)
+	}
+}
+
+func (gogoHandler) TypeURL(v interface{}) string {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return ""
+	}
+	return gogoproto.MessageName(pm)
+}
+
+func (gogoHandler) GetType(url string) (reflect.Type, bool) {
+	t := gogoproto.MessageType(url)
+	if t == nil {
+		return nil, false
+	}
+	return t.Elem(), true
+}
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index c9b390318..6e88aa01d 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
 type dirImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.AlwaysSupportsSignatures
 
@@ -251,14 +252,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
 	return nil
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *dirImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	return nil
 }
 
diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go
index 632ee7c49..9e0d32007 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go
@@ -34,16 +34,17 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
 		writer = w
 		closeWriter = true
 	}
-	tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
-	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
-		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
-	}
-	return &archiveImageDestination{
-		Destination: tarDest,
+	d := &archiveImageDestination{
 		ref:         ref,
 		writer:      writer,
 		closeWriter: closeWriter,
-	}, nil
+	}
+	tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref, d.CommitWithOptions)
+	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
+		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
+	}
+	d.Destination = tarDest
+	return d, nil
 }
 
 // Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
@@ -60,14 +61,11 @@ func (d *archiveImageDestination) Close() error {
 	return nil
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	d.writer.imageCommitted()
 	if d.closeWriter {
 		// We could do this only in .Close(), but failures in .Close() are much more likely to be
diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go
index 7d66ef6bc..e69e21ef7 100644
--- a/vendor/github.com/containers/image/v5/docker/body_reader.go
+++ b/vendor/github.com/containers/image/v5/docker/body_reader.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"math/rand"
+	"math/rand/v2"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
 			logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
 		}
 		br.body = nil
-		time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
+		time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
 
 		headers := map[string][]string{
 			"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
@@ -197,7 +197,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
 		consumedBody = true
 		br.body = res.Body
 		br.lastRetryOffset = br.offset
-		br.lastRetryTime = time.Time{}
+		br.lastRetryTime = time.Now()
 		return n, nil
 
 	default:
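`math/rand/v2` makes the jitter idiom above shorter because the generic `rand.N` accepts any integer type, including `time.Duration`. A standalone sketch; the delay bounds are illustrative:

```go
package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

func main() {
	// rand.N(d) returns a uniform value in [0, d); no Intn-to-Duration
	// conversion and no seeding are required.
	delay := 1*time.Second + rand.N(100_000*time.Microsecond)
	fmt.Println(delay)
}
```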
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
index 354af2140..6ade2384f 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/client.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -3,6 +3,7 @@ package daemon
 import (
 	"net/http"
 	"path/filepath"
+	"time"
 
 	"github.com/containers/image/v5/types"
 	dockerclient "github.com/docker/docker/client"
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
 	}
 	switch serverURL.Scheme {
 	case "unix": // Nothing
+	case "npipe": // Nothing
 	case "http":
 		hc := httpConfig()
 		opts = append(opts, dockerclient.WithHTTPClient(hc))
@@ -80,7 +82,13 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 
 	return &http.Client{
 		Transport: &http.Transport{
+			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: tlsc,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient, as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}, nil
@@ -89,7 +97,13 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 func httpConfig() *http.Client {
 	return &http.Client{
 		Transport: &http.Transport{
+			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: nil,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient, as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
index 9b880a2e7..4a59a6a61 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -56,16 +56,17 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
 	goroutineContext, goroutineCancel := context.WithCancel(ctx)
 	go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
 
-	return &daemonImageDestination{
+	d := &daemonImageDestination{
 		ref:                ref,
 		mustMatchRuntimeOS: mustMatchRuntimeOS,
-		Destination:        tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef),
 		archive:            archive,
 		goroutineCancel:    goroutineCancel,
 		statusChannel:      statusChannel,
 		writer:             writer,
 		committed:          false,
-	}, nil
+	}
+	d.Destination = tarfile.NewDestination(sys, archive, ref.Transport().Name(), namedTaggedRef, d.CommitWithOptions)
+	return d, nil
 }
 
 // imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
@@ -146,7 +147,7 @@ func (d *daemonImageDestination) Close() error {
 		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked"" without sending
 		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
 		// Whether that works or not, closing the PipeWriter seems desirable in any case.
-		if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+		if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .CommitWithOptions()")); err != nil {
 			return err
 		}
 	}
@@ -159,14 +160,11 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
 	return d.ref
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	logrus.Debugf("docker-daemon: Closing tar stream")
 	if err := d.archive.Close(); err != nil {
 		return err
diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go
index 0a0064576..622d21fb1 100644
--- a/vendor/github.com/containers/image/v5/docker/distribution_error.go
+++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go
@@ -24,7 +24,6 @@ import (
 	"slices"
 
 	"github.com/docker/distribution/registry/api/errcode"
-	dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
 )
 
 // errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
 func handleErrorResponse(resp *http.Response) error {
-	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+	switch {
+	case resp.StatusCode == http.StatusUnauthorized:
 		// Check for OAuth errors within the `WWW-Authenticate` header first
 		// See https://tools.ietf.org/html/rfc6750#section-3
-		for _, c := range dockerChallenge.ResponseChallenges(resp) {
+		for _, c := range parseAuthHeader(resp.Header) {
 			if c.Scheme == "bearer" {
 				var err errcode.Error
 				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
 				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
 			}
 		}
+		fallthrough
+	case resp.StatusCode >= 400 && resp.StatusCode < 500:
 		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
 		if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 94cbcb1d9..220afd009 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -42,7 +42,6 @@ const (
 	dockerRegistry   = "registry-1.docker.io"
 
 	resolvedPingV2URL       = "%s://%s/v2/"
-	resolvedPingV1URL       = "%s://%s/v1/_ping"
 	tagsPath                = "/v2/%s/tags/list"
 	manifestPath            = "/v2/%s/manifests/%s"
 	blobsPath               = "/v2/%s/blobs/%s"
@@ -86,11 +85,9 @@ type extensionSignatureList struct {
 	Signatures []extensionSignature `json:"signatures"`
 }
 
+// bearerToken records a cached token we can use to authenticate.
 type bearerToken struct {
-	Token          string    `json:"token"`
-	AccessToken    string    `json:"access_token"`
-	ExpiresIn      int       `json:"expires_in"`
-	IssuedAt       time.Time `json:"issued_at"`
+	token          string
 	expirationTime time.Time
 }
 
@@ -147,25 +144,6 @@ const (
 	noAuth
 )
 
-func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
-	token := new(bearerToken)
-	if err := json.Unmarshal(blob, &token); err != nil {
-		return nil, err
-	}
-	if token.Token == "" {
-		token.Token = token.AccessToken
-	}
-	if token.ExpiresIn < minimumTokenLifetimeSeconds {
-		token.ExpiresIn = minimumTokenLifetimeSeconds
-		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-	}
-	if token.IssuedAt.IsZero() {
-		token.IssuedAt = time.Now().UTC()
-	}
-	token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
-	return token, nil
-}
-
 // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
 func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 	if sys != nil && sys.DockerCertPath != "" {
@@ -774,7 +752,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
 					token = *t
 					c.tokenCache.Store(cacheKey, token)
 				}
-				registryToken = token.Token
+				registryToken = token.token
 			}
 			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
 			return nil
@@ -827,12 +805,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 		return nil, err
 	}
 
-	tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
-	if err != nil {
-		return nil, err
-	}
-
-	return newBearerTokenFromJSONBlob(tokenBlob)
+	return newBearerTokenFromHTTPResponseBody(res)
 }
 
 func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
@@ -878,12 +851,50 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
 		return nil, err
 	}
-	tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+
+	return newBearerTokenFromHTTPResponseBody(res)
+}
+
+// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken.
+// The caller is still responsible for ensuring res.Body is closed.
+func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) {
+	blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
 	if err != nil {
 		return nil, err
 	}
 
-	return newBearerTokenFromJSONBlob(tokenBlob)
+	var token struct {
+		Token          string    `json:"token"`
+		AccessToken    string    `json:"access_token"`
+		ExpiresIn      int       `json:"expires_in"`
+		IssuedAt       time.Time `json:"issued_at"`
+	}
+	if err := json.Unmarshal(blob, &token); err != nil {
+		const bodySampleLength = 50
+		bodySample := blob
+		if len(bodySample) > bodySampleLength {
+			bodySample = bodySample[:bodySampleLength]
+		}
+		return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err)
+	}
+
+	bt := &bearerToken{
+		token: token.Token,
+	}
+	if bt.token == "" {
+		bt.token = token.AccessToken
+	}
+
+	if token.ExpiresIn < minimumTokenLifetimeSeconds {
+		token.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+	}
+	if token.IssuedAt.IsZero() {
+		token.IssuedAt = time.Now().UTC()
+	}
+	bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+	return bt, nil
 }
 
 // detectPropertiesHelper performs the work of detectProperties which executes
@@ -924,34 +935,6 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 	}
 	if err != nil {
 		err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
-		if c.sys != nil && c.sys.DockerDisableV1Ping {
-			return err
-		}
-		// best effort to understand if we're talking to a V1 registry
-		pingV1 := func(scheme string) bool {
-			pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
-			if err != nil {
-				return false
-			}
-			resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
-			if err != nil {
-				logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
-				return false
-			}
-			defer resp.Body.Close()
-			logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
-			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-				return false
-			}
-			return true
-		}
-		isV1 := pingV1("https")
-		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
-			isV1 = pingV1("http")
-		}
-		if isV1 {
-			err = ErrV1NotSupported
-		}
 	}
 	return err
 }
@@ -1073,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
 func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
 	// Note that this copies all kinds of attachments: attestations, and whatever else is there,
 	// not just signatures. We leave the signature consumers to decide based on the MIME type.
+
+	if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
+		return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
+	}
+	digestAlgorithm := desc.Digest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
+	}
+
 	reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
 	if err != nil {
 		return nil, err
@@ -1082,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
 	if err != nil {
 		return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
 	}
+	actualDigest := digestAlgorithm.FromBytes(payload)
+	if actualDigest != desc.Digest {
+		return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
+	}
 	return payload, nil
 }
 
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 9741afc3f..74f559dce 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
 		}
 		for _, tag := range tagsHolder.Tags {
 			if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
+				// Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary
+				// to the spec, may include JSON null values in the list; and Go silently parses them as "".
+				if tag == "" {
+					logrus.Debugf("Ignoring invalid empty tag")
+					continue
+				}
 				// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
 				// contrary to the tag format specified in
 				// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
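The empty-tag guard is needed because some registries emit JSON `null` entries in the tag list, which Go decodes as `""`. A sketch of equivalent filtering using the standalone `github.com/distribution/reference` module (an assumption; the vendored code uses its own copy of the reference package):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func validTags(repo reference.Named, tags []string) []string {
	out := make([]string, 0, len(tags))
	for _, tag := range tags {
		// JSON null entries decode to ""; WithTag rejects them like any
		// other malformed tag.
		if _, err := reference.WithTag(repo, tag); err != nil {
			continue
		}
		out = append(out, tag)
	}
	return out
}

func main() {
	repo, err := reference.ParseNormalizedNamed("example.com/repo")
	if err != nil {
		panic(err)
	}
	fmt.Println(validTags(repo, []string{"v1", "", "latest"})) // [v1 latest]
}
```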
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 7f59ea3fe..3ac43cf9f 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -41,6 +41,7 @@ import (
 type dockerImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	ref dockerReference
@@ -332,6 +333,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 
+	originalCandidateKnownToBeMissing := false
 	if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
 		// First, check whether the blob happens to already exist at the destination.
 		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +343,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		if haveBlob {
 			return true, reusedInfo, nil
 		}
+		originalCandidateKnownToBeMissing = true
 	} else {
 		logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
 			optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+		// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+		// In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+		// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+		// with the BIC’s annotations.
+		// This is not quite correct: it only works if the BIC also contains an acceptable _location_.
+		// Ideally, we could look up just the compression algorithm/annotations for info.digest,
+		// and use it even if no location candidate exists and the original candidate is present.
 	}
 
 	// Then try reusing blobs from other locations.
@@ -361,8 +371,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 				logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
 				continue
 			}
-		}
-		if !candidate.UnknownLocation {
 			if candidate.CompressionAlgorithm != nil {
 				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
 			} else {
@@ -389,7 +397,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 			// for it in the current repo.
 			candidateRepo = reference.TrimNamed(d.ref.ref)
 		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+		if originalCandidateKnownToBeMissing &&
+			candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
 			logrus.Debug("... Already tried the primary destination")
 			continue
 		}
@@ -429,10 +438,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
 
 		return true, private.ReusedBlob{
-			Digest:               candidate.Digest,
-			Size:                 size,
-			CompressionOperation: candidate.CompressionOperation,
-			CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
+			Digest:                 candidate.Digest,
+			Size:                   size,
+			CompressionOperation:   candidate.CompressionOperation,
+			CompressionAlgorithm:   candidate.CompressionAlgorithm,
+			CompressionAnnotations: candidate.CompressionAnnotations,
+		}, nil
 	}
 
 	return false, private.ReusedBlob{}, nil
@@ -913,13 +924,10 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
 	return nil
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	return nil
 }
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index a2b6dbed7..41ab9bfd1 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -1,7 +1,9 @@
 package docker
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -11,6 +13,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"os/exec"
 	"strings"
 	"sync"
 
@@ -113,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 		// Don’t just build a string, try to preserve the typed error.
 		primary := &attempts[len(attempts)-1]
 		extras := []string{}
-		for i := 0; i < len(attempts)-1; i++ {
+		for _, attempt := range attempts[:len(attempts)-1] {
 			// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
 			// The paired [] at least have some chance of being unambiguous.
-			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+			extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err))
 		}
 		return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
 	}
@@ -162,6 +165,34 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
 		client.Close()
 		return nil, err
 	}
+
+	if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
+		acf := map[string]struct {
+			Username      string `json:"username,omitempty"`
+			Password      string `json:"password,omitempty"`
+			IdentityToken string `json:"identityToken,omitempty"`
+		}{
+			physicalRef.ref.String(): {
+				Username:      client.auth.Username,
+				Password:      client.auth.Password,
+				IdentityToken: client.auth.IdentityToken,
+			},
+		}
+		acfD, err := json.Marshal(acf)
+		if err != nil {
+			logrus.Warnf("failed to marshal auth config: %v", err)
+		} else {
+			cmd := exec.Command(h)
+			cmd.Stdin = bytes.NewReader(acfD)
+			if err := cmd.Run(); err != nil {
+				var stderr string
+				if ee, ok := err.(*exec.ExitError); ok {
+					stderr = string(ee.Stderr)
+				}
+				logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr: %s): %v", stderr, err)
+			}
+		}
+	}
 	return s, nil
 }
 
@@ -309,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
 			}
 			return
 		}
+		if parts >= len(chunks) {
+			errs <- errors.New("too many parts returned by the server")
+			break
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
 			stream: p,
@@ -433,26 +468,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc
 	var res []signature.Signature
 	switch {
 	case s.c.supportsSignatures:
-		sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
-		if err != nil {
+		if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil {
 			return nil, err
 		}
-		res = append(res, sigs...)
 	case s.c.signatureBase != nil:
-		sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
-		if err != nil {
+		if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
 			return nil, err
 		}
-		res = append(res, sigs...)
 	default:
 		return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
 	}
 
-	sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
-	if err != nil {
+	if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
 		return nil, err
 	}
-	res = append(res, sigstoreSigs...)
 	return res, nil
 }
 
@@ -474,35 +503,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
 	return manifest.Digest(s.cachedManifest)
 }
 
-// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil, storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	// NOTE: Keep this in sync with docs/signature-protocols.md!
-	signatures := []signature.Signature{}
 	for i := 0; ; i++ {
 		if i >= maxLookasideSignatures {
-			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
+			return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
 		}
 
 		sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		signature, missing, err := s.getOneSignature(ctx, sigURL)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if missing {
 			break
 		}
-		signatures = append(signatures, signature)
+		*dest = append(*dest, signature)
 	}
-	return signatures, nil
+	return nil
 }
 
 // getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
@@ -565,48 +594,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL
 	}
 }
 
-// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	var sigs []signature.Signature
 	for _, sig := range parsedBody.Signatures {
 		if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
-			sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+			*dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content))
 		}
 	}
-	return sigs, nil
+	return nil
 }
 
-func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	if !s.c.useSigstoreAttachments {
 		logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
-		return nil, nil
+		return nil
 	}
 
 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if ociManifest == nil {
-		return nil, nil
+		return nil
 	}
 
 	logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
-	res := []signature.Signature{}
 	for layerIndex, layer := range ociManifest.Layers {
 		// Note that this copies all kinds of attachments: attestations, and whatever else is there,
 		// not just signatures. We leave the signature consumers to decide based on the MIME type.
@@ -617,11 +649,11 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con
 		payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
 			none.NoCache)
 		if err != nil {
-			return nil, err
+			return err
 		}
-		res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+		*dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
 	}
-	return res, nil
+	return nil
 }
 
 // deleteImage deletes the named image from the registry, if supported.
@@ -799,7 +831,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
 		handleBufferedNetworkReader(&br)
 	}()
 
-	for i := uint(0); i < nBuffers; i++ {
+	for range nBuffers {
 		b := bufferedNetworkReaderBuffer{
 			data: make([]byte, bufferSize),
 		}
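
The refactor above turns three helpers that each built and returned a signature slice into helpers that append into one caller-owned slice through a *[]signature.Signature parameter, removing the repeated res = append(res, sigs...) copies; the makeBufferedNetworkReader loop also adopts Go 1.22 range-over-int. A minimal standalone sketch of the append-to-*dest convention (names here are illustrative, not part of c/image):

package main

import (
	"errors"
	"fmt"
)

// appendSquares appends values to *dest rather than returning a new slice,
// so several producers can share one result slice and one error path.
// On error, the contents of *dest are undefined, matching the convention above.
func appendSquares(dest *[]int, n int) error {
	if n < 0 {
		return errors.New("negative count")
	}
	for i := range n { // Go 1.22+ range-over-int, as in makeBufferedNetworkReader
		*dest = append(*dest, i*i)
	}
	return nil
}

func main() {
	res := []int{}
	if err := appendSquares(&res, 4); err != nil {
		panic(err)
	}
	fmt.Println(res) // [0 1 4 9]
}
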
diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go
index 4392f9d18..e749b5014 100644
--- a/vendor/github.com/containers/image/v5/docker/errors.go
+++ b/vendor/github.com/containers/image/v5/docker/errors.go
@@ -12,6 +12,7 @@ import (
 var (
 	// ErrV1NotSupported is returned when we're trying to talk to a
 	// docker V1 registry.
+	// Deprecated: The V1 container registry detection is no longer performed, so this error is never returned.
 	ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
 	// ErrTooManyRequests is returned when the status code returned is 429
 	ErrTooManyRequests = errors.New("too many requests to registry")
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index d5446840b..8f5ba7e36 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -20,22 +20,26 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-// Destination is a partial implementation of private.ImageDestination for writing to an io.Writer.
+// Destination is a partial implementation of private.ImageDestination for writing to a Writer.
 type Destination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
 
-	archive  *Writer
-	repoTags []reference.NamedTagged
+	archive           *Writer
+	commitWithOptions func(ctx context.Context, options private.CommitOptions) error
+	repoTags          []reference.NamedTagged
 	// Other state.
 	config []byte
 	sysCtx *types.SystemContext
 }
 
 // NewDestination returns a tarfile.Destination adding images to the specified Writer.
-func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged) *Destination {
+// commitWithOptions implements ImageDestination.CommitWithOptions.
+func NewDestination(sys *types.SystemContext, archive *Writer, transportName string, ref reference.NamedTagged,
+	commitWithOptions func(ctx context.Context, options private.CommitOptions) error) *Destination {
 	repoTags := []reference.NamedTagged{}
 	if ref != nil {
 		repoTags = append(repoTags, ref)
@@ -57,9 +61,10 @@ func NewDestination(sys *types.SystemContext, archive *Writer, transportName str
 		NoPutBlobPartialInitialize: stubs.NoPutBlobPartialRaw(transportName),
 		NoSignaturesInitialize:     stubs.NoSignatures("Storing signatures for docker tar files is not supported"),
 
-		archive:  archive,
-		repoTags: repoTags,
-		sysCtx:   sys,
+		archive:           archive,
+		commitWithOptions: commitWithOptions,
+		repoTags:          repoTags,
+		sysCtx:            sys,
 	}
 	dest.Compat = impl.AddCompat(dest)
 	return dest
@@ -179,3 +184,13 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
 
 	return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
 }
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *Destination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+	// This indirection exists because impl.Compat expects all ImageDestinationInternalOnly methods
+	// to be implemented in one place.
+	return d.commitWithOptions(ctx, options)
+}
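
NewDestination now receives a commitWithOptions callback, so the transport-specific commit logic can live elsewhere while Destination still exposes the full private.ImageDestination method set in one place, as the comment inside CommitWithOptions notes. A minimal sketch of that injection pattern, with hypothetical names:

package main

import (
	"context"
	"fmt"
)

// dest owns the method set; the commit behavior is injected by the constructor's caller.
type dest struct {
	commit func(ctx context.Context) error
}

func newDest(commit func(ctx context.Context) error) *dest {
	return &dest{commit: commit}
}

// CommitWithOptions forwards to the injected implementation, so all methods
// stay discoverable on one type even though the logic lives elsewhere.
func (d *dest) CommitWithOptions(ctx context.Context) error {
	return d.commit(ctx)
}

func main() {
	d := newDest(func(ctx context.Context) error {
		fmt.Println("finalizing tar stream")
		return nil
	})
	_ = d.CommitWithOptions(context.Background())
}
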
diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go
index 3619c3bae..89d48cc4f 100644
--- a/vendor/github.com/containers/image/v5/docker/registries_d.go
+++ b/vendor/github.com/containers/image/v5/docker/registries_d.go
@@ -3,6 +3,7 @@ package docker
 import (
 	"errors"
 	"fmt"
+	"io/fs"
 	"net/url"
 	"os"
 	"path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 		configPath := filepath.Join(dirPath, configName)
 		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				// The file must have been removed between the directory listing
+				// and the open call; ignore that, as it is an expected race.
+				continue
+			}
 			return nil, err
 		}
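
The added errors.Is(err, fs.ErrNotExist) check tolerates a config file disappearing between the directory listing and the subsequent read. A standalone sketch of the same race-tolerant loop, stdlib only:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

func readAll(dir string) (map[string][]byte, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	out := map[string][]byte{}
	for _, e := range entries {
		data, err := os.ReadFile(filepath.Join(dir, e.Name()))
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				continue // removed after the listing; an expected race
			}
			return nil, err
		}
		out[e.Name()] = data
	}
	return out, nil
}

func main() {
	m, err := readAll(os.TempDir())
	fmt.Println(len(m), err)
}
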
 
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
index 893aa959d..f31ee3124 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() {
 func (bic *v1OnlyBlobInfoCache) Close() {
 }
 
-func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
 }
 
 func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
index c9e4aaa48..acf82ee63 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -26,19 +26,40 @@ type BlobInfoCache2 interface {
 	// Close destroys state created by Open().
 	Close()
 
-	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
-	// or Uncompressed or UnknownCompression.
-	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
-	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
+	// Returns "" if the uncompressed digest is unknown.
+	UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
+	// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordDigestCompressorData records data for the blob with the specified digest.
+	// WARNING: Only call this with LOCALLY VERIFIED data:
+	//  - don’t record a compressor for a digest just because some remote author claims so
+	//    (e.g. because a manifest says so);
+	//  - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+	//    and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+	//    in a manifest)
 	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
 	// information in a manifest.
-	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
 	// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 	// that could possibly be reused within the specified (transport scope) (if they still
 	// exist, which is not guaranteed).
 	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
 }
 
+// DigestCompressorData is information known about how a blob is compressed.
+// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
+type DigestCompressorData struct {
+	BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+	// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+	SpecificVariantCompressor  string            // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+	SpecificVariantAnnotations map[string]string // Annotations required to benefit from the specific variant.
+}
+
 // CandidateLocations2Options are used in CandidateLocations2.
 type CandidateLocations2Options struct {
 	// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
@@ -51,9 +72,10 @@ type CandidateLocations2Options struct {
 
 // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
 type BICReplacementCandidate2 struct {
-	Digest               digest.Digest
-	CompressionOperation types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
-	CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
-	UnknownLocation      bool                        // is true when `Location` for this blob is not set
-	Location             types.BICLocationReference  // not set if UnknownLocation is set to `true`
+	Digest                 digest.Digest
+	CompressionOperation   types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
+	CompressionAlgorithm   *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	CompressionAnnotations map[string]string           // Annotations required to use CompressionAlgorithm, if any
+	UnknownLocation        bool                        // is true when `Location` for this blob is not set
+	Location               types.BICLocationReference  // not set if UnknownLocation is set to `true`
 }
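
DigestCompressorData splits a blob's compression identity into a base variant and an optional specific variant together with the annotations needed to exploit it (the zstd vs. zstd:chunked case). Since internal/blobinfocache cannot be imported from outside the module, this sketch mirrors the shape with a local struct and illustrative values:

package main

import "fmt"

// digestCompressorData mirrors the vendored DigestCompressorData shape (illustrative only).
type digestCompressorData struct {
	baseVariantCompressor      string
	specificVariantCompressor  string
	specificVariantAnnotations map[string]string
}

func main() {
	// A zstd:chunked blob: the base variant is plain "zstd"; the specific variant
	// adds the TOC annotations a consumer needs to use the chunked format.
	// The annotation key and value below are illustrative.
	rec := digestCompressorData{
		baseVariantCompressor:     "zstd",
		specificVariantCompressor: "zstd:chunked",
		specificVariantAnnotations: map[string]string{
			"io.github.containers.zstd-chunked.manifest-checksum": "sha256:…",
		},
	}
	fmt.Printf("%+v\n", rec)
}
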
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
index 47c169a1f..70b207d9b 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
@@ -99,3 +99,16 @@ func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanc
 	}
 	return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest)
 }
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	return c.dest.CommitWithOptions(ctx, private.CommitOptions{
+		UnparsedToplevel: unparsedToplevel,
+	})
+}
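
Compat.Commit is the backward-compatibility shim: legacy types.ImageDestination callers keep the old Commit signature, which is translated into the new CommitWithOptions options struct. A minimal sketch of the adapter shape (hypothetical types):

package main

import (
	"context"
	"fmt"
)

type commitOptions struct{ toplevel string }

type modern interface {
	CommitWithOptions(ctx context.Context, opts commitOptions) error
}

// compat adapts the legacy Commit(ctx, toplevel) call onto the new API.
type compat struct{ dest modern }

func (c compat) Commit(ctx context.Context, toplevel string) error {
	return c.dest.CommitWithOptions(ctx, commitOptions{toplevel: toplevel})
}

type printDest struct{}

func (printDest) CommitWithOptions(ctx context.Context, opts commitOptions) error {
	fmt.Println("committing", opts.toplevel)
	return nil
}

func main() {
	_ = compat{dest: printDest{}}.Commit(context.Background(), "manifest-list")
}
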
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go
new file mode 100644
index 000000000..c4536e933
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go
@@ -0,0 +1,16 @@
+package stubs
+
+import (
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
+type IgnoresOriginalOCIConfig struct{}
+
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
index bbb53c198..22bed4b0f 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
@@ -36,8 +36,9 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
 }
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
index cdd3c5e5d..b2462a3bc 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go
@@ -14,6 +14,7 @@ import (
 // wrapped provides the private.ImageDestination operations
 // for a destination that only implements types.ImageDestination
 type wrapped struct {
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	types.ImageDestination
@@ -76,6 +77,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
 		Size:                 blob.Size,
 		CompressionOperation: blob.CompressionOperation,
 		CompressionAlgorithm: blob.CompressionAlgorithm,
+		// CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
+		// annotations, and we didn’t use the blob.Annotations field previously, so we’ll
+		// continue not using it.
 	}, nil
 }
 
@@ -94,3 +98,11 @@ func (w *wrapped) PutSignaturesWithFormat(ctx context.Context, signatures []sign
 	}
 	return w.PutSignatures(ctx, simpleSigs, instanceDigest)
 }
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (w *wrapped) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+	return w.Commit(ctx, options.UnparsedToplevel)
+}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
index f847fa9cc..4c1589ef0 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
@@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
 
 // UpdateInstances updates the sizes, digests, and media types of the manifests
 // which the list catalogs.
-func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
 	editInstances := []ListEdit{}
 	for i, instance := range updates {
 		editInstances = append(editInstances, ListEdit{
-			UpdateOldDigest: index.Manifests[i].Digest,
+			UpdateOldDigest: list.Manifests[i].Digest,
 			UpdateDigest:    instance.Digest,
 			UpdateSize:      instance.Size,
 			UpdateMediaType: instance.MediaType,
 			ListOperation:   ListOpUpdate})
 	}
-	return index.editInstances(editInstances)
+	return list.editInstances(editInstances)
 }
 
-func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	addedEntries := []Schema2ManifestDescriptor{}
 	for i, editInstance := range editInstances {
 		switch editInstance.ListOperation {
@@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 			if err := editInstance.UpdateDigest.Validate(); err != nil {
 				return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
 			}
-			targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+			targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
 				return m.Digest == editInstance.UpdateOldDigest
 			})
 			if targetIndex == -1 {
 				return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
 			}
-			index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+			list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
 			if editInstance.UpdateSize < 0 {
 				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
 			}
-			index.Manifests[targetIndex].Size = editInstance.UpdateSize
+			list.Manifests[targetIndex].Size = editInstance.UpdateSize
 			if editInstance.UpdateMediaType == "" {
-				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
 			}
-			index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+			list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
 		case ListOpAdd:
 			if editInstance.AddPlatform == nil {
 				// Should we create a struct with empty fields instead?
@@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	if len(addedEntries) != 0 {
 		// slices.Clone() here to ensure a private backing array;
 		// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
-		index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+		list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
 	}
 	return nil
 }
 
-func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
-	return index.editInstances(editInstances)
+func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
+	return list.editInstances(editInstances)
 }
 
 func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
@@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont
 // ChooseInstance parses blob as a schema2 manifest list, and returns the digest
 // of the image which is appropriate for the current environment.
 func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
-	wantedPlatforms, err := platform.WantedPlatforms(ctx)
-	if err != nil {
-		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
-	}
+	wantedPlatforms := platform.WantedPlatforms(ctx)
 	for _, wantedPlatform := range wantedPlatforms {
 		for _, d := range list.Manifests {
 			imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
@@ -283,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
 	return &Schema2List{*public}
 }
 
-func (index *Schema2List) CloneInternal() List {
-	return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+func (list *Schema2List) CloneInternal() List {
+	return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
 }
 
-func (index *Schema2List) Clone() ListPublic {
-	return index.CloneInternal()
+func (list *Schema2List) Clone() ListPublic {
+	return list.CloneInternal()
 }
 
 // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
index ee0ddc772..3fb52104a 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
@@ -205,11 +205,6 @@ type ReuseConditions struct {
 // (which can be nil to represent uncompressed or unknown) matches reuseConditions.
 func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
 	if c.RequiredCompression != nil {
-		if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-			// The caller must re-compress to build those annotations.
-			return false
-		}
 		if candidateCompression == nil ||
 			(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
 			return false
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
index 67b4cfeba..6a0f88d3a 100644
--- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
+++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
@@ -1,6 +1,7 @@
 package manifest
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"maps"
@@ -235,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
 	if preferGzip == types.OptionalBoolTrue {
 		didPreferGzip = true
 	}
-	wantedPlatforms, err := platform.WantedPlatforms(ctx)
-	if err != nil {
-		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
-	}
+	wantedPlatforms := platform.WantedPlatforms(ctx)
 	var bestMatch *instanceCandidate
 	bestMatch = nil
 	for manifestIndex, d := range index.Manifests {
@@ -296,29 +294,51 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
 		},
 	}
 	for i, component := range components {
-		var platform *imgspecv1.Platform
-		if component.Platform != nil {
-			platformCopy := ociPlatformClone(*component.Platform)
-			platform = &platformCopy
-		}
-		m := imgspecv1.Descriptor{
-			MediaType:    component.MediaType,
-			ArtifactType: component.ArtifactType,
-			Size:         component.Size,
-			Digest:       component.Digest,
-			URLs:         slices.Clone(component.URLs),
-			Annotations:  maps.Clone(component.Annotations),
-			Platform:     platform,
-		}
-		index.Manifests[i] = m
+		index.Manifests[i] = oci1DescriptorClone(component)
 	}
 	return &index
 }
 
+func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor {
+	var platform *imgspecv1.Platform
+	if d.Platform != nil {
+		platformCopy := ociPlatformClone(*d.Platform)
+		platform = &platformCopy
+	}
+	return imgspecv1.Descriptor{
+		MediaType:    d.MediaType,
+		Digest:       d.Digest,
+		Size:         d.Size,
+		URLs:         slices.Clone(d.URLs),
+		Annotations:  maps.Clone(d.Annotations),
+		Data:         bytes.Clone(d.Data),
+		Platform:     platform,
+		ArtifactType: d.ArtifactType,
+	}
+}
+
 // OCI1IndexPublicClone creates a deep copy of the passed-in index.
 // This is publicly visible as c/image/manifest.OCI1IndexClone.
 func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
-	return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
+	var subject *imgspecv1.Descriptor
+	if index.Subject != nil {
+		s := oci1DescriptorClone(*index.Subject)
+		subject = &s
+	}
+	manifests := make([]imgspecv1.Descriptor, len(index.Manifests))
+	for i, m := range index.Manifests {
+		manifests[i] = oci1DescriptorClone(m)
+	}
+	return &OCI1IndexPublic{
+		Index: imgspecv1.Index{
+			Versioned:    index.Versioned,
+			MediaType:    index.MediaType,
+			ArtifactType: index.ArtifactType,
+			Manifests:    manifests,
+			Subject:      subject,
+			Annotations:  maps.Clone(index.Annotations),
+		},
+	}
 }
 
 // ToOCI1Index returns the index encoded as an OCI1 index.
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
index afdce1d3d..3a16dad63 100644
--- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -153,7 +153,7 @@ var compatibility = map[string][]string{
 // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
-func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform {
 	// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
 	// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
 	// to be interoperable, anyway.
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
 			Variant:      v,
 		})
 	}
-	return res, nil
+	return res
 }
 
 // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go
index 63fb9326d..afd425483 100644
--- a/vendor/github.com/containers/image/v5/internal/private/private.go
+++ b/vendor/github.com/containers/image/v5/internal/private/private.go
@@ -10,6 +10,7 @@ import (
 	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
 	// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
 	// on unsupported formats.
 
+	// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+	// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+	// The destination can use it in its TryReusingBlob/PutBlob implementations
+	// (otherwise it only obtains the final config after all layers are written).
+	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error
+
 	// PutBlobWithOptions writes contents of stream and returns data representing the result.
 	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 	// inputInfo.Size is the expected length of stream, if known.
@@ -53,8 +60,9 @@ type ImageDestinationInternalOnly interface {
 	// PutBlobPartial attempts to create a blob using the data that is already present
 	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 	// It is available only if SupportsPutBlobPartial().
-	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-	// should fall back to PutBlobWithOptions.
+	// Even if SupportsPutBlobPartial() returns true, the call can fail.
+	// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+	// The fallback _must not_ be done otherwise.
 	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)
 
 	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -69,6 +77,12 @@ type ImageDestinationInternalOnly interface {
 	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
 	// MUST be called after PutManifest (signatures may reference manifest contents).
 	PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
+
+	// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+	CommitWithOptions(ctx context.Context, options CommitOptions) error
 }
 
 // ImageDestination is an internal extension to the types.ImageDestination
@@ -134,12 +148,30 @@ type ReusedBlob struct {
 	Size   int64         // Must be provided
 	// The following compression fields should be set when the reuse substitutes
 	// a differently-compressed blob.
+	// They may be set also to change from a base variant to a specific variant of an algorithm.
 	CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
 	CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
 
+	// Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
+	// added even if the digest doesn’t change (if we found the annotations in a cache).
+	CompressionAnnotations map[string]string
+
 	MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
 }
 
+// CommitOptions are used in CommitWithOptions
+type CommitOptions struct {
+	// UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+	// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+	// original manifest list digest, if desired.
+	UnparsedToplevel types.UnparsedImage
+	// ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image
+	// into the value this option points to.
+	// What “resolved” means is transport-specific.
+	// Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value.
+	ReportResolvedReference *types.ImageReference
+}
+
 // ImageSourceChunk is a portion of a blob.
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceChunk struct {
@@ -178,3 +210,22 @@ type UnparsedImage interface {
 	// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
 	UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
 }
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial.
+// It suggests to the caller that a fallback mechanism can be used instead of a hard failure;
+// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob.
+type ErrFallbackToOrdinaryLayerDownload struct {
+	err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+	return c.err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+	return c.err
+}
+
+func NewErrFallbackToOrdinaryLayerDownload(err error) error {
+	return ErrFallbackToOrdinaryLayerDownload{err: err}
+}
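
The contract is that a caller may fall back to a full layer download only when PutBlobPartial's error is (or wraps) ErrFallbackToOrdinaryLayerDownload; any other failure must abort. A caller-side sketch of checking that with errors.As, using a local stand-in for the internal type:

package main

import (
	"errors"
	"fmt"
)

type errFallback struct{ err error }

func (e errFallback) Error() string { return e.err.Error() }
func (e errFallback) Unwrap() error { return e.err }

// tryPartial stands in for PutBlobPartial (hypothetical).
func tryPartial() error {
	return errFallback{err: errors.New("no TOC present")}
}

func main() {
	if err := tryPartial(); err != nil {
		var fallback errFallback
		if errors.As(err, &fallback) {
			fmt.Println("falling back to ordinary download:", fallback.Unwrap())
			return // a caller would invoke PutBlobWithOptions here
		}
		panic(err) // any other error must NOT trigger the fallback
	}
}
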
diff --git a/vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go b/vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go
new file mode 100644
index 000000000..2cfd97bda
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go
@@ -0,0 +1,22 @@
+//go:build linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
+	if errno == 0 {
+		return nil
+	}
+
+	_, err := io.Copy(dst, src)
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go b/vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go
new file mode 100644
index 000000000..8ba11db84
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go
@@ -0,0 +1,15 @@
+//go:build !linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
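
reflink.LinkOrCopy clones src into dst via the FICLONE ioctl where the kernel and filesystem support it (e.g. Btrfs, or XFS with reflink enabled) and otherwise degrades to a plain byte copy, so callers need no platform logic. A usage-shaped sketch; since the package is internal, a local copy of the portable fallback stands in here:

package main

import (
	"io"
	"log"
	"os"
)

// linkOrCopy mirrors the non-Linux fallback above: a plain io.Copy.
// On Linux one would first attempt the FICLONE ioctl, as in reflink_linux.go.
func linkOrCopy(src, dst *os.File) error {
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	src, err := os.Open(os.Args[0]) // copy this program's own binary, for portability
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	dst, err := os.CreateTemp("", "clone-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(dst.Name())
	defer dst.Close()
	if err := linkOrCopy(src, dst); err != nil {
		log.Fatal(err)
	}
}
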
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
index 222aa896e..b74a1e240 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
 	// Add the history and rootfs information.
 	rootfs, err := json.Marshal(rootFS)
 	if err != nil {
-		return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+		return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err)
 	}
 	rawRootfs := json.RawMessage(rootfs)
 	raw["rootfs"] = &rawRootfs
 	history, err := json.Marshal(convertedHistory)
 	if err != nil {
-		return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+		return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err)
 	}
 	rawHistory := json.RawMessage(history)
 	raw["history"] = &rawHistory
 	// Encode the result.
 	config, err = json.Marshal(raw)
 	if err != nil {
-		return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+		return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err)
 	}
 	return config, nil
 }
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 818166834..7e53f4f54 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *Schema2) LayerInfos() []LayerInfo {
-	blobs := []LayerInfo{}
+	blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
 	for _, layer := range m.LayersDescriptors {
 		blobs = append(blobs, LayerInfo{
 			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 497cf476e..0faa866b7 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
 	if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
 		return nil, err
 	}
-	if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
+	if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
 		manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
 		return nil, err
 	}
@@ -95,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *OCI1) LayerInfos() []LayerInfo {
-	blobs := []LayerInfo{}
+	blobs := make([]LayerInfo, 0, len(m.Layers))
 	for _, layer := range m.Layers {
 		blobs = append(blobs, LayerInfo{
 			BlobInfo:   BlobInfoFromOCI1Descriptor(layer),
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index a3eb5d7a1..54b2e5056 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -14,6 +14,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 	return d.unpackedDest.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -117,8 +126,9 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
 }
@@ -149,13 +159,12 @@ func (d *ociArchiveImageDestination) PutSignaturesWithFormat(ctx context.Context
 	return d.unpackedDest.PutSignaturesWithFormat(ctx, signatures, instanceDigest)
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
-// after the directory is made, it is tarred up into a file and the directory is deleted
-func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
-	if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+	if err := d.unpackedDest.CommitWithOptions(ctx, options); err != nil {
 		return fmt.Errorf("storing image %q: %w", d.ref.image, err)
 	}
 
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
index 53827b11a..c4eaed0ee 100644
--- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"strconv"
 	"strings"
 )
 
@@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
 }
 
 func validateScopeWindows(scope string) error {
-	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
 	if !matched {
 		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
 	}
@@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {
 
 	return nil
 }
+
+// parseOCIReferenceName parses the image from the oci reference.
+func parseOCIReferenceName(image string) (img string, index int, err error) {
+	index = -1
+	if strings.HasPrefix(image, "@") {
+		idx, err := strconv.Atoi(image[1:])
+		if err != nil {
+			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
+		}
+		if idx < 0 {
+			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
+		}
+		index = idx
+	} else {
+		img = image
+	}
+	return img, index, nil
+}
+
+// ParseReferenceIntoElements splits the oci reference into location, image name, and source index (if present).
+func ParseReferenceIntoElements(reference string) (string, string, int, error) {
+	dir, image := SplitPathAndImage(reference)
+	image, index, err := parseOCIReferenceName(image)
+	if err != nil {
+		return "", "", -1, err
+	}
+	return dir, image, index, nil
+}
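
With this change an OCI layout reference can select an image either by name or by numeric position in the index, written @N. A standalone sketch of the same parse rule (hypothetical helper):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNameOrIndex returns (name, -1) for plain names, or ("", index) for "@N" inputs.
func parseNameOrIndex(image string) (string, int, error) {
	if rest, ok := strings.CutPrefix(image, "@"); ok {
		idx, err := strconv.Atoi(rest)
		if err != nil || idx < 0 {
			return "", -1, fmt.Errorf("invalid source index @%s", rest)
		}
		return "", idx, nil
	}
	return image, -1, nil
}

func main() {
	for _, in := range []string{"app", "@2", "@-1"} {
		name, idx, err := parseNameOrIndex(in)
		fmt.Println(in, "->", name, idx, err)
	}
}
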
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
index bcf257df6..08366a7e2 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go
@@ -27,17 +27,8 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
 		return err
 	}
 
-	var blobsUsedByImage map[digest.Digest]int
-
-	switch descriptor.MediaType {
-	case imgspecv1.MediaTypeImageManifest:
-		blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
-	case imgspecv1.MediaTypeImageIndex:
-		blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
-	default:
-		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
-	}
-	if err != nil {
+	blobsUsedByImage := make(map[digest.Digest]int)
+	if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
 		return err
 	}
 
@@ -54,82 +45,48 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
 	return ref.deleteReferenceFromIndex(descriptorIndex)
 }
 
-func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
-	manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
-	if err != nil {
-		return nil, err
-	}
-	blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
-	blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference
-
-	return blobsUsedInManifest, nil
-}
-
-func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
+// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself.
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
 	blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
 	if err != nil {
-		return nil, err
-	}
-	index, err := parseIndex(blobPath)
-	if err != nil {
-		return nil, err
+		return err
 	}
 
-	blobsUsedInImageRefIndex := make(map[digest.Digest]int)
-	err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
-	if err != nil {
-		return nil, err
+	dest[descriptor.Digest]++
+	switch descriptor.MediaType {
+	case imgspecv1.MediaTypeImageManifest:
+		manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+		if err != nil {
+			return err
+		}
+		dest[manifest.Config.Digest]++
+		for _, layer := range manifest.Layers {
+			dest[layer.Digest]++
+		}
+	case imgspecv1.MediaTypeImageIndex:
+		index, err := parseIndex(blobPath)
+		if err != nil {
+			return err
+		}
+		if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
 	}
-	blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference
-
-	return blobsUsedInImageRefIndex, nil
+	return nil
 }
 
-// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map
-func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
 	for _, descriptor := range index.Manifests {
-		destination[descriptor.Digest]++
-		switch descriptor.MediaType {
-		case imgspecv1.MediaTypeImageManifest:
-			manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-			for digest, count := range ref.getBlobsUsedInManifest(manifest) {
-				destination[digest] += count
-			}
-		case imgspecv1.MediaTypeImageIndex:
-			blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-			index, err := parseIndex(blobPath)
-			if err != nil {
-				return err
-			}
-			err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-		default:
-			return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+		if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+			return err
 		}
 	}
-
 	return nil
 }
 
-func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
-	blobsUsedInManifest := make(map[digest.Digest]int, 0)
-
-	blobsUsedInManifest[manifest.Config.Digest]++
-	for _, layer := range manifest.Layers {
-		blobsUsedInManifest[layer.Digest]++
-	}
-
-	return blobsUsedInManifest
-}
-
 // This takes in a map of the digest and their usage count in the manifest to be deleted
 // It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted
 func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
@@ -138,7 +95,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges
 		return nil, err
 	}
 	blobsUsedInRootIndex := make(map[digest.Digest]int)
-	err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+	err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
 	if err != nil {
 		return nil, err
 	}
@@ -224,17 +181,3 @@ func saveJSON(path string, content any) error {
 
 	return json.NewEncoder(file).Encode(content)
 }
-
-func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
-	manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
-	if err != nil {
-		return nil, err
-	}
-
-	manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
-	if err != nil {
-		return nil, err
-	}
-
-	return manifest, nil
-}
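
The deletion logic now does a single recursive walk, countBlobsForDescriptor, that increments a usage count for every blob reachable from a descriptor (including nested indexes); comparing those counts against the root index's counts decides which blobs are safe to delete. A toy version of the counting idea over a plain tree:

package main

import "fmt"

type node struct {
	digest   string
	children []*node
}

// countBlobs increments dest for n and everything reachable from it,
// so a blob referenced three times ends up with 3, as in countBlobsForDescriptor.
func countBlobs(dest map[string]int, n *node) {
	dest[n.digest]++
	for _, c := range n.children {
		countBlobs(dest, c)
	}
}

func main() {
	shared := &node{digest: "sha256:layer"}
	root := &node{digest: "sha256:index", children: []*node{
		{digest: "sha256:m1", children: []*node{shared}},
		{digest: "sha256:m2", children: []*node{shared}},
	}}
	counts := map[string]int{}
	countBlobs(counts, root)
	fmt.Println(counts["sha256:layer"]) // 2
}
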
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index a096afe0f..b87cef4f7 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -17,6 +17,7 @@ import (
 	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/reflink"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/fileutils"
 	digest "github.com/opencontainers/go-digest"
@@ -27,6 +28,7 @@ import (
 type ociImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
 
@@ -37,6 +39,9 @@ type ociImageDestination struct {
 
 // newImageDestination returns an ImageDestination for writing to an existing directory.
 func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
+	if ref.sourceIndex != -1 {
+		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+	}
 	var index *imgspecv1.Index
 	if indexExists(ref) {
 		var err error
@@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	if inputInfo.Size != -1 && size != inputInfo.Size {
 		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
-	if err := blobFile.Sync(); err != nil {
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
 		return private.UploadedBlob{}, err
 	}
+	succeeded = true
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
+// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
+// whether blobFile has been closed or not.
+func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
+	if err := blobFile.Sync(); err != nil {
+		return err
+	}
 
 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
 	// On Windows, the “permissions of newly created files” argument to syscall.Open is
@@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return private.UploadedBlob{}, err
+			return err
 		}
 	}
 
 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 
-	// need to explicitly close the file, since a rename won't otherwise not work on Windows
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
 	blobFile.Close()
-	explicitClosed = true
+	*closed = true
+
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
-	succeeded = true
-	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+
+	return nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -278,14 +296,11 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
 	d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
 		Version: imgspecv1.ImageLayoutVersion,
 	})
@@ -302,6 +317,67 @@ func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error
 	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }
 
+// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
+type PutBlobFromLocalFileOption struct{}
+
+// PutBlobFromLocalFile arranges for the data in the local file at the given path to be used as a blob, identified by its digest.
+// It computes, and returns, the digest and size of the used file.
+//
+// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+	d, ok := dest.(*ociImageDestination)
+	if !ok {
+		return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
+	}
+
+	succeeded := false
+	blobFileClosed := false
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return "", -1, err
+	}
+	defer func() {
+		if !blobFileClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	srcFile, err := os.Open(file)
+	if err != nil {
+		return "", -1, err
+	}
+	defer srcFile.Close()
+
+	err = reflink.LinkOrCopy(srcFile, blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	_, err = blobFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return "", -1, err
+	}
+	blobDigest, err := digest.FromReader(blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	fileInfo, err := blobFile.Stat()
+	if err != nil {
+		return "", -1, err
+	}
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
+		return "", -1, err
+	}
+
+	succeeded = true
+	return blobDigest, fileInfo.Size(), nil
+}
+
 func ensureDirectoryExists(path string) error {
 	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
 		if err := os.MkdirAll(path, 0755); err != nil {
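
For context, a minimal caller-side sketch of the new PutBlobFromLocalFile helper; the layout path, image name, and file path are illustrative, and error handling is abbreviated:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

// storeLocalBlob is a hypothetical helper showing the call sequence.
func storeLocalBlob(layoutDir, imageName, file string) error {
	ctx := context.Background()
	ref, err := layout.NewReference(layoutDir, imageName)
	if err != nil {
		return err
	}
	dest, err := ref.NewImageDestination(ctx, nil)
	if err != nil {
		return err
	}
	defer dest.Close()
	// Internally this reflinks the file into the layout when the filesystem
	// supports it, and falls back to a plain copy otherwise.
	blobDigest, size, err := layout.PutBlobFromLocalFile(ctx, dest, file)
	if err != nil {
		return err
	}
	fmt.Printf("stored blob %s (%d bytes)\n", blobDigest, size)
	return nil
}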
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index 816dfa7a1..6ab384676 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -61,22 +61,31 @@ type ociReference struct {
 	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
 	dir         string // As specified by the user. May be relative, contain symlinks, etc.
 	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
-	// If image=="", it means the "only image" in the index.json is used in the case it is a source
-	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
+	// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	//
+	// Must not be set if sourceIndex is set (the value is not -1).
 	image string
+	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
+	// Must not be set if image is set.
+	sourceIndex int
 }
 
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
 func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
+	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
+	if err != nil {
+		return nil, err
+	}
+	return newReference(dir, image, index)
 }
 
-// NewReference returns an OCI reference for a directory and a image.
+// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
 //
+// If sourceIndex==-1, the index is not used to select the source image; only image is used.
 // We do not expose an API supplying the resolvedDir; we could, but recomputing it
 // is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
+func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
 	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
 	if err != nil {
 		return nil, err
@@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
 		return nil, err
 	}
 
-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
+	}
+	if sourceIndex != -1 && image != "" {
+		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
+	}
+	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
+}
+
+// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
+func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(dir, "", sourceIndex)
+}
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	return newReference(dir, image, -1)
 }
 
 func (ref ociReference) Transport() types.ImageTransport {
@@ -103,7 +131,10 @@ func (ref ociReference) Transport() types.ImageTransport {
 // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref ociReference) StringWithinTransport() string {
-	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	if ref.sourceIndex == -1 {
+		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+	}
+	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
 }
 
 // DockerReference returns a Docker reference associated with this reference
@@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		return imgspecv1.Descriptor{}, -1, err
 	}
 
-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) != 1 {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+	switch {
+	case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
+		return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.image, ref.sourceIndex)
+
+	case ref.sourceIndex != -1:
+		if ref.sourceIndex >= len(index.Manifests) {
+			return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
 		}
-		return index.Manifests[0], 0, nil
-	} else {
+		return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil
+
+	case ref.image != "":
 		// if image specified, look through all manifests for a match
 		var unsupportedMIMETypes []string
 		for i, md := range index.Manifests {
@@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		if len(unsupportedMIMETypes) != 0 {
 			return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
 		}
+		return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
+
+	default:
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) != 1 {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+		}
+		return index.Manifests[0], 0, nil
 	}
-	return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
 }
 
 // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
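
A brief sketch of how the two addressing modes differ after this change; the layout path and image name are illustrative:

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Address an image by its "image.ref.name" annotation (sourceIndex == -1).
	byName, err := layout.NewReference("/tmp/oci-layout", "example:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(byName.StringWithinTransport()) // "/tmp/oci-layout:example:latest"

	// Address the third manifest in index.json directly (valid only for sources).
	byIndex, err := layout.NewIndexReference("/tmp/oci-layout", 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(byIndex.StringWithinTransport()) // "/tmp/oci-layout:@2"
}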
diff --git a/vendor/github.com/containers/image/v5/oci/layout/reader.go b/vendor/github.com/containers/image/v5/oci/layout/reader.go
new file mode 100644
index 000000000..112db2d70
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/reader.go
@@ -0,0 +1,52 @@
+package layout
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// This file is named reader.go for consistency with other transports’
+// handling of “image containers”, but we don’t actually need a stateful reader object.
+
+// ListResult wraps the image reference and the manifest for loading
+type ListResult struct {
+	Reference          types.ImageReference
+	ManifestDescriptor imgspecv1.Descriptor
+}
+
+// List returns a slice of manifests included in the OCI layout directory at dir
+func List(dir string) ([]ListResult, error) {
+	var res []ListResult
+
+	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
+	if err != nil {
+		return nil, err
+	}
+	var index imgspecv1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return nil, err
+	}
+
+	for manifestIndex, md := range index.Manifests {
+		refName := md.Annotations[imgspecv1.AnnotationRefName]
+		index := -1
+		if refName == "" {
+			index = manifestIndex
+		}
+		ref, err := newReference(dir, refName, index)
+		if err != nil {
+			return nil, fmt.Errorf("error creating image reference: %w", err)
+		}
+		reference := ListResult{
+			Reference:          ref,
+			ManifestDescriptor: md,
+		}
+		res = append(res, reference)
+	}
+	return res, nil
+}
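
A small usage sketch for the new List helper (the directory path is illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	results, err := layout.List("/tmp/oci-layout")
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		// Entries with an "image.ref.name" annotation are addressed by name;
		// unnamed entries get an @index source reference instead.
		fmt.Println(r.Reference.StringWithinTransport(), r.ManifestDescriptor.Digest)
	}
}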
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index fff586bee..cef3dcccf 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -365,7 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
 	if len(clusterInfo.CertificateAuthority) != 0 {
 		err := validateFileIsReadable(clusterInfo.CertificateAuthority)
 		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err))
 		}
 	}
 
@@ -403,13 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
 		if len(authInfo.ClientCertificate) != 0 {
 			err := validateFileIsReadable(authInfo.ClientCertificate)
 			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err))
 			}
 		}
 		if len(authInfo.ClientKey) != 0 {
 			err := validateFileIsReadable(authInfo.ClientKey)
 			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err))
 			}
 		}
 	}
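
The %v→%w change is not cosmetic: %w makes the underlying error available to errors.Is/errors.As, so callers can detect, for example, permission problems. A minimal illustration:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	err := fmt.Errorf("unable to read client-key %v for %v due to %w", "key.pem", "ctx", os.ErrPermission)
	fmt.Println(errors.Is(err, os.ErrPermission)) // true; with %v this would be false
}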
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
index 4170d6e20..bd5e77aa8 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go
@@ -22,6 +22,7 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type openshiftImageDestination struct {
@@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 	return d.docker.SupportsPutBlobPartial()
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -125,8 +134,9 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
 }
@@ -235,13 +245,10 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
 	return nil
 }
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
-	return d.docker.Commit(ctx, unparsedToplevel)
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *openshiftImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
+	return d.docker.CommitWithOptions(ctx, options)
 }
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
index 951b5d098..d4ebe413b 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -435,7 +435,11 @@ func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, si
 	return nil
 }
 
-func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (d *ostreeImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	runtime.LockOSThread()
 	defer runtime.UnlockOSThread()
 
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index 85a89f253..0b597ce26 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -151,9 +151,9 @@ func openRepo(path string) (*C.struct_OstreeRepo, error) {
 	var cerr *C.GError
 	cpath := C.CString(path)
 	defer C.free(unsafe.Pointer(cpath))
-	pathc := C.g_file_new_for_path(cpath)
-	defer C.g_object_unref(C.gpointer(pathc))
-	repo := C.ostree_repo_new(pathc)
+	file := C.g_file_new_for_path(cpath)
+	defer C.g_object_unref(C.gpointer(file))
+	repo := C.ostree_repo_new(file)
 	r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
 	if !r {
 		C.g_object_unref(C.gpointer(repo))
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
index 4b7122f92..9a2219e79 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
@@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
 func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
 }
 
+// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+// Returns "" if the uncompressed digest is unknown.
+func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
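
A minimal in-memory illustration of the contract the two new methods define (noCache above deliberately implements neither); this toy type is not part of c/image:

package example

import (
	digest "github.com/opencontainers/go-digest"
)

// toyTOCCache keeps locally verified TOC→uncompressed-digest pairs in memory.
type toyTOCCache struct {
	tocToDiffID map[digest.Digest]digest.Digest
}

func (c *toyTOCCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	return c.tocToDiffID[tocDigest] // "" if unknown
}

func (c *toyTOCCache) RecordTOCUncompressedPair(tocDigest, uncompressed digest.Digest) {
	// Per the warning above: only record pairs verified locally, never pairs
	// claimed by a remote author, or the cache can be poisoned.
	c.tocToDiffID[tocDigest] = uncompressed
}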
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index b83a257e4..782c86d06 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -99,8 +99,18 @@ func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser,
 	return internal.AlgorithmCompressor(algo)(dest, m, level)
 }
 
-// CompressStreamWithMetadata returns the compressor by its name.  If the compression
-// generates any metadata, it is written to the provided metadata map.
+// CompressStreamWithMetadata returns the compressor by its name.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// Such a partial decompression is not implemented by this package; it is consumed e.g. by
+// github.com/containers/storage/pkg/chunked .
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
 func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
 	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
 }
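
A usage sketch of CompressStreamWithMetadata; the buffer and input are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	var compressed bytes.Buffer
	annotations := map[string]string{}
	w, err := compression.CompressStreamWithMetadata(&compressed, annotations, compression.Zstd, nil)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, strings.NewReader("layer contents")); err != nil {
		panic(err)
	}
	w.Close() // required even though the input needs no closing
	// For metadata-producing compressors (e.g. the zstd:chunked variant),
	// annotations now holds the integrity/TOC metadata to be stored as
	// manifest annotations; plain zstd leaves it empty.
	fmt.Println(len(annotations))
}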
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
index d6f85274d..e715705b4 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -3,6 +3,15 @@ package internal
 import "io"
 
 // CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+//
+// Compressing a stream may create integrity data that allows consuming the compressed byte stream
+// while only using subsets of the compressed data (if the compressed data is seekable and most
+// of the uncompressed data is already present via other means), while still protecting integrity
+// of the compressed stream against unwanted modification. (In OCI container images, this metadata
+// is usually carried in manifest annotations.)
+//
+// If the compression generates such metadata, it is written to the provided metadata map.
+//
 // The caller must call Close() on the stream (even if the input stream does not need closing!).
 type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
 
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index 45427a350..9ac050512 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -1,6 +1,7 @@
 package sysregistriesv2
 
 import (
+	"errors"
 	"fmt"
 	"io/fs"
 	"os"
@@ -248,6 +249,11 @@ type V2RegistriesConf struct {
 	// potentially use all unqualified-search registries
 	ShortNameMode string `toml:"short-name-mode"`
 
+	// AdditionalLayerStoreAuthHelper is a helper binary that receives
+	// registry credentials and passes them to the Additional Layer Store for
+	// registry authentication. These credentials are only collected when pulling (not pushing).
+	AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
+
 	shortNameAliasConf
 
 	// If you add any field, make sure to update Nonempty() below.
@@ -739,6 +745,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
 		// Enforce v2 format for drop-in-configs.
 		dropIn, err := loadConfigFile(path, true)
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				// file must have been removed between the directory listing
+				// and the open call; ignore that, as it is an expected race
+				continue
+			}
 			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
 		}
 		config.updateWithConfigurationFrom(dropIn)
@@ -825,6 +836,16 @@ func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
 	return config.partialV2.CredentialHelpers, nil
 }
 
+// AdditionalLayerStoreAuthHelper returns the helper for passing registry
+// credentials to Additional Layer Store.
+func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
+	config, err := getConfig(sys)
+	if err != nil {
+		return "", err
+	}
+	return config.partialV2.AdditionalLayerStoreAuthHelper, nil
+}
+
 // refMatchingSubdomainPrefix returns the length of ref
 // iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
 // reference.Domain(), reference.Named.Name() or reference.Reference.String()
@@ -1051,6 +1072,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
 		c.shortNameMode = updates.shortNameMode
 	}
 
+	// == Merge AdditionalLayerStoreAuthHelper:
+	if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
+		c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
+	}
+
 	// == Merge aliasCache:
 	// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
 	c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
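
The new field is wired through like the other v2 options: a drop-in registries.conf fragment would set it as, e.g., additional-layer-store-auth-helper = "als-auth-helper" (the helper binary name is illustrative), and code reads it back via the new accessor:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
)

func main() {
	// nil uses the default SystemContext and config search paths.
	helper, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(nil)
	if err != nil {
		panic(err)
	}
	if helper != "" {
		fmt.Println("pull credentials will be forwarded via", helper)
	}
}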
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
index f6c0576e0..4e0ee57e9 100644
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
@@ -3,6 +3,7 @@ package tlsclientconfig
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
@@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
 			logrus.Debugf(" crt: %s", fullPath)
 			data, err := os.ReadFile(fullPath)
 			if err != nil {
-				if os.IsNotExist(err) {
-					// Dangling symbolic link?
-					// Race with someone who deleted the
-					// file after we read the directory's
-					// list of contents?
-					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
+				if errors.Is(err, os.ErrNotExist) {
+					// file must have been removed between the directory listing
+					// and the open call; ignore that, as it is an expected race
 					continue
 				}
 				return err
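
The same listing/open race pattern, extracted as a standalone sketch (the directory path is illustrative):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// readAllFiles tolerates files vanishing between ReadDir and ReadFile,
// mirroring the race handling above.
func readAllFiles(dir string) (map[string][]byte, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	out := map[string][]byte{}
	for _, e := range entries {
		data, err := os.ReadFile(filepath.Join(dir, e.Name()))
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				continue // removed since the listing: an expected race
			}
			return nil, err
		}
		out[e.Name()] = data
	}
	return out, nil
}

func main() {
	files, err := readAllFiles("/etc/containers/certs.d")
	fmt.Println(len(files), err)
}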
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go
index a0b347410..0af4523ff 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_dest.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go
@@ -17,11 +17,13 @@ import (
 	"sync/atomic"
 
 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
 	"github.com/containers/image/v5/internal/imagedestination/impl"
 	"github.com/containers/image/v5/internal/imagedestination/stubs"
+	srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
+	srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
-	"github.com/containers/image/v5/internal/set"
 	"github.com/containers/image/v5/internal/signature"
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/manifest"
@@ -31,6 +33,7 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked"
+	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -57,8 +60,9 @@ type storageImageDestination struct {
 	imageRef              storageReference
 	directory             string                   // Temporary directory where we store blobs until Commit() time
 	nextTempFileID        atomic.Int32             // A counter that we use for computing filenames to assign to blobs
-	manifest              []byte                   // Manifest contents, temporary
-	manifestDigest        digest.Digest            // Valid if len(manifest) != 0
+	manifest              []byte                   // (Per-instance) manifest contents, or nil if not yet known.
+	manifestMIMEType      string                   // Valid if manifest != nil
+	manifestDigest        digest.Digest            // Valid if manifest != nil
 	untrustedDiffIDValues []digest.Digest          // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
 	signatures            []byte                   // Signature contents, temporary
 	signatureses          map[digest.Digest][]byte // Instance signature contents, temporary
@@ -84,18 +88,38 @@ type storageImageDestinationLockProtected struct {
 	currentIndex          int                    // The index of the layer to be committed (i.e., lower indices have already been committed)
 	indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
 
-	// In general, a layer is identified either by (compressed) digest, or by TOC digest.
+	// Externally, a layer is identified either by (compressed) digest, or by TOC digest
+	// (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren’t two
+	// different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest.
+	// We may or may not know the relationships between these three values.
+	//
 	// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
 	// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
-	// recompute those values for incomding layers — the point of the reuse is that we don’t need to consume the incoming layer.)
-
-	// Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called.
-	// The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted.
-	blobDiffIDs      map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
-	indexToTOCDigest map[int]digest.Digest           // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
+	// recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.)
+	//
+	// Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available
+	// before commitLayer is called.
+	// The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted).
+	//
+	// WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle.
+	// The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently.
+	// But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs
+	// for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some
+	// other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer
+	// to mismatching contents.
+	// Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic,
+	// instead of interpreting these fields, especially blobDiffIDs, directly.
+	//
+	// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
+	// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
+	indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
+	// Mapping from layer index to a TOC Digest.
+	// If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well.
+	indexToTOCDigest map[int]digest.Digest
+	blobDiffIDs      map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.
 
 	// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
-	// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
+	// should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
 	// They are looked up in the order they are mentioned above.
 	diffOutputs            map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
 	indexToAdditionalLayer map[int]storage.AdditionalLayer             // Mapping from layer index to their corresponding additional layer
@@ -103,6 +127,9 @@ type storageImageDestinationLockProtected struct {
 	filenames map[digest.Digest]string
 	// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
 	fileSizes map[digest.Digest]int64
+
+	// Config
+	configDigest digest.Digest // "" if N/A or not known yet.
 }
 
 // addedLayerInfo records data about a layer to use in this image.
@@ -145,9 +172,12 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
 		},
 		indexToStorageID: make(map[int]string),
 		lockProtected: storageImageDestinationLockProtected{
-			indexToAddedLayerInfo:  make(map[int]addedLayerInfo),
-			blobDiffIDs:            make(map[digest.Digest]digest.Digest),
-			indexToTOCDigest:       make(map[int]digest.Digest),
+			indexToAddedLayerInfo: make(map[int]addedLayerInfo),
+
+			indexToDiffID:    make(map[int]digest.Digest),
+			indexToTOCDigest: make(map[int]digest.Digest),
+			blobDiffIDs:      make(map[digest.Digest]digest.Digest),
+
 			diffOutputs:            make(map[int]*graphdriver.DriverWithDifferOutput),
 			indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
 			filenames:              make(map[digest.Digest]string),
@@ -180,6 +210,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
 	return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
 }
 
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	if configErr != nil {
+		return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
+	}
+	s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
+	return nil
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -193,7 +235,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return info, err
 	}
 
-	if options.IsConfig || options.LayerIndex == nil {
+	if options.IsConfig {
+		s.lock.Lock()
+		defer s.lock.Unlock()
+		if s.lockProtected.configDigest != "" {
+			return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
+				s.lockProtected.configDigest.String(), info.Digest.String())
+		}
+		s.lockProtected.configDigest = info.Digest
+		return info, nil
+	}
+	if options.LayerIndex == nil {
 		return info, nil
 	}
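
A distilled sketch of the set-once rule the hunk above enforces for the config digest (the helper name is hypothetical):

package example

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// recordOnce stores incoming into slot exactly once; recording a second,
// different value is a hard error rather than a silent overwrite.
// The caller must hold the lock protecting slot.
func recordOnce(slot *digest.Digest, incoming digest.Digest) error {
	if *slot != "" {
		return fmt.Errorf("after config %q, refusing to record another config %q",
			slot.String(), incoming.String())
	}
	*slot = incoming
	return nil
}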
 
@@ -290,23 +342,81 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
+	inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
+
+	// The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers,
+	// unfixably ambiguous: there are two possible “views” of the same file (same compressed digest),
+	// the traditional “view” that decompresses the primary stream and consumes a tar file,
+	// and the partial-pull “view” that starts with the TOC.
+	// The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents;
+	// the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against
+	// the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents -
+	// making the partial pull completely pointless.
+	//
+	// Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit”
+	// to uncompressed layer digest values via the config's RootFS.DiffIDs array:
+	// they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we
+	// do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest.
+	//
+	// Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through:
+	// the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC
+	// ensure the layer is identified by DiffID, i.e. using the traditional “view”.
+	//
+	// But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI),
+	// don't allow a partial pull, and fall back to PutBlobWithOptions.
+	//
+	// (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance,
+	// but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images
+	// is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.)
+	var untrustedDiffID digest.Digest // "" if unknown
+	udid, err := s.untrustedLayerDiffID(options.LayerIndex)
+	if err != nil {
+		var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+		switch {
+		case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+			// PutBlobPartial is a private API, so all callers are within c/image, and should have called
+			// NoteOriginalOCIConfig first.
+			return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
+		case errors.As(err, &diffIDUnknownErr):
+			if inputTOCDigest != nil {
+				return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err)
+			}
+			untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through
+		default:
+			return private.UploadedBlob{}, err
+		}
+	} else {
+		untrustedDiffID = udid
+	}
+
 	fetcher := zstdFetcher{
 		chunkAccessor: chunkAccessor,
 		ctx:           ctx,
 		blobInfo:      srcInfo,
 	}
 
+	defer func() {
+		var perr chunked.ErrFallbackToOrdinaryLayerDownload
+		if errors.As(retErr, &perr) {
+			retErr = private.NewErrFallbackToOrdinaryLayerDownload(retErr)
+		}
+	}()
+
 	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
 	if err != nil {
 		return private.UploadedBlob{}, err
 	}
 
-	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+	out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
 	if err != nil {
-		return private.UploadedBlob{}, err
+		return private.UploadedBlob{}, fmt.Errorf("staging a partially-pulled layer: %w", err)
 	}
 	succeeded := false
 	defer func() {
@@ -316,25 +426,61 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	}()
 
 	if out.TOCDigest == "" && out.UncompressedDigest == "" {
-		return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
+		return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set")
 	}
 
 	blobDigest := srcInfo.Digest
 
 	s.lock.Lock()
-	if out.UncompressedDigest != "" {
-		// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
-		// responsible for ensuring blobDigest has been validated.
-		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
-	} else {
-		// Don’t identify layers by TOC if UncompressedDigest is available.
-		// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
-		// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
-		//   That TOC is quite unlikely to match with any other TOC value.
-		s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+	if err := func() error { // A scope for defer
+		defer s.lock.Unlock()
+
+		// For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf
+		// (defaults to true, to avoid ambiguity).
+		// c/storage can also be configured to consume a layer not prepared for partial pulls (primarily to allow composefs conversion),
+		// and in that case it always consumes the full blob and always computes the uncompressed digest.
+		if out.UncompressedDigest != "" {
+			// This is centrally enforced later, in commitLayer, but because we have the value available,
+			// we might just as well check immediately.
+			if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID {
+				return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(),
+					out.UncompressedDigest.String(), untrustedDiffID.String())
+			}
+
+			s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
+			if out.TOCDigest != "" {
+				s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+				options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
+			}
+
+			// If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
+			if out.CompressedDigest != "" {
+				if out.CompressedDigest != blobDigest {
+					return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
+						out.CompressedDigest, blobDigest)
+				}
+				// So, record also information about blobDigest, that might benefit reuse.
+				// We trust PrepareStagedLayer to validate or create both values correctly.
+				s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
+				options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
+			}
+		} else {
+			// Sanity-check the defined rules for indexToTOCDigest.
+			if inputTOCDigest == nil {
+				return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String())
+			}
+
+			// Use diffID for layer identity if it is known.
+			if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
+				s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
+			}
+			s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
+		}
+		s.lockProtected.diffOutputs[options.LayerIndex] = out
+		return nil
+	}(); err != nil {
+		return private.UploadedBlob{}, err
 	}
-	s.lockProtected.diffOutputs[options.LayerIndex] = out
-	s.lock.Unlock()
 
 	succeeded = true
 	return private.UploadedBlob{
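
The DiffID-commitment check applied in the hunk above, as an isolated sketch (the function name is hypothetical):

package example

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// checkDiffIDCommitment verifies a locally computed uncompressed digest
// against the DiffID the untrusted config committed to; "" means unknown.
func checkDiffIDCommitment(blobDigest, computed, untrustedDiffID digest.Digest) error {
	if untrustedDiffID != "" && computed != untrustedDiffID {
		return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q",
			blobDigest.String(), computed.String(), untrustedDiffID.String())
	}
	return nil
}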
@@ -372,22 +518,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 	if err := blobDigest.Validate(); err != nil {
 		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
 	}
-	if options.TOCDigest != "" {
+	useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely.
+	if options.TOCDigest != "" && options.LayerIndex != nil {
 		if err := options.TOCDigest.Validate(); err != nil {
 			return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
 		}
+		// Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial.
+		_, err := s.untrustedLayerDiffID(*options.LayerIndex)
+		if err != nil {
+			var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+			switch {
+			case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+				// options.TOCDigest is a private API, so all callers are within c/image, and should have called
+				// NoteOriginalOCIConfig first.
+				return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
+			case errors.As(err, &diffIDUnknownErr):
+				logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err)
+				// But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match.
+			default:
+				return false, private.ReusedBlob{}, err
+			}
+		} else {
+			useTOCDigest = true
+		}
 	}
 
 	// lock the entire method as it executes fairly quickly
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
+	if options.SrcRef != nil && useTOCDigest {
 		// Check if we have the layer in the underlying additional layer store.
 		aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
 		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 			return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
 		} else if err == nil {
+			// Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow,
+			// avoid layer “view” ambiguity.
 			alsTOCDigest := aLayer.TOCDigest()
 			if alsTOCDigest != options.TOCDigest {
 				// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
@@ -459,49 +626,45 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
 			}
-			if len(layers) > 0 {
-				if size != -1 {
-					s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
-					return true, private.ReusedBlob{
-						Digest: blobDigest,
-						Size:   size,
-					}, nil
-				}
-				if !options.CanSubstitute {
-					return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest)
-				}
-				s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest
-				return true, private.ReusedBlob{
-					Digest: uncompressedDigest,
-					Size:   layers[0].UncompressedSize,
-				}, nil
+			if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+				s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest
+				return true, reused, nil
 			}
 		}
 	}
 
-	if options.TOCDigest != "" && options.LayerIndex != nil {
+	if useTOCDigest {
+		// Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
+		// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
+		uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
+		if uncompressedDigest != "" {
+			layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
+			}
+			if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
+				reused.MatchedByTOCDigest = true
+				return true, reused, nil
+			}
+		}
 		// Check if we have a chunked layer in storage with the same TOC digest.
 		layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)
-
 		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 			return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
 		}
-		if len(layers) > 0 {
-			if size != -1 {
-				s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
-				return true, private.ReusedBlob{
-					Digest:             blobDigest,
-					Size:               size,
-					MatchedByTOCDigest: true,
-				}, nil
-			} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
-				s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
-				return true, private.ReusedBlob{
-					Digest:             layers[0].UncompressedDigest,
-					Size:               layers[0].UncompressedSize,
-					MatchedByTOCDigest: true,
-				}, nil
+		if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
+			if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
+				// Determine an uncompressed digest if at all possible, to use a traditional image ID
+				// and to maximize image reuse.
+				uncompressedDigest = layers[0].UncompressedDigest
 			}
+			if uncompressedDigest != "" {
+				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
+			}
+			s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
+			reused.MatchedByTOCDigest = true
+			return true, reused, nil
 		}
 	}
 
@@ -509,49 +672,146 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
 	return false, private.ReusedBlob{}, nil
 }
 
+// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not.
+// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again.
+func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) {
+	if len(layers) > 0 {
+		if blobSize != -1 {
+			return true, private.ReusedBlob{
+				Digest: blobDigest,
+				Size:   blobSize,
+			}
+		} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
+			return true, private.ReusedBlob{
+				Digest:               layers[0].UncompressedDigest,
+				Size:                 layers[0].UncompressedSize,
+				CompressionOperation: types.Decompress,
+				CompressionAlgorithm: nil,
+			}
+		}
+	}
+	return false, private.ReusedBlob{}
+}
+
+// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
+type trustedLayerIdentityData struct {
+	// true if we decided the layer should be identified by tocDigest, false if by diffID
+	// This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value.
+	layerIdentifiedByTOC bool
+
+	diffID     digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
+	tocDigest  digest.Digest // A digest of the layer’s TOC, or "" if unknown; must be set if layerIdentifiedByTOC
+	blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
+}
+
+// logString() returns a representation of trusted suitable for identifying a layer in logs and errors.
+// The string is already quoted to expose malicious input and does not need to be quoted again.
+// Note that it does not include _all_ of the contents.
+func (trusted trustedLayerIdentityData) logString() string {
+	return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
+}
+
+// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
+// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
+// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
+//
+// The caller must hold s.lock.
+func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) {
+	// The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set,
+	// even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available.
+	//
+	// If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find
+	// a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file:
+	// but the layer in question, and possibly child layers, might already have been committed to storage.
+	// A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set
+	// new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again.
+	//
+	// Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match
+	// and proceed to the originally-planned TOC match.
+
+	res := trustedLayerIdentityData{}
+	diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex]
+	if layerIdentifiedByDiffID {
+		res.layerIdentifiedByTOC = false
+		res.diffID = diffID
+	}
+	if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok {
+		res.tocDigest = tocDigest
+		if !layerIdentifiedByDiffID {
+			res.layerIdentifiedByTOC = true
+		}
+	}
+	if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok {
+		if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
+			// This is the only data we have, so it is clearly self-consistent.
+			res.layerIdentifiedByTOC = false
+			res.diffID = otherDiffID
+			res.blobDigest = blobDigest
+			layerIdentifiedByDiffID = true
+		} else {
+			// We have set up the layer identity without referring to blobDigest:
+			// an attacker might have used a manifest with non-matching tocDigest and blobDigest.
+			// But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest,
+			// we know blobDigest is fine as well.
+			if res.diffID != "" && otherDiffID == res.diffID {
+				res.blobDigest = blobDigest
+			}
+		}
+	}
+	if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
+		return trustedLayerIdentityData{}, false // We found nothing at all
+	}
+	return res, true
+}
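
To make the precedence rules above concrete, here is a minimal standalone sketch (hypothetical digest values; plain maps stand in for the s.lockProtected fields) of how the three data sources are reconciled:

package main

import "fmt"

// identity is a simplified stand-in for trustedLayerIdentityData.
type identity struct {
	byTOC                         bool
	diffID, tocDigest, blobDigest string
}

// resolve mirrors trustedLayerIdentityDataLocked: per-index data wins, a diffID
// known for the index beats a TOC digest, and blobDigest is only trusted if it
// is the sole source or consistent with an already-trusted diffID.
func resolve(indexToDiffID, indexToTOCDigest map[int]string, blobDiffIDs map[string]string,
	index int, blobDigest string) (identity, bool) {
	res := identity{}
	diffID, haveDiffID := indexToDiffID[index]
	if haveDiffID {
		res.diffID = diffID
	}
	if toc, ok := indexToTOCDigest[index]; ok {
		res.tocDigest = toc
		if !haveDiffID {
			res.byTOC = true
		}
	}
	if other, ok := blobDiffIDs[blobDigest]; ok {
		switch {
		case !haveDiffID && !res.byTOC: // the only data we have, so it is self-consistent
			res.diffID, res.blobDigest = other, blobDigest
			haveDiffID = true
		case res.diffID != "" && other == res.diffID: // blobDigest agrees with a trusted diffID
			res.blobDigest = blobDigest
		}
	}
	if !haveDiffID && !res.byTOC {
		return identity{}, false // we found nothing at all
	}
	return res, true
}

func main() {
	toc := map[int]string{0: "sha256:toc0"}                        // layer 0: partial pull, TOC only
	blobDiffIDs := map[string]string{"sha256:blob1": "sha256:d1"}  // layer 1: diffID known via blob digest
	fmt.Println(resolve(nil, toc, blobDiffIDs, 0, "sha256:blob0")) // identified by TOC
	fmt.Println(resolve(nil, toc, blobDiffIDs, 1, "sha256:blob1")) // identified by diffID, blobDigest trusted
}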
+
 // computeID computes a recommended image ID based on information we have so far.  If
 // the manifest is not of a type that we recognize, we return an empty value, indicating
 // that since we don't have a recommendation, a random ID should be used if one needs
 // to be allocated.
-func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) {
 	// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
 
+	layerInfos := m.LayerInfos()
+
 	// Build the diffID list.  We need the decompressed sums that we've been calculating to
 	// fill in the DiffIDs.  It's expected (but not enforced by us) that the number of
 	// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
 	var diffIDs []digest.Digest
-	switch m := m.(type) {
+	switch m.(type) {
 	case *manifest.Schema1:
-		// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
-		// in reverse of the order in which they were originally listed.
-		for i, compat := range m.ExtractedV1Compatibility {
-			if compat.ThrowAway {
+		// Build a list of the diffIDs we've generated for the non-throwaway FS layers
+		for i, li := range layerInfos {
+			if li.EmptyLayer {
 				continue
 			}
-			blobSum := m.FSLayers[i].BlobSum
-			diffID, ok := s.lockProtected.blobDiffIDs[blobSum]
-			if !ok {
-				// this can, in principle, legitimately happen when a layer is reused by TOC.
-				logrus.Infof("error looking up diffID for layer %q", blobSum.String())
-				return ""
+			trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
+			if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
+				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
 			}
-			diffIDs = append([]digest.Digest{diffID}, diffIDs...)
+			if trusted.diffID == "" {
+				if trusted.layerIdentifiedByTOC {
+					logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID")
+					return "", nil
+				}
+				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest)
+			}
+			diffIDs = append(diffIDs, trusted.diffID)
 		}
 	case *manifest.Schema2, *manifest.OCI1:
 		// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
 		// the diffID list.
 	default:
-		return ""
+		return "", nil
 	}
 
 	// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
 	//
 	// Traditionally that means the same ~config digest, as computed by m.ImageID;
-	// but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
+	// but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
 	// nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see
 	// most of the data.
 	//
-	// So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
+	// So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
 	// must enter into the image ID computation.
 	// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
 	// and to introduce the changed behavior only when partial pulls are used.
@@ -560,28 +820,31 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
 	// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
 	ordinaryImageID, err := m.ImageID(diffIDs)
 	if err != nil {
-		return ""
+		return "", err
 	}
 	tocIDInput := ""
 	hasLayerPulledByTOC := false
-	for i := range m.LayerInfos() {
-		layerValue := ""                                     // An empty string is not a valid digest, so this is unambiguous with the TOC case.
-		tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
-		if ok {
+	for i, li := range layerInfos {
+		trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
+		if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
+			return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
+		}
+		layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
+		if trusted.layerIdentifiedByTOC {
 			hasLayerPulledByTOC = true
-			layerValue = tocDigest.String()
+			layerValue = trusted.tocDigest.String()
 		}
 		tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
 	}
 
 	if !hasLayerPulledByTOC {
-		return ordinaryImageID
+		return ordinaryImageID, nil
 	}
 	// ordinaryImageID is a digest of a config, which is a JSON value.
 	// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
 	tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
 	logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
-	return tocImageID
+	return tocImageID, nil
 }
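
As a worked illustration of the scheme described above (hypothetical digest values, using only github.com/opencontainers/go-digest): the TOC-aware input is a “|”-separated per-layer list, hashed with a leading “@” so it can never collide with the digest of a config, which is always JSON:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Hypothetical three-layer image: layer 1 was identified by TOC, layers 0 and 2 were not.
	perLayerTOC := []string{"", digest.FromString("toc of layer 1").Encoded(), ""}
	tocIDInput := ""
	for _, v := range perLayerTOC {
		tocIDInput += v + "|" // "|" cannot appear in a digest, so the encoding is unambiguous
	}
	// "@" makes the input invalid JSON, so this cannot collide with an ordinary config-based ID.
	fmt.Println(digest.FromString("@With TOC:" + tocIDInput).Encoded())
}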
 
 // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@@ -658,23 +921,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
 	return nil
 }
 
-// singleLayerIDComponent returns a single layer’s the input to computing a layer (chain) ID,
-// and an indication whether the input already has the shape of a layer ID.
-// It returns ("", false) if the layer is not found at all (which should never happen)
-func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
-		return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
-	}
-
-	if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
-		return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
-	}
-	return "", false
-}
-
 // commitLayer commits the specified layer with the given index to the storage.
 // size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
 //
@@ -686,16 +932,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
 // must guarantee that, at any given time, at most one goroutine may execute
 // `commitLayer()`.
 func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
-	// Already committed?  Return early.
 	if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
 		return false, nil
 	}
 
-	// Start with an empty string or the previous layer ID.  Note that
-	// `s.indexToStorageID` can only be accessed by *one* goroutine at any
-	// given time. Hence, we don't need to lock accesses.
-	var parentLayer string
+	var parentLayer string // "" if no parent
 	if index != 0 {
+		// s.indexToStorageID can only be written by this function, and our caller
+		// is responsible for ensuring it can only be called by *one* goroutine at any
+		// given time. Hence, we don't need to lock accesses.
 		prev, ok := s.indexToStorageID[index-1]
 		if !ok {
 			return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
@@ -703,18 +948,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 		parentLayer = prev
 	}
 
-	// Carry over the previous ID for empty non-base layers.
 	if info.emptyLayer {
 		s.indexToStorageID[index] = parentLayer
 		return false, nil
 	}
 
-	// Check if there's already a layer with the ID that we'd give to the result of applying
-	// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
-	// The layerID refers either to the DiffID or the digest of the TOC.
-	layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
-	if layerIDComponent == "" {
-		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
+	// Collect trusted parameters of the layer.
+	s.lock.Lock()
+	trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest)
+	s.lock.Unlock()
+	if !ok {
+		// Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
 		//
 		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
 		// that relies on using a blob digest that has never been seen by the store had better call
@@ -738,23 +982,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 			return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
 		}
 
-		layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
-		if layerIDComponent == "" {
+		s.lock.Lock()
+		trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest)
+		s.lock.Unlock()
+		if !ok {
 			return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
 		}
 	}
 
-	id := layerIDComponent
-	if !layerIDComponentStandalone || parentLayer != "" {
-		id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
+	// Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest,
+	// unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial.
+	if trusted.diffID != "" {
+		untrustedDiffID, err := s.untrustedLayerDiffID(index)
+		if err != nil {
+			var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+			switch {
+			case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+				logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index)
+				return true, nil
+			case errors.As(err, &diffIDUnknownErr):
+				// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
+				// so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull,
+				// i.e. there is no other “view” to worry about.  Sanity-check that we really see the only expected view.
+				//
+				// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
+				// we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already
+				// refused to do a partial pull, so we are in an inconsistent state.
+				if trusted.layerIdentifiedByTOC {
+					return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config",
+						index, trusted.logString())
+				}
+				// else a schema1 image or a non-TOC layer with no ambiguity, let it through
+			default:
+				return false, err
+			}
+		} else if trusted.diffID != untrustedDiffID {
+			return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
+		}
 	}
+
+	id := layerID(parentLayer, trusted)
+
 	if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
 		// There's already a layer that should have the right contents, just reuse it.
 		s.indexToStorageID[index] = layer.ID
 		return false, nil
 	}
 
-	layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
+	layer, err := s.createNewLayer(index, trusted, parentLayer, id)
 	if err != nil {
 		return false, err
 	}
@@ -765,22 +1040,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 	return false, nil
 }
 
-// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
+// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
+func layerID(parentID string, trusted trustedLayerIdentityData) string {
+	var component string
+	mustHash := false
+	if trusted.layerIdentifiedByTOC {
+		// "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case.
+		// But we _must_ hash this below to get a Digest.Encoded()-formatted value.
+		component = "@TOC=" + trusted.tocDigest.Encoded()
+		mustHash = true
+	} else {
+		component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value.
+	}
+
+	if parentID == "" && !mustHash {
+		return component
+	}
+	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
+}
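
A short usage sketch of layerID with hypothetical values: the base layer of a purely diffID-identified chain keeps its diffID as its ID, every other case is hashed together with the parent, and a TOC-identified base layer is hashed even without a parent so that the “@TOC=…” component becomes a valid Encoded() value:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	d0 := digest.FromString("layer 0 contents") // hypothetical diffIDs
	d1 := digest.FromString("layer 1 contents")

	id0 := d0.Encoded() // base layer, diffID-identified: the traditional value, used as-is
	id1 := digest.Canonical.FromString(id0 + "+" + d1.Encoded()).Encoded()

	// TOC-identified base layer: must be hashed even with parentID == "".
	tocComponent := "@TOC=" + digest.FromString("toc of layer 0").Encoded()
	tocID := digest.Canonical.FromString("" + "+" + tocComponent).Encoded()

	fmt.Println(id0, id1, tocID)
}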
+
+// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
 // If the layer cannot be committed yet, the function returns (nil, nil).
-func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
+func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) {
 	s.lock.Lock()
 	diffOutput, ok := s.lockProtected.diffOutputs[index]
 	s.lock.Unlock()
 	if ok {
+		// Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation
+		// in PutBlobPartial.  If the user has opted out of that, but we know a trusted DiffID value
+		// (e.g. from a BlobInfoCache), set it in diffOutput.
+		// That way it will be persisted in storage even if the cache is deleted; also
+		// we can use the value below to avoid the untrustedUncompressedDigest logic.
+		if diffOutput.UncompressedDigest == "" && trusted.diffID != "" {
+			diffOutput.UncompressedDigest = trusted.diffID
+		}
+
 		var untrustedUncompressedDigest digest.Digest
 		if diffOutput.UncompressedDigest == "" {
 			d, err := s.untrustedLayerDiffID(index)
 			if err != nil {
-				return nil, err
-			}
-			if d == "" {
-				logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
-				return nil, nil
+				var diffIDUnknownErr untrustedLayerDiffIDUnknownError
+				switch {
+				case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
+					logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
+					return nil, nil
+				case errors.As(err, &diffIDUnknownErr):
+					// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
+					// so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set
+					// diffOutput.UncompressedDigest above in this function, at the very latest.
+					//
+					// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
+					// commitLayer should have already refused this image when dealing with the “view” ambiguity.
+					return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config",
+						index, trusted.logString())
+				default:
+					return nil, err
+				}
 			}
 
 			untrustedUncompressedDigest = d
@@ -793,7 +1108,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 
 		flags := make(map[string]interface{})
 		if untrustedUncompressedDigest != "" {
-			flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest
+			flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest.String()
 			logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
 		}
 
@@ -826,47 +1141,41 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 
 	// Check if we previously cached a file with that blob's contents.  If we didn't,
 	// then we need to read the desired contents from a layer.
-	var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
-	s.lock.Lock()
-	tocDigest := s.lockProtected.indexToTOCDigest[index]       // "" if not set
-	optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
-	filename, gotFilename := s.lockProtected.filenames[layerDigest]
-	s.lock.Unlock()
-	if gotFilename && tocDigest == "" {
-		// If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
-		// and we don't know the relationship of the layerDigest and TOC digest.
-		// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
-		// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
-		// originally-planned TOC match.
-
-		// Because tocDigest == "", optionaldiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream.
-		trustedUncompressedDigest = optionalDiffID
-		trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
+	var filename string
+	var gotFilename bool
+	if trusted.blobDigest != "" {
+		s.lock.Lock()
+		filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
+		s.lock.Unlock()
+	}
+	var trustedOriginalDigest digest.Digest // For storage.LayerOptions
+	var trustedOriginalSize *int64
+	if gotFilename {
+		// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
+		trustedOriginalDigest = trusted.blobDigest
+		trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
 	} else {
 		// Try to find the layer with contents matching the data we use.
 		var layer *storage.Layer // = nil
-		if tocDigest != "" {
-			layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
-			if err2 == nil && len(layers) > 0 {
+		if trusted.diffID != "" {
+			if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 {
 				layer = &layers[0]
-			} else {
-				return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
 			}
-		} else {
-			// Because tocDigest == "", optionaldiffID must have been set
-			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
-			if err2 == nil && len(layers) > 0 {
+		}
+		if layer == nil && trusted.tocDigest != "" {
+			if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 {
 				layer = &layers[0]
-			} else {
-				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
-				if err2 == nil && len(layers) > 0 {
-					layer = &layers[0]
-				}
 			}
-			if layer == nil {
-				return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
+		}
+		if layer == nil && trusted.blobDigest != "" {
+			if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
 			}
 		}
+		if layer == nil {
+			return nil, fmt.Errorf("layer for blob %s not found", trusted.logString())
+		}
+
 		// Read the layer's contents.
 		noCompression := archive.Uncompressed
 		diffOptions := &storage.DiffOptions{
@@ -874,7 +1183,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 		}
 		diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
 		if err2 != nil {
-			return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
+			return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2)
 		}
 		// Copy the layer diff to a file.  Diff() takes a lock that it holds
 		// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -896,32 +1205,45 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 			return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
 		}
 
-		if optionalDiffID == "" && layer.UncompressedDigest != "" {
-			optionalDiffID = layer.UncompressedDigest
+		if trusted.diffID == "" && layer.UncompressedDigest != "" {
+			trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
 		}
-		// The stream we have is uncompressed, this matches contents of the stream.
-		// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
-		trustedUncompressedDigest = optionalDiffID
-		// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
-		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
-		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+
+		// Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
+		// But we don’t want to just use the size from the manifest if we never saw the compressed blob,
+		// so that we don’t propagate mistakes / attacks.
 		//
-		// We can legitimately set storage.LayerOptions.OriginalDigest to "",
-		// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
-		// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
-		trustedOriginalDigest = optionalDiffID
+		// s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise gotFilename would have been true above.
+		// So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
+		// to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
+		// this new layer copy can help.)
+		if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
+			trustedOriginalDigest = trusted.blobDigest
+			sizeCopy := layer.CompressedSize
+			trustedOriginalSize = &sizeCopy
+		} else {
+			// The stream we have is uncompressed, and it matches trusted.diffID (if known).
+			//
+			// We can legitimately set storage.LayerOptions.OriginalDigest to "",
+			// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
+			// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
+			trustedOriginalDigest = trusted.diffID
+			trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
+		}
 
 		// Allow using the already-collected layer contents without extracting the layer again.
 		//
 		// This only matches against the uncompressed digest.
-		// We don’t have the original compressed data here to trivially set filenames[layerDigest].
-		// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+		// If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
+		// same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
+		// we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
+		// between “original files” and “possibly uncompressed files”.
 		// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
-		if trustedUncompressedDigest != "" {
+		if trusted.diffID != "" {
 			s.lock.Lock()
-			s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
-			s.lockProtected.filenames[trustedUncompressedDigest] = filename
-			s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
+			s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID
+			s.lockProtected.filenames[trusted.diffID] = filename
+			s.lockProtected.fileSizes[trusted.diffID] = fileSize
 			s.lock.Unlock()
 		}
 	}
@@ -934,55 +1256,129 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 	// Build the new layer using the diff, regardless of where it came from.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
-		OriginalDigest:     trustedOriginalDigest,
-		UncompressedDigest: trustedUncompressedDigest,
+		OriginalDigest: trustedOriginalDigest,
+		OriginalSize:   trustedOriginalSize, // nil in many cases
+		// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
+		UncompressedDigest: trusted.diffID,
 	}, file)
 	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-		return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
+		return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err)
 	}
 	return layer, nil
 }
 
+// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
+// to allow using image.FromUnparsedImage.
+type uncommittedImageSource struct {
+	srcImpl.Compat
+	srcImpl.PropertyMethodsInitialize
+	srcImpl.NoSignatures
+	srcImpl.DoesNotAffectLayerInfosForCopy
+	srcStubs.NoGetBlobAtInitialize
+
+	d *storageImageDestination
+}
+
+func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
+	s := &uncommittedImageSource{
+		PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
+			HasThreadSafeGetBlob: true,
+		}),
+		NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),
+
+		d: d,
+	}
+	s.Compat = srcImpl.AddCompat(s)
+	return s
+}
+
+func (u *uncommittedImageSource) Reference() types.ImageReference {
+	return u.d.Reference()
+}
+
+func (u *uncommittedImageSource) Close() error {
+	return nil
+}
+
+func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	return u.d.manifest, u.d.manifestMIMEType, nil
+}
+
+func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	blob, err := u.d.getConfigBlob(info)
+	if err != nil {
+		return nil, -1, err
+	}
+	return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
+}
+
+// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID
+// if the value is not yet available (but it can be available after s.manifests is set).
+// This should only happen for external callers of the transport, not for c/image/copy.
+//
+// Callers of untrustedLayerDiffID before PutManifest must handle this error specially;
+// callers after PutManifest can use the default, reporting an internal error.
+var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented")
+
+// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID
+// if the image’s format does not provide DiffIDs.
+type untrustedLayerDiffIDUnknownError struct {
+	layerIndex int
+}
+
+func (e untrustedLayerDiffIDUnknownError) Error() string {
+	return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
+}
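
These two error kinds follow the usual Go split between a sentinel (matched with errors.Is) and a typed error carrying data (matched with errors.As), exactly as the callers in commitLayer and createNewLayer do. A compact standalone illustration with simplified stand-ins:

package main

import (
	"errors"
	"fmt"
)

var errNotYetAvailable = errors.New("value not yet available") // sentinel stand-in

type unknownError struct{ layerIndex int } // typed stand-in

func (e unknownError) Error() string {
	return fmt.Sprintf("DiffID for layer %d is unknown", e.layerIndex)
}

func classify(err error) string {
	var u unknownError
	switch {
	case errors.Is(err, errNotYetAvailable): // sentinel: matched by identity
		return "retry after the manifest arrives"
	case errors.As(err, &u): // typed: matched by type, fields are accessible
		return fmt.Sprintf("no DiffID anywhere for layer %d", u.layerIndex)
	default:
		return "fatal"
	}
}

func main() {
	fmt.Println(classify(errNotYetAvailable))
	fmt.Println(classify(unknownError{layerIndex: 3}))
	fmt.Println(classify(errors.New("boom")))
}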
+
 // untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
-// If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil).
-// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
+// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError.
+//
+// WARNING: This function does not even validate that the returned digest has a valid format.
+// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication.
 func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
-	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
-	// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
+	// nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
+	// by NoteOriginalOCIConfig and are not being updated any more;
+	// or PutManifest has been called and s.manifest != nil.
 	// Either way this function does not need the protection of s.lock.
-	if s.manifest == nil {
-		return "", nil
-	}
 
 	if s.untrustedDiffIDValues == nil {
-		mt := manifest.GuessMIMEType(s.manifest)
-		if mt != imgspecv1.MediaTypeImageManifest {
-			// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
-			// and then use types.Image.OCIConfig so that we can parse the image.
-			//
-			// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation),
-			// while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull).
-			// So it is not implemented yet.
-			return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
+		// Typically, we expect untrustedDiffIDValues to be set by the generic copy code
+		// via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
+		// of the public types.ImageDestination.
+		if s.manifest == nil {
+			return "", errUntrustedLayerDiffIDNotYetAvailable
 		}
-		man, err := manifest.FromBlob(s.manifest, mt)
+
+		ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
+		unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
+		sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
 		if err != nil {
-			return "", fmt.Errorf("parsing manifest: %w", err)
+			return "", fmt.Errorf("parsing image to be committed: %w", err)
 		}
-
-		cb, err := s.getConfigBlob(man.ConfigInfo())
+		configOCI, err := sourced.OCIConfig(ctx)
 		if err != nil {
-			return "", err
+			return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
 		}
 
-		// retrieve the expected uncompressed digest from the config blob.
-		configOCI := &imgspecv1.Image{}
-		if err := json.Unmarshal(cb, configOCI); err != nil {
-			return "", err
-		}
-		s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
-		if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
-			s.untrustedDiffIDValues = []digest.Digest{}
+		s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
+	}
+
+	// Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer,
+	// and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID,
+	// while still detecting incorrectly-built / confused images.
+	//
+	// schema1 images don’t have DiffID values in the config.
+	// Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty
+	// values as “DiffID unknown”.
+	// For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues)
+	// check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues
+	// changes the number of layers (drops items with Schema1V1Compatibility.ThrowAway).
+	if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool {
+		return d != ""
+	}) {
+		return "", untrustedLayerDiffIDUnknownError{
+			layerIndex: layerIndex,
 		}
 	}
 	if layerIndex >= len(s.untrustedDiffIDValues) {
@@ -991,26 +1387,32 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
 	return s.untrustedDiffIDValues[layerIndex], nil
 }
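
The all-empty check above reads naturally as a small predicate; a standalone sketch using the standard slices package:

package main

import (
	"fmt"
	"slices"

	"github.com/opencontainers/go-digest"
)

// diffIDsUsable mirrors the check above: an array consisting solely of empty
// values (as produced by schema1 conversion) is treated as “DiffID unknown”.
func diffIDsUsable(diffIDs []digest.Digest) bool {
	return slices.ContainsFunc(diffIDs, func(d digest.Digest) bool { return d != "" })
}

func main() {
	fmt.Println(diffIDsUsable([]digest.Digest{"", "", ""}))        // false: schema1-style, all empty
	fmt.Println(diffIDsUsable([]digest.Digest{"sha256:aaaa", ""})) // true: partially filled; empty entries fail later, per layer
}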
 
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-// original manifest list digest, if desired.
+// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDValues from config.
+// The caller must ensure s.lock does not need to be held.
+func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
+	s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
+	if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
+		s.untrustedDiffIDValues = []digest.Digest{}
+	}
+}
+
+// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+// - Uploaded data MAY be visible to others before CommitWithOptions() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
+func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
 
-	if len(s.manifest) == 0 {
-		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+	if s.manifest == nil {
+		return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
 	}
-	toplevelManifest, _, err := unparsedToplevel.Manifest(ctx)
+	toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
 	if err != nil {
 		return fmt.Errorf("retrieving top-level manifest: %w", err)
 	}
 	// If the name we're saving to includes a digest, then check that the
 	// manifests that we're about to save all either match the one from the
-	// unparsedToplevel, or match the digest in the name that we're using.
+	// options.UnparsedToplevel, or match the digest in the name that we're using.
 	if s.imageRef.named != nil {
 		if digested, ok := s.imageRef.named.(reference.Digested); ok {
 			matches, err := manifest.MatchesDigest(s.manifest, digested.Digest())
@@ -1029,7 +1431,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		}
 	}
 	// Find the list of layer blobs.
-	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+	man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
 	if err != nil {
 		return fmt.Errorf("parsing manifest: %w", err)
 	}
@@ -1043,49 +1445,41 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		}, blob.Size); err != nil {
 			return err
 		} else if stopQueue {
-			return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest)
+			return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): commitLayer() not ready to commit for layer %q", blob.Digest)
 		}
 	}
 	var lastLayer string
 	if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts.
 		prev, ok := s.indexToStorageID[len(layerBlobs)-1]
 		if !ok {
-			return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
+			return fmt.Errorf("Internal error: storageImageDestination.CommitWithOptions(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
 		}
 		lastLayer = prev
 	}
 
 	// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
 	// was originally created, in case we're just copying it.  If not, no harm done.
-	options := &storage.ImageOptions{}
+	imgOptions := &storage.ImageOptions{}
 	if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil {
 		logrus.Debugf("setting image creation date to %s", inspect.Created)
-		options.CreationDate = *inspect.Created
+		imgOptions.CreationDate = *inspect.Created
 	}
 
-	// Set up to save the non-layer blobs as data items.  Since we only share layers, they should all be in files, so
-	// we just need to screen out the ones that are actually layers to get the list of non-layers.
-	dataBlobs := set.New[digest.Digest]()
-	for blob := range s.lockProtected.filenames {
-		dataBlobs.Add(blob)
-	}
-	for _, layerBlob := range layerBlobs {
-		dataBlobs.Delete(layerBlob.Digest)
-	}
-	for _, blob := range dataBlobs.Values() {
-		v, err := os.ReadFile(s.lockProtected.filenames[blob])
+	// Set up to save the config as a data item.  Since we only share layers, the config should be in a file.
+	if s.lockProtected.configDigest != "" {
+		v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
 		if err != nil {
-			return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+			return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
 		}
-		options.BigData = append(options.BigData, storage.ImageBigDataOption{
-			Key:    blob.String(),
+		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
+			Key:    s.lockProtected.configDigest.String(),
 			Data:   v,
 			Digest: digest.Canonical.FromBytes(v),
 		})
 	}
-	// Set up to save the unparsedToplevel's manifest if it differs from
+	// Set up to save the options.UnparsedToplevel's manifest if it differs from
 	// the per-platform one, which is saved below.
-	if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+	if !bytes.Equal(toplevelManifest, s.manifest) {
 		manifestDigest, err := manifest.Digest(toplevelManifest)
 		if err != nil {
 			return fmt.Errorf("digesting top-level manifest: %w", err)
@@ -1094,7 +1488,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		if err != nil {
 			return err
 		}
-		options.BigData = append(options.BigData, storage.ImageBigDataOption{
+		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    key,
 			Data:   toplevelManifest,
 			Digest: manifestDigest,
@@ -1107,19 +1501,19 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 	if err != nil {
 		return err
 	}
-	options.BigData = append(options.BigData, storage.ImageBigDataOption{
+	imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 		Key:    key,
 		Data:   s.manifest,
 		Digest: s.manifestDigest,
 	})
-	options.BigData = append(options.BigData, storage.ImageBigDataOption{
+	imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 		Key:    storage.ImageDigestBigDataKey,
 		Data:   s.manifest,
 		Digest: s.manifestDigest,
 	})
 	// Set up to save the signatures, if we have any.
 	if len(s.signatures) > 0 {
-		options.BigData = append(options.BigData, storage.ImageBigDataOption{
+		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    "signatures",
 			Data:   s.signatures,
 			Digest: digest.Canonical.FromBytes(s.signatures),
@@ -1130,7 +1524,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		if err != nil {
 			return err
 		}
-		options.BigData = append(options.BigData, storage.ImageBigDataOption{
+		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    key,
 			Data:   signatures,
 			Digest: digest.Canonical.FromBytes(signatures),
@@ -1143,16 +1537,19 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		return fmt.Errorf("encoding metadata for image: %w", err)
 	}
 	if len(metadata) != 0 {
-		options.Metadata = string(metadata)
+		imgOptions.Metadata = string(metadata)
 	}
 
 	// Create the image record, pointing to the most-recently added layer.
 	intendedID := s.imageRef.id
 	if intendedID == "" {
-		intendedID = s.computeID(man)
+		intendedID, err = s.computeID(man)
+		if err != nil {
+			return err
+		}
 	}
 	oldNames := []string{}
-	img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
+	img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", imgOptions)
 	if err != nil {
 		if !errors.Is(err, storage.ErrDuplicateID) {
 			logrus.Debugf("error creating image: %q", err)
@@ -1173,21 +1570,21 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		// sizes (tracked in the metadata) which might have already
 		// been present with new values, when ideally we'd find a way
 		// to merge them since they all apply to the same image
-		for _, data := range options.BigData {
+		for _, data := range imgOptions.BigData {
 			if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
 				logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
 				return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
 			}
 		}
-		if options.Metadata != "" {
-			if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil {
+		if imgOptions.Metadata != "" {
+			if err := s.imageRef.transport.store.SetMetadata(img.ID, imgOptions.Metadata); err != nil {
 				logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
 				return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
 			}
-			logrus.Debugf("saved image metadata %q", options.Metadata)
+			logrus.Debugf("saved image metadata %q", imgOptions.Metadata)
 		}
 	} else {
-		logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata)
+		logrus.Debugf("created new image ID %q with metadata %q", img.ID, imgOptions.Metadata)
 	}
 
 	// Clean up the unfinished image on any error.
@@ -1210,6 +1607,21 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		}
 		logrus.Debugf("added name %q to image %q", name, img.ID)
 	}
+	if options.ReportResolvedReference != nil {
+		// FIXME? This is using nil for the named reference.
+		// It would be better to also use s.imageRef.named, because that allows us to resolve to the right
+		// digest / manifest (and corresponding signatures).
+		// The problem with that is that resolving such a reference fails if the s.imageRef.named name is moved to a different image
+		// (because it is a tag that moved, or because we have pulled “the same” image for a different architecture).
+		// Right now (2024-11), ReportResolvedReference is only used in c/common/libimage, where the caller only extracts the image ID,
+		// so the name does not matter; to give us options, copy.Options.ReportResolvedReference is explicitly refusing to document
+		// whether the value contains a name.
+		resolved, err := newReference(s.imageRef.transport, nil, intendedID)
+		if err != nil {
+			return fmt.Errorf("creating a resolved reference for (%s, %s): %w", s.imageRef.StringWithinTransport(), intendedID, err)
+		}
+		*options.ReportResolvedReference = resolved
+	}
 
 	commitSucceeded = true
 	return nil
@@ -1222,6 +1634,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
 		return err
 	}
 	s.manifest = bytes.Clone(manifestBlob)
+	if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
+		s.manifest = []byte{}
+	}
+	s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
 	s.manifestDigest = digest
 	return nil
 }
@@ -1244,7 +1660,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
 	if instanceDigest == nil {
 		s.signatures = sigblob
 		s.metadata.SignatureSizes = sizes
-		if len(s.manifest) > 0 {
+		if s.manifest != nil {
 			manifestDigest := s.manifestDigest
 			instanceDigest = &manifestDigest
 		}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go
index 2a1099f67..5775c4acb 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go
@@ -37,7 +37,7 @@ func newReference(transport storageTransport, named reference.Named, id string)
 	}
 	if id != "" {
 		if err := validateImageID(id); err != nil {
-			return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
+			return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
 		}
 	}
 	// We take a copy of the transport, which contains a pointer to the
@@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
 	}
 	if s.id == "" {
 		logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
-		return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
+		// %.0w makes the error visible to errors.Unwrap() without including any text.
+		// ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
+		return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
 	}
 	if loadedImage == nil {
 		img, err := s.transport.store.Image(s.id)
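
The %.0w trick is worth spelling out: a zero-precision verb still records the wrapped error for errors.Is()/errors.Unwrap(), but contributes no text to the message. A minimal standalone demonstration (errNoSuchImage is a stand-in sentinel):

package main

import (
	"errors"
	"fmt"
)

var errNoSuchImage = errors.New("identifier is not an image") // stand-in sentinel

func main() {
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w",
		"containers-storage:example", errNoSuchImage)
	fmt.Println(err)                            // no sentinel text is appended
	fmt.Println(errors.Is(err, errNoSuchImage)) // true: the wrapping is still visible
}
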
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
index 4f501fc22..ff76b2066 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_src.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"slices"
 	"sync"
 
 	"github.com/containers/image/v5/docker/reference"
@@ -34,13 +35,14 @@ type storageImageSource struct {
 	impl.PropertyMethodsInitialize
 	stubs.NoGetBlobAtInitialize
 
-	imageRef              storageReference
-	image                 *storage.Image
-	systemContext         *types.SystemContext // SystemContext used in GetBlob() to create temporary files
-	metadata              storageImageMetadata
-	cachedManifest        []byte     // A cached copy of the manifest, if already known, or nil
-	getBlobMutex          sync.Mutex // Mutex to sync state for parallel GetBlob executions
-	getBlobMutexProtected getBlobMutexProtected
+	imageRef               storageReference
+	image                  *storage.Image
+	systemContext          *types.SystemContext // SystemContext used in GetBlob() to create temporary files
+	metadata               storageImageMetadata
+	cachedManifest         []byte     // A cached copy of the manifest, if already known, or nil
+	cachedManifestMIMEType string     // Valid if cachedManifest != nil
+	getBlobMutex           sync.Mutex // Mutex to sync state for parallel GetBlob executions
+	getBlobMutexProtected  getBlobMutexProtected
 }
 
 // getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
@@ -246,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
 		}
 		return blob, manifest.GuessMIMEType(blob), err
 	}
-	if len(s.cachedManifest) == 0 {
+	if s.cachedManifest == nil {
 		// The manifest is stored as a big data item.
 		// Prefer the manifest corresponding to the user-specified digest, if available.
 		if s.imageRef.named != nil {
@@ -266,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
 		}
 		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
 		// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
-		if len(s.cachedManifest) == 0 {
+		if s.cachedManifest == nil {
 			cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
 			if err != nil {
 				return nil, "", err
 			}
 			s.cachedManifest = cachedBlob
 		}
+		s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
 	}
-	return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+	return s.cachedManifest, s.cachedManifestMIMEType, err
 }
 
 // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
@@ -300,7 +303,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 		uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
 	}
 
-	physicalBlobInfos := []types.BlobInfo{}
+	physicalBlobInfos := []types.BlobInfo{} // Built reversed
 	layerID := s.image.TopLayer
 	for layerID != "" {
 		layer, err := s.imageRef.transport.store.Layer(layerID)
@@ -340,9 +343,10 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 			Size:      size,
 			MediaType: uncompressedLayerType,
 		}
-		physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+		physicalBlobInfos = append(physicalBlobInfos, blobInfo)
 		layerID = layer.Parent
 	}
+	slices.Reverse(physicalBlobInfos)
 
 	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
 	if err != nil {
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 18d4cc2d2..283a32d0e 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -14,8 +14,9 @@ import (
 
 	"github.com/containers/image/v5/internal/imagesource/impl"
 	"github.com/containers/image/v5/internal/imagesource/stubs"
+	"github.com/containers/image/v5/pkg/compression"
+	compressionTypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
-	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	imgspecs "github.com/opencontainers/image-spec/specs-go"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -82,31 +83,47 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 			}
 		}
 
-		// Default to assuming the layer is compressed.
-		layerType := imgspecv1.MediaTypeImageLayerGzip
-
 		// Set up to digest the file as it is.
 		blobIDdigester := digest.Canonical.Digester()
 		reader = io.TeeReader(reader, blobIDdigester.Hash())
 
-		// Set up to digest the file after we maybe decompress it.
-		diffIDdigester := digest.Canonical.Digester()
-		uncompressed, err := pgzip.NewReader(reader)
-		if err == nil {
-			// It is compressed, so the diffID is the digest of the uncompressed version
-			reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
-		} else {
-			// It is not compressed, so the diffID and the blobID are going to be the same
-			diffIDdigester = blobIDdigester
-			layerType = imgspecv1.MediaTypeImageLayer
-			uncompressed = nil
-		}
-		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-		if _, err := io.Copy(io.Discard, reader); err != nil {
-			return nil, fmt.Errorf("error reading %q: %v", filename, err)
-		}
-		if uncompressed != nil {
-			uncompressed.Close()
+		var layerType string
+		var diffIDdigester digest.Digester
+		// If necessary, digest the file after we decompress it.
+		if err := func() error { // A scope for defer
+			format, decompressor, reader, err := compression.DetectCompressionFormat(reader)
+			if err != nil {
+				return err
+			}
+			if decompressor != nil {
+				uncompressed, err := decompressor(reader)
+				if err != nil {
+					return err
+				}
+				defer uncompressed.Close()
+				// It is compressed, so the diffID is the digest of the uncompressed version
+				diffIDdigester = digest.Canonical.Digester()
+				reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
+				switch format.Name() {
+				case compressionTypes.GzipAlgorithmName:
+					layerType = imgspecv1.MediaTypeImageLayerGzip
+				case compressionTypes.ZstdAlgorithmName:
+					layerType = imgspecv1.MediaTypeImageLayerZstd
+				default: // This is incorrect, but we have no good options, and it is what this transport was historically doing.
+					layerType = imgspecv1.MediaTypeImageLayerGzip
+				}
+			} else {
+				// It is not compressed, so the diffID and the blobID are going to be the same
+				diffIDdigester = blobIDdigester
+				layerType = imgspecv1.MediaTypeImageLayer
+			}
+			// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+			if _, err := io.Copy(io.Discard, reader); err != nil {
+				return fmt.Errorf("error reading %q: %w", filename, err)
+			}
+			return nil
+		}(); err != nil {
+			return nil, err
 		}
 
 		// Grab our uncompressed and possibly-compressed digests and sizes.
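
The rewritten block relies on compression.DetectCompressionFormat, which sniffs the stream and returns the detected algorithm, an optional decompressor, and a replacement reader that includes the sniffed bytes. A minimal usage sketch of that API, assuming a hypothetical local file:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	format, decompressor, reader, err := compression.DetectCompressionFormat(f)
	if err != nil {
		panic(err)
	}
	if decompressor != nil {
		uncompressed, err := decompressor(reader)
		if err != nil {
			panic(err)
		}
		defer uncompressed.Close()
		reader = uncompressed
		fmt.Println("compressed with:", format.Name())
	} else {
		fmt.Println("not compressed")
	}
	n, err := io.Copy(io.Discard, reader) // consume the (possibly decompressed) payload
	if err != nil {
		panic(err)
	}
	fmt.Println("payload bytes:", n)
}
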
@@ -152,7 +169,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 	// Encode and digest the image configuration blob.
 	configBytes, err := json.Marshal(&config)
 	if err != nil {
-		return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
+		return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err)
 	}
 	configID := digest.Canonical.FromBytes(configBytes)
 	blobs[configID] = tarballBlob{
@@ -177,7 +194,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 	// Encode the manifest.
 	manifestBytes, err := json.Marshal(&manifest)
 	if err != nil {
-		return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
+		return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err)
 	}
 
 	// Return the image.
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
index 63d835530..b33208a51 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go
@@ -38,13 +38,13 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
 		if filename == "-" {
 			stdin, err = io.ReadAll(os.Stdin)
 			if err != nil {
-				return nil, fmt.Errorf("error buffering stdin: %v", err)
+				return nil, fmt.Errorf("error buffering stdin: %w", err)
 			}
 			continue
 		}
 		f, err := os.Open(filename)
 		if err != nil {
-			return nil, fmt.Errorf("error opening %q: %v", filename, err)
+			return nil, fmt.Errorf("error opening %q: %w", filename, err)
 		}
 		f.Close()
 	}
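
The %v→%w conversions here are behavior-relevant rather than cosmetic: %w keeps the wrapped error in the chain, so callers can still match it with errors.Is or errors.As. A minimal sketch:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openChecked(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return fmt.Errorf("error opening %q: %w", name, err)
	}
	return f.Close()
}

func main() {
	err := openChecked("/no/such/file")
	// With %w the original error survives wrapping; with %v this prints false.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}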
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 7d6097346..9a7a0da2b 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -643,6 +643,7 @@ type SystemContext struct {
 	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
 	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
 	// in order to not break any existing docker's integration tests.
+	// Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect.
 	DockerDisableV1Ping bool
 	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
 	DockerDisableDestSchema1MIMETypes bool
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 9e0338158..611324a2e 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
 	// VersionMajor is for API-incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 31
+	VersionMinor = 34
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 1
+	VersionPatch = 3
 
 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index c2474c7f2..2102e938d 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -17,13 +17,13 @@ env:
     ####
     #### Cache-image names to test with (double-quotes around names are critical)
     ###
-    FEDORA_NAME: "fedora-39"
+    FEDORA_NAME: "fedora-41"
     DEBIAN_NAME: "debian-13"
 
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20240513t140131z-f40f39d13"
+    IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
 
@@ -167,13 +167,26 @@ vendor_task:
     build_script: make vendor
     test_script: hack/tree_status.sh
 
-
 cross_task:
     alias: cross
     container:
-        image: golang:1.21
+        image: golang:1.22
     build_script: make cross
 
+gofix_task:
+    alias: gofix
+    container:
+        image: golang:1.22
+    build_script: go fix ./...
+    test_script: git diff --exit-code
+
+codespell_task:
+    alias: codespell
+    container:
+        image: python
+    build_script: pip install codespell
+    test_script: codespell
+
 
 # Status aggregator for all tests.  This task simply ensures a defined
 # set of tasks all passed, and allows confirming that based on the status
@@ -190,6 +203,8 @@ success_task:
         - meta
         - vendor
         - cross
+        - gofix
+        - codespell
     container:
         image: golang:1.21
     clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"'  # Source code not needed
diff --git a/vendor/github.com/containers/storage/.codespellrc b/vendor/github.com/containers/storage/.codespellrc
new file mode 100644
index 000000000..2af969196
--- /dev/null
+++ b/vendor/github.com/containers/storage/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
+ignore-words-list = afile,flate,prevend,Plack,worl
diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml
index 20968466c..ec11f5dae 100644
--- a/vendor/github.com/containers/storage/.golangci.yml
+++ b/vendor/github.com/containers/storage/.golangci.yml
@@ -1,11 +1,7 @@
 ---
 run:
   concurrency: 6
-  deadline: 5m
-  skip-dirs-use-default: true
+  timeout: 5m
 linters:
   enable:
     - gofumpt
-  disable:
-    - errcheck
-    - staticcheck
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index 7ee2642fc..518fd7f6e 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -32,6 +32,11 @@ BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
 GO ?= go
 TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
 
+# N/B: This value is managed by Renovate, manual changes are
+# possible, as long as they don't disturb the formatting
+# (i.e. DO NOT ADD A 'v' prefix!)
+GOLANGCI_LINT_VERSION := 1.64.5
+
 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
 
 clean: ## remove all built files
@@ -41,7 +46,7 @@ containers-storage: ## build using gc on the host
 	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 
 codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
+	codespell
 
 binary local-binary: containers-storage
 
@@ -53,6 +58,8 @@ local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
 		os=`echo $${target} | cut -f1 -d/` ; \
 		arch=`echo $${target} | cut -f2 -d/` ; \
 		suffix=$${os}.$${arch} ; \
+		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) ./... ; \
+		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) ./... || exit 1 ; \
 		echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
 		env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
 	done
@@ -72,7 +79,7 @@ local-validate validate: install.tools ## validate DCO on the host
 	@./hack/git-validation.sh
 
 install.tools:
-	$(MAKE) -C tests/tools
+	$(MAKE) -C tests/tools GOLANGCI_LINT_VERSION=$(GOLANGCI_LINT_VERSION)
 
 install.docs: docs
 	$(MAKE) -C docs install
diff --git a/vendor/github.com/containers/storage/OWNERS b/vendor/github.com/containers/storage/OWNERS
index c169581de..a90f7e312 100644
--- a/vendor/github.com/containers/storage/OWNERS
+++ b/vendor/github.com/containers/storage/OWNERS
@@ -1,32 +1,14 @@
 approvers:
-  - Luap99
-  - TomSweeneyRedHat
-  - cevich
-  - edsantiago
-  - flouthoc
   - giuseppe
-  - haircommander
   - kolyshkin
-  - mrunalp
   - mtrmac
   - nalind
   - rhatdan
-  - saschagrunert
-  - umohnani8
   - vrothberg
 reviewers:
-  - Luap99
+  - Honny1
   - TomSweeneyRedHat
-  - cevich
-  - edsantiago
   - flouthoc
-  - giuseppe
-  - haircommander
   - kolyshkin
   - mrunalp
-  - mtrmac
-  - nalind
-  - rhatdan
-  - saschagrunert
-  - umohnani8
   - vrothberg
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index b7921ae87..0af844be0 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.54.0
+1.57.2
diff --git a/vendor/github.com/containers/storage/check.go b/vendor/github.com/containers/storage/check.go
index e58084fc7..e8837ff95 100644
--- a/vendor/github.com/containers/storage/check.go
+++ b/vendor/github.com/containers/storage/check.go
@@ -8,6 +8,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -79,7 +80,7 @@ type CheckOptions struct {
 // layer to the contents that we'd expect it to have to ignore certain
 // discrepancies
 type checkIgnore struct {
-	ownership, timestamps, permissions bool
+	ownership, timestamps, permissions, filetype bool
 }
 
 // CheckMost returns a CheckOptions with mostly just "quick" checks enabled.
@@ -138,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
 		if strings.Contains(o, "ignore_chown_errors=true") {
 			ignore.ownership = true
 		}
-		if strings.HasPrefix(o, "force_mask=") {
+		if strings.Contains(o, "force_mask=") {
+			ignore.ownership = true
 			ignore.permissions = true
+			ignore.filetype = true
 		}
 	}
 	for o := range s.pullOptions {
@@ -304,7 +307,14 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
 							archiveErr = err
 						}
 						// consume any trailer after the EOF marker
-						io.Copy(io.Discard, diffReader)
+						if _, err := io.Copy(io.Discard, diffReader); err != nil {
+							err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err)
+							if isReadWrite {
+								report.Layers[layerID] = append(report.Layers[layerID], err)
+							} else {
+								report.ROLayers[layerID] = append(report.ROLayers[layerID], err)
+							}
+						}
 						wg.Done()
 					}(id, reader)
 					wg.Wait()
@@ -366,7 +376,7 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
 			if options.LayerMountable {
 				func() {
 					// Mount the layer.
-					mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel})
+					mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel, Options: []string{"ro"}})
 					if err != nil {
 						err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
 						if isReadWrite {
@@ -762,12 +772,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
 		return d
 	}
 	isUnaccounted := func(errs []error) bool {
-		for _, err := range errs {
-			if errors.Is(err, ErrLayerUnaccounted) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(errs, func(err error) bool {
+			return errors.Is(err, ErrLayerUnaccounted)
+		})
 	}
 	sort.Slice(layersToDelete, func(i, j int) bool {
 		// we've not heard of either of them, so remove them in the order the driver suggested
@@ -828,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
 // compareFileInfo returns a string summarizing what's different between the two checkFileInfos
 func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
 	var comparison []string
-	if a.typeflag != b.typeflag {
+	if a.typeflag != b.typeflag && !ignore.filetype {
 		comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
 	}
 	if idmap != nil && !idmap.Empty() {
@@ -955,6 +962,9 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
 					mtime:    mtime,
 				}
 			}
+		case tar.TypeXGlobalHeader:
+			// ignore, since even though it looks like a valid pathname, it doesn't end
+			// up on the filesystem
 		default:
 			// treat these as TypeReg items
 			delete(c.directory, components[0])
@@ -966,9 +976,6 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
 				mode:     mode,
 				mtime:    mtime,
 			}
-		case tar.TypeXGlobalHeader:
-			// ignore, since even though it looks like a valid pathname, it doesn't end
-			// up on the filesystem
 		}
 		return
 	}
@@ -998,12 +1005,12 @@ func (c *checkDirectory) remove(path string) {
 func (c *checkDirectory) header(hdr *tar.Header) {
 	name := path.Clean(hdr.Name)
 	dir, base := path.Split(name)
-	if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+	if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
 		if base == archive.WhiteoutOpaqueDir {
 			c.remove(path.Clean(dir))
 			c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
 		} else {
-			c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
+			c.remove(path.Join(dir, file))
 		}
 	} else {
 		if hdr.Typeflag == tar.TypeLink {
@@ -1037,7 +1044,7 @@ func (c *checkDirectory) header(hdr *tar.Header) {
 
 // headers updates a checkDirectory using information from the passed-in header slice
 func (c *checkDirectory) headers(hdrs []*tar.Header) {
-	hdrs = append([]*tar.Header{}, hdrs...)
+	hdrs = slices.Clone(hdrs)
 	// sort the headers from the diff to ensure that whiteouts appear
 	// before content when they both appear in the same directory, per
 	// https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
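
Several hunks in this file swap hand-rolled loops and prefix handling for Go 1.21 standard-library helpers. A minimal sketch of the same idioms; the ".wh." constant mirrors archive.WhiteoutPrefix:

package main

import (
	"errors"
	"fmt"
	"slices"
	"strings"
)

var errUnaccounted = errors.New("layer unaccounted for")

func main() {
	errs := []error{fmt.Errorf("check: %w", errUnaccounted)}
	// Replaces the explicit for/errors.Is loop in isUnaccounted.
	fmt.Println(slices.ContainsFunc(errs, func(err error) bool {
		return errors.Is(err, errUnaccounted)
	})) // true

	// Replaces append([]*tar.Header{}, hdrs...) with an explicit clone.
	hdrs := []string{"a", "b"}
	fmt.Println(slices.Clone(hdrs)) // [a b]

	// CutPrefix tests for and strips the whiteout prefix in one call.
	const whiteoutPrefix = ".wh." // same value as archive.WhiteoutPrefix
	if file, ok := strings.CutPrefix(".wh.removed", whiteoutPrefix); ok {
		fmt.Println(file) // removed
	}
}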
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index a7dfb405b..143fde297 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"time"
 
@@ -162,17 +163,17 @@ type containerStore struct {
 func copyContainer(c *Container) *Container {
 	return &Container{
 		ID:             c.ID,
-		Names:          copyStringSlice(c.Names),
+		Names:          copySlicePreferringNil(c.Names),
 		ImageID:        c.ImageID,
 		LayerID:        c.LayerID,
 		Metadata:       c.Metadata,
-		BigDataNames:   copyStringSlice(c.BigDataNames),
-		BigDataSizes:   copyStringInt64Map(c.BigDataSizes),
-		BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+		BigDataNames:   copySlicePreferringNil(c.BigDataNames),
+		BigDataSizes:   copyMapPreferringNil(c.BigDataSizes),
+		BigDataDigests: copyMapPreferringNil(c.BigDataDigests),
 		Created:        c.Created,
-		UIDMap:         copyIDMap(c.UIDMap),
-		GIDMap:         copyIDMap(c.GIDMap),
-		Flags:          copyStringInterfaceMap(c.Flags),
+		UIDMap:         copySlicePreferringNil(c.UIDMap),
+		GIDMap:         copySlicePreferringNil(c.GIDMap),
+		Flags:          copyMapPreferringNil(c.Flags),
 		volatileStore:  c.volatileStore,
 	}
 }
@@ -690,13 +691,13 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 		BigDataSizes:   make(map[string]int64),
 		BigDataDigests: make(map[string]digest.Digest),
 		Created:        time.Now().UTC(),
-		Flags:          copyStringInterfaceMap(options.Flags),
-		UIDMap:         copyIDMap(options.UIDMap),
-		GIDMap:         copyIDMap(options.GIDMap),
+		Flags:          newMapFrom(options.Flags),
+		UIDMap:         copySlicePreferringNil(options.UIDMap),
+		GIDMap:         copySlicePreferringNil(options.GIDMap),
 		volatileStore:  options.Volatile,
 	}
 	if options.MountOpts != nil {
-		container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
+		container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
 	}
 	if options.Volatile {
 		container.Flags[volatileFlag] = true
@@ -788,13 +789,6 @@ func (r *containerStore) Delete(id string) error {
 		return ErrContainerUnknown
 	}
 	id = container.ID
-	toDeleteIndex := -1
-	for i, candidate := range r.containers {
-		if candidate.ID == id {
-			toDeleteIndex = i
-			break
-		}
-	}
 	delete(r.byid, id)
 	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
 	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -803,14 +797,9 @@ func (r *containerStore) Delete(id string) error {
 	for _, name := range container.Names {
 		delete(r.byname, name)
 	}
-	if toDeleteIndex != -1 {
-		// delete the container at toDeleteIndex
-		if toDeleteIndex == len(r.containers)-1 {
-			r.containers = r.containers[:len(r.containers)-1]
-		} else {
-			r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
-		}
-	}
+	r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
+		return candidate.ID == id
+	})
 	if err := r.saveFor(container); err != nil {
 		return err
 	}
@@ -916,7 +905,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	if !ok {
 		return nil, ErrContainerUnknown
 	}
-	return copyStringSlice(c.BigDataNames), nil
+	return copySlicePreferringNil(c.BigDataNames), nil
 }
 
 // Requires startWriting.
@@ -948,14 +937,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 		if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 			save = true
 		}
-		addName := true
-		for _, name := range c.BigDataNames {
-			if name == key {
-				addName = false
-				break
-			}
-		}
-		if addName {
+		if !slices.Contains(c.BigDataNames, key) {
 			c.BigDataNames = append(c.BigDataNames, key)
 			save = true
 		}
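
copySlicePreferringNil and copyMapPreferringNil are defined elsewhere in the package and are not part of this excerpt; a plausible generic sketch of their contract (nil or empty in, nil out; otherwise an independent copy), under that assumption:

package store

import (
	"maps"
	"slices"
)

// copySlicePreferringNil returns an independent copy of s, normalizing
// empty slices to nil (sketch; the actual helper is not shown above).
func copySlicePreferringNil[S ~[]E, E any](s S) S {
	if len(s) == 0 {
		return nil
	}
	return slices.Clone(s)
}

// copyMapPreferringNil is the map counterpart (sketch).
func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V {
	if len(m) == 0 {
		return nil
	}
	return maps.Clone(m)
}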
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index e00314d3f..d03934263 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 /*
 
@@ -30,7 +29,6 @@ import (
 	"io"
 	"io/fs"
 	"os"
-	"os/exec"
 	"path"
 	"path/filepath"
 	"strings"
@@ -75,8 +73,6 @@ func init() {
 type Driver struct {
 	sync.Mutex
 	root          string
-	uidMaps       []idtools.IDMap
-	gidMaps       []idtools.IDMap
 	ctr           *graphdriver.RefCounter
 	pathCacheLock sync.Mutex
 	pathCache     map[string]string
@@ -129,22 +125,16 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 
 	a := &Driver{
 		root:         home,
-		uidMaps:      options.UIDMaps,
-		gidMaps:      options.GIDMaps,
 		pathCache:    make(map[string]string),
 		ctr:          graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
 		locker:       locker.New(),
 		mountOptions: mountOptions,
 	}
 
-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
-	if err != nil {
-		return nil, err
-	}
 	// Create the root aufs driver dir and return
 	// if it already exists
 	// If not populate the dir structure
-	if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(home, 0o700); err != nil {
 		if os.IsExist(err) {
 			return a, nil
 		}
@@ -157,7 +147,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 
 	// Populate the dir structure
 	for _, p := range paths {
-		if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil {
+		if err := os.MkdirAll(path.Join(home, p), 0o700); err != nil {
 			return nil, err
 		}
 	}
@@ -191,13 +181,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 }
 
 // Return a nil error if the kernel supports aufs
-// We cannot modprobe because inside dind modprobe fails
-// to run
 func supportsAufs() error {
-	// We can try to modprobe aufs first before looking at
-	// proc/filesystems for when aufs is supported
-	exec.Command("modprobe", "aufs").Run()
-
 	if unshare.IsRootless() {
 		return ErrAufsNested
 	}
@@ -334,7 +318,7 @@ func (a *Driver) createDirsFor(id, parent string) error {
 	// The path of directories are <aufs_root_path>/mnt/<image_id>
 	// and <aufs_root_path>/diff/<image_id>
 	for _, p := range paths {
-		rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
+		rootPair := idtools.IDPair{UID: 0, GID: 0}
 		rootPerms := defaultPerms
 		if parent != "" {
 			st, err := system.Stat(path.Join(a.rootPath(), p, parent))
@@ -355,7 +339,9 @@ func (a *Driver) createDirsFor(id, parent string) error {
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
 	a.pathCacheLock.Lock()
 	mountpoint, exists := a.pathCache[id]
 	a.pathCacheLock.Unlock()
@@ -446,7 +432,10 @@ func atomicRemove(source string) error {
 // This will mount the dir at its given path
 func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
+
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
@@ -483,7 +472,10 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
+
 	a.pathCacheLock.Lock()
 	m, exists := a.pathCache[id]
 	if !exists {
@@ -506,7 +498,9 @@ func (a *Driver) Put(id string) error {
 // For AUFS, it queries the mountpoint for this ID.
 func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
 	a.pathCacheLock.Lock()
 	m, exists := a.pathCache[id]
 	if !exists {
@@ -689,7 +683,9 @@ func (a *Driver) Cleanup() error {
 func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
 	defer func() {
 		if err != nil {
-			Unmount(target)
+			if err1 := Unmount(target); err1 != nil {
+				logrus.Warnf("Unmount %q: %v", target, err1)
+			}
 		}
 	}()
 
@@ -780,3 +776,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
 func (a *Driver) SupportsShifting() bool {
 	return false
 }
+
+// Dedup performs deduplication of the driver's storage.
+func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	return graphdriver.DedupResult{}, nil
+}
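
The repeated defer func() { _ = a.locker.Unlock(id) }() rewrites and the logged Unmount above follow one pattern: cleanup errors inside a defer are either explicitly discarded or logged, never silently dropped or allowed to mask the primary error. A minimal sketch of the logged variant, with unmount standing in for any real cleanup call:

package main

import (
	"errors"
	"fmt"

	"github.com/sirupsen/logrus"
)

// unmount is a stand-in for a real cleanup call such as aufs Unmount.
func unmount(target string) error {
	return errors.New("device busy")
}

func withMounted(target string, fn func() error) (err error) {
	defer func() {
		// Log the cleanup failure instead of overwriting err.
		if err1 := unmount(target); err1 != nil {
			logrus.Warnf("Unmount %q: %v", target, err1)
		}
	}()
	return fn()
}

func main() {
	fmt.Println(withMounted("/mnt/layer", func() error { return nil }))
}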
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
index 27e621633..9587bf63c 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package aufs
 
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go
index 156f4a4f0..51b3d6dfa 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/mount.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package aufs
 
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 11ae56364..4a80339f4 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo
-// +build linux,cgo
 
 package btrfs
 
@@ -66,11 +65,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
 	}
 
-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
-	if err != nil {
-		return nil, err
-	}
-	if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(filepath.Join(home, "subvolumes"), 0o700); err != nil {
 		return nil, err
 	}
 
@@ -85,8 +80,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 
 	driver := &Driver{
 		home:    home,
-		uidMaps: options.UIDMaps,
-		gidMaps: options.GIDMaps,
 		options: opt,
 	}
 
@@ -129,8 +122,6 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
 type Driver struct {
 	// root of the file system
 	home         string
-	uidMaps      []idtools.IDMap
-	gidMaps      []idtools.IDMap
 	options      btrfsOptions
 	quotaEnabled bool
 	once         sync.Once
@@ -481,11 +472,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
 func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
 	quotas := d.quotasDir()
 	subvolumes := d.subvolumesDir()
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return err
-	}
-	if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(subvolumes, 0o700); err != nil {
 		return err
 	}
 	if parent == "" {
@@ -523,7 +510,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
 		if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
 			return err
 		}
-		if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil {
+		if err := os.MkdirAll(quotas, 0o700); err != nil {
 			return err
 		}
 		if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
@@ -531,14 +518,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
 		}
 	}
 
-	// if we have a remapped root (user namespaces enabled), change the created snapshot
-	// dir ownership to match
-	if rootUID != 0 || rootGID != 0 {
-		if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
-			return err
-		}
-	}
-
 	mountLabel := ""
 	if opts != nil {
 		mountLabel = opts.MountLabel
@@ -694,3 +673,8 @@ func (d *Driver) ListLayers() ([]string, error) {
 func (d *Driver) AdditionalImageStores() []string {
 	return nil
 }
+
+// Dedup performs deduplication of the driver's storage.
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	return graphdriver.DedupResult{}, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
index c7d9d3b84..a4d77eaad 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -1,4 +1,3 @@
 //go:build !linux || !cgo
-// +build !linux !cgo
 
 package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
index 5816139f3..4f5d8a5b9 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -1,5 +1,4 @@
 //go:build linux && !btrfs_noversion && cgo
-// +build linux,!btrfs_noversion,cgo
 
 package btrfs
 
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
index a61d8fbd9..58c1b0d0c 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -1,5 +1,4 @@
 //go:build linux && btrfs_noversion && cgo
-// +build linux,btrfs_noversion,cgo
 
 package btrfs
 
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index ca43c3f05..d728e919b 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -4,11 +4,12 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io/fs"
 	"os"
 
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/reexec"
-	"github.com/opencontainers/selinux/pkg/pwalk"
+	"github.com/opencontainers/selinux/pkg/pwalkdir"
 )
 
 const (
@@ -54,13 +55,14 @@ func chownByMapsMain() {
 
 	chowner := newLChowner()
 
-	chown := func(path string, info os.FileInfo, _ error) error {
-		if path == "." {
+	var chown fs.WalkDirFunc = func(path string, d fs.DirEntry, _ error) error {
+		info, err := d.Info()
+		if path == "." || err != nil {
 			return nil
 		}
 		return chowner.LChown(path, info, toHost, toContainer)
 	}
-	if err := pwalk.Walk(".", chown); err != nil {
+	if err := pwalkdir.Walk(".", chown); err != nil {
 		fmt.Fprintf(os.Stderr, "error during chown: %v", err)
 		os.Exit(1)
 	}
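
With the pwalk→pwalkdir switch above, the walker hands out fs.DirEntry values, so stat information is only materialized on demand via d.Info(), which is cheaper on large trees. A minimal usage sketch (pwalkdir runs the callback concurrently):

package main

import (
	"fmt"
	"io/fs"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	err := pwalkdir.Walk(".", func(path string, d fs.DirEntry, _ error) error {
		if path == "." {
			return nil
		}
		info, err := d.Info() // stat only when needed
		if err != nil {
			return nil // entry disappeared mid-walk; skip it
		}
		fmt.Println(path, info.Mode())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}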
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go
index d6150ceee..4f2750207 100644
--- a/vendor/github.com/containers/storage/drivers/chown_darwin.go
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go
@@ -1,5 +1,4 @@
 //go:build darwin
-// +build darwin
 
 package graphdriver
 
@@ -84,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
 	}
 	if uid != int(st.Uid) || gid != int(st.Gid) {
 		capability, err := system.Lgetxattr(path, "security.capability")
-		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
 			return fmt.Errorf("%s: %w", os.Args[0], err)
 		}
 
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 42c12c627..b0c25cd99 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package graphdriver
 
@@ -102,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
 	}
 	if uid != int(st.Uid) || gid != int(st.Gid) {
 		cap, err := system.Lgetxattr(path, "security.capability")
-		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
+		if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
 			return fmt.Errorf("%s: %w", os.Args[0], err)
 		}
 
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
index 06ccf9fa4..6c2bd2ca2 100644
--- a/vendor/github.com/containers/storage/drivers/chown_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package graphdriver
 
diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go
index 9a1c6751f..2aa3e9e6b 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go
@@ -1,5 +1,4 @@
-//go:build linux || darwin || freebsd || solaris
-// +build linux darwin freebsd solaris
+//go:build !windows
 
 package graphdriver
 
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 9c3d7c668..93fc0a326 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -1,5 +1,4 @@
 //go:build cgo
-// +build cgo
 
 package copy
 
@@ -17,7 +16,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"os"
 	"path/filepath"
 	"strings"
@@ -50,13 +48,13 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
 	defer srcFile.Close()
 
 	if *copyWithFileClone {
-		_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
-		if err == nil {
+		_, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+		if errno == 0 {
 			return nil
 		}
 
 		*copyWithFileClone = false
-		if err == unix.EXDEV {
+		if errno == unix.EXDEV {
 			*copyWithFileRange = false
 		}
 	}
@@ -108,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
 
 func copyXattr(srcPath, dstPath, attr string) error {
 	data, err := system.Lgetxattr(srcPath, attr)
-	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
 	if data != nil {
@@ -200,11 +198,9 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 			}
 
 		case mode&os.ModeSocket != 0:
-			s, err := net.Listen("unix", dstPath)
-			if err != nil {
+			if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
 				return err
 			}
-			s.Close()
 
 		case mode&os.ModeDevice != 0:
 			if unshare.IsRootless() {
@@ -283,7 +279,7 @@ func doCopyXattrs(srcPath, dstPath string) error {
 	}
 
 	xattrs, err := system.Llistxattr(srcPath)
-	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
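
The FICLONE change above fixes a classic Go pitfall: unix.Syscall returns a unix.Errno value, and boxing Errno(0) into an error interface produces a non-nil interface, so the old err == nil success check could never fire. A minimal demonstration:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var errno unix.Errno // zero value means success
	var err error = errno
	fmt.Println(err == nil) // false: non-nil interface holding Errno(0)
	fmt.Println(errno == 0) // true: the check the patch switches to
}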
 
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
index 5a4629b74..baaa86ddc 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux || !cgo
-// +build !linux !cgo
 
 package copy //nolint: predeclared
 
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index b42ba0757..1f7ac5ff0 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -8,6 +8,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/containers/storage/internal/dedup"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/fileutils"
@@ -81,6 +82,23 @@ type ApplyDiffWithDifferOpts struct {
 	Flags map[string]interface{}
 }
 
+// DedupArgs contains the information to perform storage deduplication.
+type DedupArgs struct {
+	// Layers is the list of layers to deduplicate.
+	Layers []string
+
+	// Options that are passed directly to the internal/dedup.DedupDirs function.
+	Options dedup.DedupOptions
+}
+
+// DedupResult contains the result of the Dedup() call.
+type DedupResult struct {
+	// Deduped represents the total number of bytes saved by deduplication.
+	// This value also accounts for all previously deduplicated data, not only the savings
+	// from the last run.
+	Deduped uint64
+}
+
 // InitFunc initializes the storage driver.
 type InitFunc func(homedir string, options Options) (Driver, error)
 
@@ -139,6 +157,8 @@ type ProtoDriver interface {
 	// AdditionalImageStores returns additional image stores supported by the driver
 	// This API is experimental and can be changed without bumping the major version number.
 	AdditionalImageStores() []string
+	// Dedup performs deduplication of the driver's storage.
+	Dedup(DedupArgs) (DedupResult, error)
 }
 
 // DiffDriver is the interface to use to implement graph diffs
@@ -189,13 +209,14 @@ type Driver interface {
 type DriverWithDifferOutput struct {
 	Differ             Differ
 	Target             string
-	Size               int64
+	Size               int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
 	UIDs               []uint32
 	GIDs               []uint32
 	UncompressedDigest digest.Digest
+	CompressedDigest   digest.Digest
 	Metadata           string
 	BigData            map[string][]byte
-	TarSplit           []byte
+	TarSplit           []byte // nil if not available
 	TOCDigest          digest.Digest
 	// RootDirMode is the mode of the root directory of the layer, if specified.
 	RootDirMode *os.FileMode
@@ -210,25 +231,30 @@ const (
 	// DifferOutputFormatDir means the output is a directory and it will
 	// keep the original layout.
 	DifferOutputFormatDir = iota
-	// DifferOutputFormatFlat will store the files by their checksum, in the form
-	// checksum[0:2]/checksum[2:]
+	// DifferOutputFormatFlat will store the files by their checksum, per
+	// pkg/chunked/internal/composefs.RegularFilePathForValidatedDigest.
 	DifferOutputFormatFlat
 )
 
+// DifferFsVerity is a part of the experimental Differ interface and should not be used from outside of c/storage.
+// It configures the fsverity requirement.
 type DifferFsVerity int
 
 const (
 	// DifferFsVerityDisabled means no fs-verity is used
 	DifferFsVerityDisabled = iota
 
-	// DifferFsVerityEnabled means fs-verity is used when supported
-	DifferFsVerityEnabled
+	// DifferFsVerityIfAvailable means fs-verity is used when supported by
+	// the underlying kernel and filesystem.
+	DifferFsVerityIfAvailable
 
-	// DifferFsVerityRequired means fs-verity is required
+	// DifferFsVerityRequired means fs-verity is required.  Note this is not
+	// currently set or exposed by the overlay driver.
 	DifferFsVerityRequired
 )
 
-// DifferOptions overrides how the differ work
+// DifferOptions is a part of the experimental Differ interface and should not be used from outside of c/storage.
+// It overrides how the differ works.
 type DifferOptions struct {
 	// Format defines the destination directory layout format
 	Format DifferOutputFormat
@@ -248,8 +274,8 @@ type Differ interface {
 type DriverWithDiffer interface {
 	Driver
 	// ApplyDiffWithDiffer applies the changes using the callback function.
-	// If id is empty, then a staging directory is created.  The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
-	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
+	// The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
 	// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
 	ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
 	// CleanupStagingDirectory cleans up the staging directory.  It can be used to clean up the staging directory on errors
@@ -377,8 +403,6 @@ type Options struct {
 	ImageStore          string
 	DriverPriority      []string
 	DriverOptions       []string
-	UIDMaps             []idtools.IDMap
-	GIDMaps             []idtools.IDMap
 	ExperimentalEnabled bool
 }
 
@@ -492,7 +516,7 @@ func driverPut(driver ProtoDriver, id string, mainErr *error) {
 		if *mainErr == nil {
 			*mainErr = err
 		} else {
-			logrus.Errorf(err.Error())
+			logrus.Error(err)
 		}
 	}
 }
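
Dedup is now part of ProtoDriver, so every graph driver must implement it; drivers without deduplication support satisfy it with a no-op, as the aufs and btrfs hunks do. A compile-only sketch for a hypothetical driver type:

package mydriver

import graphdriver "github.com/containers/storage/drivers"

// myDriver is a hypothetical driver; the rest of ProtoDriver is elided.
type myDriver struct{}

// Dedup reports no savings for drivers without deduplication support.
func (d *myDriver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
	return graphdriver.DedupResult{}, nil
}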
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go
index ee0fc7bfc..d730dc38a 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package graphdriver
 
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
index 6b6373a37..47749c6ef 100644
--- a/vendor/github.com/containers/storage/drivers/driver_solaris.go
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -1,5 +1,4 @@
 //go:build solaris && cgo
-// +build solaris,cgo
 
 package graphdriver
 
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
index 7dfbef007..dcf169b4d 100644
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !windows && !freebsd && !solaris && !darwin
-// +build !linux,!windows,!freebsd,!solaris,!darwin
 
 package graphdriver
 
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index fba9ec4fc..e500585ff 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -128,6 +128,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 
 	options := MountOpts{
 		MountLabel: mountLabel,
+		Options:    []string{"ro"},
 	}
 	layerFs, err := driver.Get(id, options)
 	if err != nil {
@@ -138,10 +139,6 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
 	parentFs := ""
 
 	if parent != "" {
-		options := MountOpts{
-			MountLabel: mountLabel,
-			Options:    []string{"ro"},
-		}
 		parentFs, err = driver.Get(parent, options)
 		if err != nil {
 			return nil, err
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index d8139f656..527701746 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
@@ -263,7 +262,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
 	if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
 		return false, fmt.Errorf("create mapped mount: %w", err)
 	}
-	defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
+	defer func() {
+		if err := unix.Unmount(lowerMappedDir, unix.MNT_DETACH); err != nil {
+			logrus.Warnf("Unmount %q: %v", lowerMappedDir, err)
+		}
+	}()
 
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
 	flags := uintptr(0)
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
index bec455dd4..5cbf5e1ce 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check_116.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
@@ -11,7 +10,6 @@ import (
 
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/system"
-	"golang.org/x/sys/unix"
 )
 
 func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -29,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
 		}
 		if d.IsDir() {
 			xattrs, err := system.Llistxattr(path)
-			if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+			if err != nil && !errors.Is(err, system.ENOTSUP) {
 				return err
 			}
 			for _, xattr := range xattrs {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
index 8f07c2360..797e3646e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go
@@ -1,5 +1,4 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux
 
 package overlay
 
@@ -13,6 +12,7 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"github.com/containers/storage/pkg/chunked/dump"
 	"github.com/containers/storage/pkg/fsverity"
@@ -25,6 +25,10 @@ var (
 	composeFsHelperOnce sync.Once
 	composeFsHelperPath string
 	composeFsHelperErr  error
+
+	// skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
+	// does not support it.  Mounting directly via a file is supported from Linux 6.12.
+	skipMountViaFile atomic.Bool
 )
 
 func getComposeFsHelper() (string, error) {
@@ -54,29 +58,26 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
 		return fmt.Errorf("failed to find mkcomposefs: %w", err)
 	}
 
-	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
+	outFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
 	if err != nil {
-		return fmt.Errorf("failed to open output file %q: %w", destFile, err)
+		return err
 	}
-	outFd := os.NewFile(uintptr(fd), "outFd")
 
-	fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	roFile, err := os.Open(fmt.Sprintf("/proc/self/fd/%d", outFile.Fd()))
 	if err != nil {
-		outFd.Close()
-		return fmt.Errorf("failed to dup output file: %w", err)
+		outFile.Close()
+		return fmt.Errorf("failed to reopen %s as read-only: %w", destFile, err)
 	}
-	newFd := os.NewFile(uintptr(fd), "newFd")
-	defer newFd.Close()
 
 	err = func() error {
-		// a scope to close outFd before setting fsverity on the read-only fd.
-		defer outFd.Close()
+		// a scope to close outFile before setting fsverity on the read-only fd.
+		defer outFile.Close()
 
 		errBuf := &bytes.Buffer{}
-		cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3")
-		cmd.ExtraFiles = []*os.File{outFd}
+		cmd := exec.Command(writerJson, "--from-file", "-", "-")
 		cmd.Stderr = errBuf
 		cmd.Stdin = dumpReader
+		cmd.Stdout = outFile
 		if err := cmd.Run(); err != nil {
 			rErr := fmt.Errorf("failed to convert json to erofs: %w", err)
 			exitErr := &exec.ExitError{}
@@ -91,7 +92,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
 		return err
 	}
 
-	if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
+	if err := fsverity.EnableVerity("manifest file", int(roFile.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
 		logrus.Warningf("%s", err)
 	}
 
@@ -113,42 +114,112 @@ struct lcfs_erofs_header_s {
 
 // hasACL returns true if the erofs blob has ACLs enabled
 func hasACL(path string) (bool, error) {
-	const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
-
-	fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	const (
+		LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
+		versionNumberSize        = 4
+		magicNumberSize          = 4
+		flagsSize                = 4
+	)
+
+	file, err := os.Open(path)
 	if err != nil {
 		return false, err
 	}
-	defer unix.Close(fd)
+	defer file.Close()
+
 	// do not worry about checking the magic number, if the file is invalid
 	// we will fail to mount it anyway
-	flags := make([]byte, 4)
-	nread, err := unix.Pread(fd, flags, 8)
+	buffer := make([]byte, versionNumberSize+magicNumberSize+flagsSize)
+	nread, err := file.Read(buffer)
 	if err != nil {
 		return false, err
 	}
-	if nread != 4 {
+	if nread != len(buffer) {
 		return false, fmt.Errorf("failed to read flags from %q", path)
 	}
+	flags := buffer[versionNumberSize+magicNumberSize:]
 	return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
 }
 
-func mountComposefsBlob(dataDir, mountPoint string) error {
-	blobFile := getComposefsBlob(dataDir)
-	loop, err := loopback.AttachLoopDeviceRO(blobFile)
+func openBlobFile(blobFile string, hasACL, useLoopDevice bool) (int, error) {
+	if useLoopDevice {
+		loop, err := loopback.AttachLoopDeviceRO(blobFile)
+		if err != nil {
+			return -1, err
+		}
+		defer loop.Close()
+
+		blobFile = loop.Name()
+	}
+
+	fsfd, err := unix.Fsopen("erofs", 0)
 	if err != nil {
-		return err
+		return -1, fmt.Errorf("failed to open erofs filesystem: %w", err)
+	}
+	defer unix.Close(fsfd)
+
+	if err := unix.FsconfigSetString(fsfd, "source", blobFile); err != nil {
+		return -1, fmt.Errorf("failed to set source for erofs filesystem: %w", err)
+	}
+
+	if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
+		return -1, fmt.Errorf("failed to set erofs filesystem read-only: %w", err)
+	}
+
+	if !hasACL {
+		if err := unix.FsconfigSetFlag(fsfd, "noacl"); err != nil {
+			return -1, fmt.Errorf("failed to set noacl for erofs filesystem: %w", err)
+		}
+	}
+
+	if err := unix.FsconfigCreate(fsfd); err != nil {
+		buffer := make([]byte, 4096)
+		if n, _ := unix.Read(fsfd, buffer); n > 0 {
+			return -1, fmt.Errorf("failed to create erofs filesystem: %s: %w", strings.TrimSuffix(string(buffer[:n]), "\n"), err)
+		}
+		return -1, fmt.Errorf("failed to create erofs filesystem: %w", err)
+	}
+
+	mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
+	if err != nil {
+		buffer := make([]byte, 4096)
+		if n, _ := unix.Read(fsfd, buffer); n > 0 {
+			return -1, fmt.Errorf("failed to mount erofs filesystem: %s: %w", strings.TrimSuffix(string(buffer[:n]), "\n"), err)
+		}
+		return -1, fmt.Errorf("failed to mount erofs filesystem: %w", err)
 	}
-	defer loop.Close()
+	return mfd, nil
+}
+
+func openComposefsMount(dataDir string) (int, error) {
+	blobFile := getComposefsBlob(dataDir)
 
 	hasACL, err := hasACL(blobFile)
 	if err != nil {
-		return err
+		return -1, err
 	}
-	mountOpts := "ro"
-	if !hasACL {
-		mountOpts += ",noacl"
+
+	if !skipMountViaFile.Load() {
+		fd, err := openBlobFile(blobFile, hasACL, false)
+		if err == nil || !errors.Is(err, unix.ENOTBLK) {
+			return fd, err
+		}
+		logrus.Debugf("The current kernel doesn't support mounting EROFS directly from a file, fallback to a loopback device")
+		skipMountViaFile.Store(true)
+	}
+
+	return openBlobFile(blobFile, hasACL, true)
+}
+
+func mountComposefsBlob(dataDir, mountPoint string) error {
+	mfd, err := openComposefsMount(dataDir)
+	if err != nil {
+		return err
 	}
+	defer unix.Close(mfd)
 
-	return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts)
+	if err := unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+		return fmt.Errorf("failed to move mount to %q: %w", mountPoint, err)
+	}
+	return nil
 }
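
openBlobFile above adopts the new-style Linux mount API: fsopen creates a filesystem context, fsconfig sets options on it, fsmount yields a detached mount fd, and move_mount attaches it (the loopback path remains only as a fallback for kernels before 6.12 that cannot mount EROFS directly from a regular file). A condensed sketch of the same sequence, with placeholder paths:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func mountEROFS(source, mountPoint string) error {
	fsfd, err := unix.Fsopen("erofs", 0)
	if err != nil {
		return err
	}
	defer unix.Close(fsfd)

	if err := unix.FsconfigSetString(fsfd, "source", source); err != nil {
		return err
	}
	if err := unix.FsconfigSetFlag(fsfd, "ro"); err != nil {
		return err
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return err
	}
	mfd, err := unix.Fsmount(fsfd, 0, unix.MOUNT_ATTR_RDONLY)
	if err != nil {
		return err
	}
	defer unix.Close(mfd)
	// Attach the detached mount at mountPoint.
	return unix.MoveMount(mfd, "", unix.AT_FDCWD, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH)
}

func main() {
	// Placeholder paths; requires root and an erofs-enabled kernel.
	fmt.Println(mountEROFS("/var/lib/blob.erofs", "/mnt/layer"))
}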
diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
index bedda3507..ca32c62b6 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go
index 8829e55e9..b3ddac022 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/mount.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
@@ -103,20 +102,20 @@ func mountOverlayFromMain() {
 	// paths, but we don't want to mess with other options.
 	var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
 	for _, arg := range strings.Split(options.Label, ",") {
-		kv := strings.SplitN(arg, "=", 2)
-		switch kv[0] {
+		key, val, _ := strings.Cut(arg, "=")
+		switch key {
 		case "upperdir":
 			upperk = "upperdir="
-			upperv = kv[1]
+			upperv = val
 		case "workdir":
 			workk = "workdir="
-			workv = kv[1]
+			workv = val
 		case "lowerdir":
 			lowerk = "lowerdir="
-			lowerv = kv[1]
+			lowerv = val
 		case "label":
 			labelk = "label="
-			labelv = kv[1]
+			labelv = val
 		default:
 			if others == "" {
 				others = arg
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 8b6f64b1e..56278805f 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
@@ -9,10 +8,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"os/exec"
 	"path"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -21,6 +22,7 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/drivers/overlayutils"
 	"github.com/containers/storage/drivers/quota"
+	"github.com/containers/storage/internal/dedup"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/directory"
@@ -119,14 +121,13 @@ type Driver struct {
 	home             string
 	runhome          string
 	imageStore       string
-	uidMaps          []idtools.IDMap
-	gidMaps          []idtools.IDMap
 	ctr              *graphdriver.RefCounter
 	quotaCtl         *quota.Control
 	options          overlayOptions
 	naiveDiff        graphdriver.DiffDriver
 	supportsDType    bool
 	supportsVolatile *bool
+	supportsDataOnly *bool
 	usingMetacopy    bool
 	usingComposefs   bool
 
@@ -159,30 +160,7 @@ func init() {
 }
 
 func hasMetacopyOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "metacopy=on" {
-			return true
-		}
-	}
-	return false
-}
-
-func stripOption(opts []string, option string) []string {
-	for i, s := range opts {
-		if s == option {
-			return stripOption(append(opts[:i], opts[i+1:]...), option)
-		}
-	}
-	return opts
-}
-
-func hasVolatileOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "volatile" {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(opts, "metacopy=on")
 }
 
 func getMountProgramFlagFile(path string) string {
@@ -295,6 +273,18 @@ func (d *Driver) getSupportsVolatile() (bool, error) {
 	return supportsVolatile, nil
 }
 
+func (d *Driver) getSupportsDataOnly() (bool, error) {
+	if d.supportsDataOnly != nil {
+		return *d.supportsDataOnly, nil
+	}
+	supportsDataOnly, err := supportsDataOnlyLayersCached(d.home, d.runhome)
+	if err != nil {
+		return false, err
+	}
+	d.supportsDataOnly = &supportsDataOnly
+	return supportsDataOnly, nil
+}
+
 // isNetworkFileSystem checks if the specified file system is supported by native overlay
 // as backing store when running in a user namespace.
 func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
@@ -332,13 +322,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 	backingFs = fsName
 
 	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
-	if err != nil {
-		return nil, err
-	}
 
 	// Create the driver home dir
-	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil {
+	if err := os.MkdirAll(path.Join(home, linkDir), 0o755); err != nil {
 		return nil, err
 	}
 
@@ -348,7 +334,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		}
 	}
 
-	if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(runhome, 0o700); err != nil {
 		return nil, err
 	}
 
@@ -373,9 +359,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 			return nil, err
 		}
 	} else {
-		if opts.forceMask != nil {
-			return nil, errors.New("'force_mask' is supported only with 'mount_program'")
-		}
 		// check if they are running over btrfs, aufs, overlay, or ecryptfs
 		switch fsMagic {
 		case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
@@ -390,13 +373,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		if unshare.IsRootless() {
 			return nil, fmt.Errorf("composefs is not supported in user namespaces")
 		}
-		supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome)
-		if err != nil {
-			return nil, err
-		}
-		if !supportsDataOnly {
-			return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS)
-		}
 		if _, err := getComposeFsHelper(); err != nil {
 			return nil, fmt.Errorf("composefs helper program not found: %w", err)
 		}
@@ -457,8 +433,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		home:             home,
 		imageStore:       options.ImageStore,
 		runhome:          runhome,
-		uidMaps:          options.UIDMaps,
-		gidMaps:          options.GIDMaps,
 		ctr:              graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
 		supportsDType:    supportsDType,
 		usingMetacopy:    usingMetacopy,
@@ -616,7 +590,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
 			m := os.FileMode(mask)
 			o.forceMask = &m
 		default:
-			return nil, fmt.Errorf("overlay: Unknown option %s", key)
+			return nil, fmt.Errorf("overlay: unknown option %s", key)
 		}
 	}
 	return o, nil
@@ -698,12 +672,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
 }
 
 func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
-	// We can try to modprobe overlay first
-
 	selinuxLabelTest := selinux.PrivContainerMountLabel()
 
-	exec.Command("modprobe", "overlay").Run()
-
 	logLevel := logrus.ErrorLevel
 	if unshare.IsRootless() {
 		logLevel = logrus.DebugLevel
@@ -831,7 +801,9 @@ func (d *Driver) useNaiveDiff() bool {
 			logrus.Info(nativeDiffCacheText)
 			useNaiveDiffOnly = true
 		}
-		cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)
+		if err := cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText); err != nil {
+			logrus.Warnf("Recording overlay native-diff support status: %v", err)
+		}
 	})
 	return useNaiveDiffOnly
 }
@@ -860,14 +832,14 @@ func (d *Driver) Status() [][2]string {
 // Metadata returns meta data about the overlay driver such as
 // LowerDir, UpperDir, WorkDir and MergeDir used to store data.
 func (d *Driver) Metadata(id string) (map[string]string, error) {
-	dir := d.dir(id)
+	dir, _, inAdditionalStore := d.dir2(id, false)
 	if err := fileutils.Exists(dir); err != nil {
 		return nil, err
 	}
 
 	metadata := map[string]string{
 		"WorkDir":   path.Join(dir, "work"),
-		"MergedDir": path.Join(dir, "merged"),
+		"MergedDir": d.getMergedDir(id, dir, inAdditionalStore),
 		"UpperDir":  path.Join(dir, "diff"),
 	}
 
@@ -903,11 +875,11 @@ func (d *Driver) pruneStagingDirectories() bool {
 
 	anyPresent := false
 
-	homeStagingDir := filepath.Join(d.home, stagingDir)
-	dirs, err := os.ReadDir(homeStagingDir)
+	stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
+	dirs, err := os.ReadDir(stagingDirBase)
 	if err == nil {
 		for _, dir := range dirs {
-			stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name())
+			stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name())
 			lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
 			if err != nil {
 				anyPresent = true
@@ -983,6 +955,10 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
 		}
 	}
 
+	if d.options.forceMask != nil && d.options.mountProgram == "" {
+		return fmt.Errorf("overlay: force_mask option for writeable layers is only supported with a mount_program")
+	}
+
 	if _, ok := opts.StorageOpt["size"]; !ok {
 		if opts.StorageOpt == nil {
 			opts.StorageOpt = map[string]string{}
@@ -1021,8 +997,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
 
 	disableQuota := readOnly
 
-	uidMaps := d.uidMaps
-	gidMaps := d.gidMaps
+	var uidMaps []idtools.IDMap
+	var gidMaps []idtools.IDMap
 
 	if opts != nil && opts.IDMappings != nil {
 		uidMaps = opts.IDMappings.UIDs()
@@ -1047,14 +1023,23 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
 	if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
 		return err
 	}
+
+	st := idtools.Stat{IDs: idPair, Mode: defaultPerms}
+
 	if parent != "" {
 		parentBase := d.dir(parent)
-		st, err := system.Stat(filepath.Join(parentBase, "diff"))
-		if err != nil {
-			return err
+		parentDiff := filepath.Join(parentBase, "diff")
+		if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil {
+			st = xSt
+		} else {
+			systemSt, err := system.Stat(parentDiff)
+			if err != nil {
+				return err
+			}
+			st.IDs.UID = int(systemSt.UID())
+			st.IDs.GID = int(systemSt.GID())
+			st.Mode = os.FileMode(systemSt.Mode())
 		}
-		rootUID = int(st.UID())
-		rootGID = int(st.GID())
 	}
 
 	if err := fileutils.Lexists(dir); err == nil {
@@ -1100,22 +1085,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
 		}
 	}
 
-	perms := defaultPerms
+	forcedSt := st
 	if d.options.forceMask != nil {
-		perms = *d.options.forceMask
+		forcedSt.IDs = idPair
+		forcedSt.Mode = *d.options.forceMask
 	}
 
-	if parent != "" {
-		parentBase := d.dir(parent)
-		st, err := system.Stat(filepath.Join(parentBase, "diff"))
-		if err != nil {
-			return err
-		}
-		perms = os.FileMode(st.Mode())
+	diff := path.Join(dir, "diff")
+	if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
+		return err
 	}
 
-	if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
-		return err
+	if d.options.forceMask != nil {
+		st.Mode |= os.ModeDir
+		if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
+			return err
+		}
 	}
 
 	lid := generateID(idLength)
@@ -1130,16 +1115,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
 		return err
 	}
 
-	if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
 		return err
 	}
 
 	// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
 	if parent == "" {
-		return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID)
+		return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID)
 	}
 
 	lower, err := d.getLower(parent)
@@ -1227,17 +1212,22 @@ func (d *Driver) getAllImageStores() []string {
 	return additionalImageStores
 }
 
-func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
-	var homedir string
-
-	if useImageStore && d.imageStore != "" {
-		homedir = path.Join(d.imageStore, d.name)
-	} else {
-		homedir = d.home
+// homeDirForImageStore returns the home directory to use when an image store is configured, and d.home otherwise
+func (d *Driver) homeDirForImageStore() string {
+	if d.imageStore != "" {
+		return path.Join(d.imageStore, d.name)
 	}
+	// If no image store is configured, use the driver's own home directory.
+	return d.home
+}
 
+func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
+	homedir := d.home
+	if useImageStore {
+		homedir = d.homeDirForImageStore()
+	}
 	newpath := path.Join(homedir, id)
-
 	if err := fileutils.Exists(newpath); err != nil {
 		for _, p := range d.getAllImageStores() {
 			l := path.Join(p, d.name, id)
@@ -1283,12 +1273,6 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
 }
 
 func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string {
-	if uidMaps == nil {
-		uidMaps = d.uidMaps
-	}
-	if gidMaps == nil {
-		gidMaps = d.gidMaps
-	}
 	if uidMaps != nil {
 		var uids, gids bytes.Buffer
 		if len(uidMaps) == 1 && uidMaps[0].Size == 1 {
@@ -1461,6 +1445,38 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	if err := fileutils.Exists(dir); err != nil {
 		return "", err
 	}
+	if _, err := redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), true); err != nil {
+		return "", err
+	}
+
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
+	if err != nil {
+		return "", err
+	}
+
+	mergedDir := d.getMergedDir(id, dir, inAdditionalStore)
+	// Attempt to create the merged dir if it doesn't exist, but don't chown an already existing directory (it might be in an additional store)
+	if err := idtools.MkdirAllAndChownNew(mergedDir, 0o700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil && !os.IsExist(err) {
+		return "", err
+	}
+
+	if count := d.ctr.Increment(mergedDir); count > 1 {
+		return mergedDir, nil
+	}
+	defer func() {
+		if retErr != nil {
+			if c := d.ctr.Decrement(mergedDir); c <= 0 {
+				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+					// Ignore EINVAL: it means the directory is not a mount point, which can
+					// happen if the current function fails before the mount point is created.
+					if !errors.Is(mntErr, unix.EINVAL) {
+						logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
+					}
+				}
+			}
+		}
+	}()
 
 	readWrite := !inAdditionalStore
 
@@ -1498,19 +1514,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 				if err := unix.Uname(&uts); err == nil {
 					release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
 				}
-				logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
+				logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel%s", release)
 			} else {
 				logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
 			}
 		}
-		optsList = stripOption(optsList, "metacopy=on")
+		optsList = slices.DeleteFunc(optsList, func(opt string) bool {
+			return opt == "metacopy=on"
+		})
 	}
 
-	for _, o := range optsList {
-		if o == "ro" {
-			readWrite = false
-			break
-		}
+	if slices.Contains(optsList, "ro") {
+		readWrite = false
 	}
 
 	lowers, err := os.ReadFile(path.Join(dir, lowerFile))
@@ -1539,11 +1554,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	for err == nil {
 		absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
 		diffN++
-		st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
-		if err == nil && !permsKnown {
-			perms = os.FileMode(st.Mode())
-			permsKnown = true
-		}
+		err = fileutils.Exists(filepath.Join(dir, nameWithSuffix("diff", diffN)))
 	}
 
 	idmappedMountProcessPid := -1
@@ -1561,12 +1572,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	composefsMounts := []string{}
 	defer func() {
 		for _, m := range composefsMounts {
-			defer unix.Unmount(m, unix.MNT_DETACH)
+			defer func(m string) {
+				if err := unix.Unmount(m, unix.MNT_DETACH); err != nil {
+					logrus.Warnf("Unmount %q: %v", m, err)
+				}
+			}(m)
 		}
 	}()
 
 	composeFsLayers := []string{}
-	composeFsLayersDir := filepath.Join(dir, "composefs-layers")
 	maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
 		composefsBlob := d.getComposefsData(lowerID)
 		if err := fileutils.Exists(composefsBlob); err != nil {
@@ -1581,7 +1595,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			return "", fmt.Errorf("cannot mount a composefs layer as writeable")
 		}
 
-		dest := filepath.Join(composeFsLayersDir, fmt.Sprintf("%d", i))
+		dest := d.getStorePrivateDirectory(id, dir, fmt.Sprintf("composefs-layers/%d", i), inAdditionalStore)
 		if err := os.MkdirAll(dest, 0o700); err != nil {
 			return "", err
 		}
@@ -1665,7 +1679,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 				skipIDMappingLayers[composefsMount] = composefsMount
 				// overlay takes a reference on the mount, so it is safe to unmount
 				// the mapped idmounts as soon as the final overlay file system is mounted.
-				defer unix.Unmount(composefsMount, unix.MNT_DETACH)
+				defer func() {
+					if err := unix.Unmount(composefsMount, unix.MNT_DETACH); err != nil {
+						logrus.Warnf("Unmount %q: %v", composefsMount, err)
+					}
+				}()
 			}
 			absLowers = append(absLowers, composefsMount)
 			continue
@@ -1685,12 +1703,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		optsList = append(optsList, "metacopy=on", "redirect_dir=on")
 	}
 
-	// user namespace requires this to move a directory from lower to upper.
-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
-	if err != nil {
-		return "", err
-	}
-
 	if len(absLowers) == 0 {
 		absLowers = append(absLowers, path.Join(dir, "empty"))
 	}
@@ -1705,33 +1717,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 	}
 
-	mergedDir := path.Join(dir, "merged")
-	// Attempt to create the merged dir only if it doesn't exist.
-	if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) {
-		if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
-			return "", err
-		}
-	}
-	if count := d.ctr.Increment(mergedDir); count > 1 {
-		return mergedDir, nil
-	}
-	defer func() {
-		if retErr != nil {
-			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
-					logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
-				}
-			}
-		}
-	}()
-
 	workdir := path.Join(dir, "work")
 
 	if d.options.mountProgram == "" && unshare.IsRootless() {
 		optsList = append(optsList, "userxattr")
 	}
 
-	if options.Volatile && !hasVolatileOption(optsList) {
+	if options.Volatile && !slices.Contains(optsList, "volatile") {
 		supported, err := d.getSupportsVolatile()
 		if err != nil {
 			return "", err
@@ -1772,7 +1764,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 				// overlay takes a reference on the mount, so it is safe to unmount
 				// the mapped idmounts as soon as the final overlay file system is mounted.
-				defer unix.Unmount(root, unix.MNT_DETACH)
+				defer func() {
+					if err := unix.Unmount(root, unix.MNT_DETACH); err != nil {
+						logrus.Warnf("Unmount %q: %v", root, err)
+					}
+				}()
 			}
 
 			// relative path to the layer through the id mapped mount
@@ -1788,8 +1784,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 	lowerDirs := strings.Join(absLowers, ":")
 	if len(composeFsLayers) > 0 {
-		composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
-		lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
+		sep := "::"
+		supportsDataOnly, err := d.getSupportsDataOnly()
+		if err != nil {
+			return "", err
+		}
+		if !supportsDataOnly {
+			sep = ":"
+		}
+		composeFsLayersLowerDirs := strings.Join(composeFsLayers, sep)
+		lowerDirs = lowerDirs + sep + composeFsLayersLowerDirs
 	}
 	// absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent
 	// its usage.
@@ -1854,7 +1858,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
 			return mountOverlayFrom(d.home, source, target, mType, flags, label)
 		}
-		mountTarget = path.Join(id, "merged")
+		if !inAdditionalStore {
+			mountTarget = path.Join(id, "merged")
+		}
 	}
 
 	// overlay has a check in place to prevent mounting the same file system twice
@@ -1873,13 +1879,46 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	return mergedDir, nil
 }
 
+// getStorePrivateDirectory returns a directory path for storing data that requires exclusive access.
+// If 'inAdditionalStore' is true, the path will be under the rundir, otherwise it will be placed in
+// the primary store.
+func (d *Driver) getStorePrivateDirectory(id, layerDir, subdir string, inAdditionalStore bool) string {
+	if inAdditionalStore {
+		return path.Join(d.runhome, id, subdir)
+	}
+	return path.Join(layerDir, subdir)
+}
+
+// getMergedDir returns the directory path that should be used as the mount point for the overlayfs.
+func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
+	// Ordinarily, .Get() (layer mounting) callers are supposed to guarantee exclusion.
+	//
+	// But additional stores are initialized with RO locks and don’t support a write
+	// lock operation at all; and naiveDiff operations cause mounts/unmounts, so they might
+	// happen on code paths where we might only be holding a RO lock for the additional store.
+	// To prevent races with other processes mounting or unmounting the layer,
+	// use a private directory under the main store rundir, not the "merged" directory inside the
+	// original layer store holding the layer data.
+	//
+	// To support this, contrary to the _general_ locking rules for .Diff / .Changes (which allow a RO lock),
+	// the top-level Store implementation uses an exclusive lock for the primary layer store;
+	// and since the rundir cannot be shared for different stores, it is safe to assume the
+	// current process has exclusive access to it.
+	//
+	// TO DO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+	// (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize,
+	// but DiffSize probably needs to remain for computing sizes of container’s RW layers.)
+	return d.getStorePrivateDirectory(id, dir, "merged", inAdditionalStore)
+}
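
In effect, getMergedDir keeps the mount point next to the layer data for the
primary store, but moves it under the per-process rundir for additional
(read-only) stores. Illustrative only, with hypothetical store paths:

    // primary store: the mount point lives inside the layer directory
    //   getMergedDir("abc", "/var/lib/containers/storage/overlay/abc", false)
    //     -> /var/lib/containers/storage/overlay/abc/merged
    // additional store: the mount point moves under this process's rundir
    //   getMergedDir("abc", "/usr/share/containers/storage/overlay/abc", true)
    //     -> <runhome>/abc/merged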
+
 // Put unmounts the mount path created for the given id.
 func (d *Driver) Put(id string) error {
 	dir, _, inAdditionalStore := d.dir2(id, false)
 	if err := fileutils.Exists(dir); err != nil {
 		return err
 	}
-	mountpoint := path.Join(dir, "merged")
+	mountpoint := d.getMergedDir(id, dir, inAdditionalStore)
+
 	if count := d.ctr.Decrement(mountpoint); count > 0 {
 		return nil
 	}
@@ -1917,7 +1956,7 @@ func (d *Driver) Put(id string) error {
 		// If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all
 		// pending changes are propagated to the file system
 		if !unmounted {
-			fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
+			fd, err := unix.Open(mountpoint, unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
 			if err == nil {
 				if err := unix.Syncfs(fd); err != nil {
 					logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
@@ -1936,7 +1975,15 @@ func (d *Driver) Put(id string) error {
 		}
 	}
 
-	if !inAdditionalStore {
+	if inAdditionalStore {
+		// check the base name for extra safety
+		if strings.HasPrefix(mountpoint, d.runhome) && filepath.Base(mountpoint) == "merged" {
+			err := os.RemoveAll(filepath.Dir(mountpoint))
+			if err != nil {
+				logrus.Warningf("Failed to remove mountpoint %s overlay: %s: %v", id, mountpoint, err)
+			}
+		}
+	} else {
 		uid, gid := int(0), int(0)
 		fi, err := os.Stat(mountpoint)
 		if err != nil {
@@ -1953,7 +2000,7 @@ func (d *Driver) Put(id string) error {
 		// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
 		// its atomic semantics.  In this way the "merged" directory is never removed.
 		if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
-			logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
+			logrus.Debugf("Failed to replace mountpoint %s overlay: %s: %v", id, mountpoint, err)
 			return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
 		}
 	}
@@ -2024,11 +2071,27 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
 }
 
 type overlayFileGetter struct {
-	diffDirs []string
+	diffDirs        []string
+	composefsMounts map[string]*os.File // map from diff dir to the directory with the composefs blob mounted
 }
 
 func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+	buf := make([]byte, unix.PathMax)
 	for _, d := range g.diffDirs {
+		if f, found := g.composefsMounts[d]; found {
+			// there is no *at equivalent for getxattr, but it can be emulated by opening the file under /proc/self/fd/$FD/$PATH
+			n, err := unix.Getxattr(fmt.Sprintf("/proc/self/fd/%d/%s", int(f.Fd()), path), "trusted.overlay.redirect", buf)
+			if err != nil {
+				if errors.Is(err, unix.ENODATA) {
+					continue
+				}
+				return nil, &fs.PathError{Op: "getxattr", Path: path, Err: err}
+			}
+
+			// the xattr value is the path to the file in the composefs layer diff directory
+			return os.Open(filepath.Join(d, string(buf[:n])))
+		}
+
 		f, err := os.Open(filepath.Join(d, path))
 		if err == nil {
 			return f, nil
@@ -2041,21 +2104,32 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
 }
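
Since Linux has no getxattrat(2), the getter above addresses the file through
the held directory descriptor via /proc/self/fd. The same trick in isolation,
a hedged sketch with a hypothetical mount point and file name:

    if dir, err := os.Open("/mnt/composefs"); err == nil {
        defer dir.Close()
        buf := make([]byte, unix.PathMax)
        n, err := unix.Getxattr(
            fmt.Sprintf("/proc/self/fd/%d/%s", dir.Fd(), "usr/bin/example"),
            "trusted.overlay.redirect", buf)
        if err == nil {
            fmt.Println("redirect target:", string(buf[:n]))
        }
    }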
 
 func (g *overlayFileGetter) Close() error {
-	return nil
+	var errs *multierror.Error
+	for _, f := range g.composefsMounts {
+		if err := f.Close(); err != nil {
+			errs = multierror.Append(errs, err)
+		}
+		if err := unix.Rmdir(f.Name()); err != nil {
+			errs = multierror.Append(errs, err)
+		}
+	}
+	return errs.ErrorOrNil()
 }
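
The rewritten Close collects every failure rather than stopping at the first.
hashicorp/go-multierror's Append skips nil errors and ErrorOrNil collapses an
empty aggregate back to nil, so callers keep the usual err == nil contract:

    var errs *multierror.Error
    errs = multierror.Append(errs, nil)             // nil errors are dropped
    fmt.Println(errs.ErrorOrNil())                  // <nil>
    errs = multierror.Append(errs, errors.New("x"))
    fmt.Println(errs.ErrorOrNil() != nil)           // true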
 
-func (d *Driver) getStagingDir(id string) string {
-	_, homedir, _ := d.dir2(id, d.imageStore != "")
-	return filepath.Join(homedir, stagingDir)
+// newStagingDir creates a new staging directory and returns the path to it.
+func (d *Driver) newStagingDir() (string, error) {
+	stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
+	err := os.MkdirAll(stagingDirBase, 0o700)
+	if err != nil && !os.IsExist(err) {
+		return "", err
+	}
+	return os.MkdirTemp(stagingDirBase, "")
 }
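
newStagingDir leans on os.MkdirTemp's uniqueness guarantee: with an empty
pattern it appends a random suffix under the base directory, so concurrent
differ applications get disjoint staging directories. A toy illustration:

    base := filepath.Join(os.TempDir(), "staging") // stand-in for <home>/staging
    _ = os.MkdirAll(base, 0o700)
    d1, _ := os.MkdirTemp(base, "") // e.g. .../staging/2384758101
    d2, _ := os.MkdirTemp(base, "") // distinct from d1
    fmt.Println(d1 != d2)           // true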
 
 // DiffGetter returns a FileGetCloser that can read files from the directory that
 // contains files for the layer differences, either for this layer, or one of our
 // lowers if we're just a template directory. Used for direct access for tar-split.
-func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
-	if d.usingComposefs {
-		return nil, nil
-	}
+func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) {
 	p, err := d.getDiffPath(id)
 	if err != nil {
 		return nil, err
@@ -2064,7 +2138,35 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
+
+	// map from diff dir to the directory with the composefs blob mounted
+	composefsMounts := make(map[string]*os.File)
+	defer func() {
+		if Err != nil {
+			for _, f := range composefsMounts {
+				f.Close()
+				if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) {
+					logrus.Warnf("Failed to remove %s: %v", f.Name(), err)
+				}
+			}
+		}
+	}()
+	diffDirs := append([]string{p}, paths...)
+	for _, diffDir := range diffDirs {
+		// diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory
+		id := path.Base(path.Dir(diffDir))
+		composefsData := d.getComposefsData(id)
+		if fileutils.Exists(composefsData) != nil {
+			// not a composefs layer, ignore it
+			continue
+		}
+		fd, err := openComposefsMount(composefsData)
+		if err != nil {
+			return nil, err
+		}
+		composefsMounts[diffDir] = os.NewFile(uintptr(fd), composefsData)
+	}
+	return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil
 }
 
 // CleanupStagingDirectory cleanups the staging directory.
@@ -2081,14 +2183,14 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
 
 func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
 	feature := "dataonly-layers"
-	overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+	overlayCacheResult, _, err := cachedFeatureCheck(runhome, feature)
 	if err == nil {
 		if overlayCacheResult {
 			logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
 			return true, nil
 		}
 		logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
-		return false, errors.New(overlayCacheText)
+		return false, nil
 	}
 	supportsDataOnly, err := supportsDataOnlyLayers(home)
 	if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
@@ -2098,55 +2200,47 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
 }
 
 // ApplyDiffWithDiffer applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
+func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
 	var idMappings *idtools.IDMappings
+	var forceMask *os.FileMode
+
 	if options != nil {
 		idMappings = options.Mappings
+		forceMask = options.ForceMask
+	}
+	if d.options.forceMask != nil {
+		forceMask = d.options.forceMask
 	}
+
 	if idMappings == nil {
 		idMappings = &idtools.IDMappings{}
 	}
 
-	var applyDir string
-
-	if id == "" {
-		stagingDir := d.getStagingDir(id)
-		err := os.MkdirAll(stagingDir, 0o700)
-		if err != nil && !os.IsExist(err) {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		layerDir, err := os.MkdirTemp(stagingDir, "")
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		perms := defaultPerms
-		if d.options.forceMask != nil {
-			perms = *d.options.forceMask
-		}
-		applyDir = filepath.Join(layerDir, "dir")
-		if err := os.Mkdir(applyDir, perms); err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
+	layerDir, err := d.newStagingDir()
+	if err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
+	perms := defaultPerms
+	if forceMask != nil {
+		perms = *forceMask
+	}
+	applyDir := filepath.Join(layerDir, "dir")
+	if err := os.Mkdir(applyDir, perms); err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
 
-		lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		defer func() {
-			if errRet != nil {
-				delete(d.stagingDirsLocks, layerDir)
-				lock.Unlock()
-			}
-		}()
-		d.stagingDirsLocks[layerDir] = lock
-		lock.Lock()
-	} else {
-		var err error
-		applyDir, err = d.getDiffPath(id)
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
+	lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+	if err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
 	}
+	defer func() {
+		if errRet != nil {
+			delete(d.stagingDirsLocks, layerDir)
+			lock.Unlock()
+		}
+	}()
+	d.stagingDirsLocks[layerDir] = lock
+	lock.Lock()
 
 	logrus.Debugf("Applying differ in %s", applyDir)
 
@@ -2155,7 +2249,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 	}
 	if d.usingComposefs {
 		differOptions.Format = graphdriver.DifferOutputFormatFlat
-		differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
+		differOptions.UseFsVerity = graphdriver.DifferFsVerityIfAvailable
 	}
 	out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
 		UIDMaps:           idMappings.UIDs(),
@@ -2163,6 +2257,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 		IgnoreChownErrors: d.options.ignoreChownErrors,
 		WhiteoutFormat:    d.getWhiteoutFormat(),
 		InUserNS:          unshare.IsRootless(),
+		ForceMask:         forceMask,
 	}, &differOptions)
 
 	out.Target = applyDir
@@ -2182,10 +2277,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
 		}
 	}()
 
-	if filepath.Dir(parentStagingDir) != d.getStagingDir(id) {
-		return fmt.Errorf("%q is not a staging directory", stagingDirectory)
-	}
-
 	diffPath, err := d.getDiffPath(id)
 	if err != nil {
 		return err
@@ -2270,7 +2361,7 @@ func (d *Driver) getComposefsData(id string) string {
 
 func (d *Driver) getDiffPath(id string) (string, error) {
 	dir := d.dir(id)
-	return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
+	return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false)
 }
 
 func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@@ -2279,7 +2370,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
 		return nil, err
 	}
 	for i, l := range layers {
-		layers[i], err = redirectDiffIfAdditionalLayer(l)
+		layers[i], err = redirectDiffIfAdditionalLayer(l, false)
 		if err != nil {
 			return nil, err
 		}
@@ -2342,14 +2433,18 @@ func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent strin
 	// layers.
 	diffPath, err := d.getDiffPath(id)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get diff path: %w", err)
 	}
 	layers, err := d.getLowerDiffPaths(id)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get lower diff path: %w", err)
 	}
 
-	return archive.OverlayChanges(layers, diffPath)
+	c, err := archive.OverlayChanges(layers, diffPath)
+	if err != nil {
+		return nil, fmt.Errorf("computing changes: %w", err)
+	}
+	return c, nil
 }
 
 // AdditionalImageStores returns additional image stores supported by the driver
@@ -2476,6 +2571,19 @@ func nameWithSuffix(name string, number int) string {
 	return fmt.Sprintf("%s%d", name, number)
 }
 
+func validateOneAdditionalLayerPath(target string) error {
+	for _, p := range []string{
+		filepath.Join(target, "diff"),
+		filepath.Join(target, "info"),
+		filepath.Join(target, "blob"),
+	} {
+		if err := fileutils.Exists(p); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) {
 	refElem := base64.StdEncoding.EncodeToString([]byte(ref))
 	for _, ls := range d.options.layerStores {
@@ -2484,18 +2592,11 @@ func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (st
 			ref = refElem
 		}
 		target := path.Join(ls.path, ref, tocDigest.String())
-		// Check if all necessary files exist
-		for _, p := range []string{
-			filepath.Join(target, "diff"),
-			filepath.Join(target, "info"),
-			filepath.Join(target, "blob"),
-		} {
-			if err := fileutils.Exists(p); err != nil {
-				wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
-				return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
-			}
+		err := validateOneAdditionalLayerPath(target)
+		if err == nil {
+			return target, nil
 		}
-		return target, nil
+		logrus.Debugf("additional Layer Store %v failed to stat additional layer: %v", ls, err)
 	}
 
 	return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown)
@@ -2612,12 +2713,17 @@ func notifyReleaseAdditionalLayer(al string) {
 // redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
 // returns the redirected path. If the passed diff is not the one in Additional Layer
 // Store, it returns the original path without changes.
-func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
+func redirectDiffIfAdditionalLayer(diffPath string, checkExistence bool) (string, error) {
 	if ld, err := os.Readlink(diffPath); err == nil {
 		// diff is the link to Additional Layer Store
 		if !path.IsAbs(ld) {
 			return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld)
 		}
+		if checkExistence {
+			if err := fileutils.Exists(ld); err != nil {
+				return "", fmt.Errorf("failed to access to the linked additional layer: %w", err)
+			}
+		}
 		diffPath = ld
 	} else if err.(*os.PathError).Err != syscall.EINVAL {
 		return "", err
@@ -2636,3 +2742,22 @@ func getMappedMountRoot(path string) string {
 	}
 	return dirName
 }
+
+// Dedup performs deduplication of the driver's storage.
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	var dirs []string
+	for _, layer := range req.Layers {
+		dir, _, inAdditionalStore := d.dir2(layer, false)
+		if inAdditionalStore {
+			continue
+		}
+		if err := fileutils.Exists(dir); err == nil {
+			dirs = append(dirs, filepath.Join(dir, "diff"))
+		}
+	}
+	r, err := dedup.DedupDirs(dirs, req.Options)
+	if err != nil {
+		return graphdriver.DedupResult{}, err
+	}
+	return graphdriver.DedupResult{Deduped: r.Deduped}, nil
+}
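
A sketch of how a caller might drive the new Dedup entry point; the layer IDs
are hypothetical and the argument fields are inferred from the code above:

    res, err := d.Dedup(graphdriver.DedupArgs{
        Layers:  []string{"1a2b", "3c4d"}, // hypothetical layer IDs
        Options: dedup.DedupOptions{HashMethod: dedup.DedupHashSHA256},
    })
    if err == nil {
        logrus.Infof("deduplicated %d bytes", res.Deduped)
    }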
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go
similarity index 92%
rename from vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go
rename to vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go
index 88bfbf9c7..39ca489f5 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota.go
@@ -1,5 +1,4 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux && cgo && !exclude_disk_quota
 
 package overlay
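
This and the following hunks drop the legacy "// +build" lines: since Go 1.17
the "//go:build" expression is authoritative, making the old form redundant
for code that no longer supports earlier toolchains. The two syntaxes encode
the same constraint:

    // pre-1.17 form: comma means AND, space means OR
    // +build linux,cgo,!exclude_disk_quota

    // current form: a plain boolean expression
    //go:build linux && cgo && !exclude_disk_quota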
 
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go
new file mode 100644
index 000000000..221006b28
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_disk_quota_unsupported.go
@@ -0,0 +1,16 @@
+//go:build linux && (!cgo || exclude_disk_quota)
+
+package overlay
+
+import (
+	"path"
+
+	"github.com/containers/storage/pkg/directory"
+)
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For Overlay, it attempts to check the XFS quota for size, and falls back to
+// finding the size of the "diff" directory.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(path.Join(d.dir(id), "diff"))
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
deleted file mode 100644
index d4f540c9c..000000000
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build linux && !cgo
-// +build linux,!cgo
-
-package overlay
-
-import (
-	"fmt"
-	"path"
-
-	"github.com/containers/storage/pkg/directory"
-)
-
-// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
-// For Overlay, it attempts to check the XFS quota for size, and falls back to
-// finding the size of the "diff" directory.
-func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
-	return directory.Usage(path.Join(d.dir(id), "diff"))
-}
-
-func getComposeFsHelper() (string, error) {
-	return "", fmt.Errorf("composefs not supported on this build")
-}
-
-func mountComposefsBlob(dataDir, mountPoint string) error {
-	return fmt.Errorf("composefs not supported on this build")
-}
-
-func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
-	return fmt.Errorf("composefs not supported on this build")
-}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
index 33b163a8c..b35633143 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package overlay
 
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
index 651990089..85045f66e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlay
 
diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
index 2670ef3df..b5baa11f4 100644
--- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
+++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package overlayutils
 
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
index b0623bdac..59ba5b0b2 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
@@ -1,5 +1,4 @@
 //go:build linux && !exclude_disk_quota && cgo
-// +build linux,!exclude_disk_quota,cgo
 
 //
 // projectquota.go - implements XFS project quota controls
@@ -173,6 +172,11 @@ func NewControl(basePath string) (*Control, error) {
 		return nil, err
 	}
 
+	// Clear inherit flag from top-level directory if necessary.
+	if err := stripProjectInherit(basePath); err != nil {
+		return nil, err
+	}
+
 	//
 	// get first project id to be used for next container
 	//
@@ -350,6 +354,8 @@ func setProjectID(targetPath string, projectID uint32) error {
 	}
 	defer closeDir(dir)
 
+	logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath)
+
 	var fsx C.struct_fsxattr
 	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
 		uintptr(unsafe.Pointer(&fsx)))
@@ -367,6 +373,36 @@ func setProjectID(targetPath string, projectID uint32) error {
 	return nil
 }
 
+// stripProjectInherit strips the project inherit flag from a directory.
+// Used on the top-level directory to ensure that project IDs are inherited only
+// by files inside the directories we set quotas on, not by the quota directories
+// themselves; otherwise everything would end up with the same project ID.
+func stripProjectInherit(targetPath string) error {
+	dir, err := openDir(targetPath)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var fsx C.struct_fsxattr
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno)
+	}
+	if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 {
+		// Flag is set, need to clear it.
+		logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath)
+		fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT
+		_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+			uintptr(unsafe.Pointer(&fsx)))
+		if errno != 0 {
+			return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno)
+		}
+	}
+	return nil
+}
+
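
The clearing step uses Go's bit-clear operator: x &^ y keeps exactly the bits
of x that are not set in y. A small worked example (flag value taken from the
Linux uapi fs.h header):

    const projInherit = 0x00000200 // FS_XFLAG_PROJINHERIT
    flags := uint32(0x0000020a)    // PROJINHERIT plus two unrelated bits
    flags = flags &^ projInherit   // -> 0x0000000a, the other bits survive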
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
 func (q *Control) findNextProjectID() error {
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index 648fd3379..fdc2ad161 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux || exclude_disk_quota || !cgo
-// +build !linux exclude_disk_quota !cgo
 
 package quota
 
diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
index bbb9cb657..d95bd398b 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go
@@ -1,5 +1,4 @@
 //go:build !exclude_graphdriver_aufs && linux
-// +build !exclude_graphdriver_aufs,linux
 
 package register
 
diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
index 425ebd798..01a322374 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go
@@ -1,5 +1,4 @@
 //go:build !exclude_graphdriver_btrfs && linux
-// +build !exclude_graphdriver_btrfs,linux
 
 package register
 
diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
index 95b77b73e..c4f962822 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go
@@ -1,5 +1,4 @@
-//go:build !exclude_graphdriver_overlay && linux && cgo
-// +build !exclude_graphdriver_overlay,linux,cgo
+//go:build !exclude_graphdriver_overlay && linux
 
 package register
 
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
index 8e5788a43..136848f4a 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -1,5 +1,4 @@
 //go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
 
 package register
 
diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
index d94756bdd..17e9d5870 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package vfs // import "github.com/containers/storage/drivers/vfs"
 
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index db9032117..98dc55b0e 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/internal/dedup"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/fileutils"
@@ -33,12 +34,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 	d := &Driver{
 		name:       "vfs",
 		home:       home,
-		idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
 		imageStore: options.ImageStore,
 	}
 
-	rootIDs := d.idMappings.RootPair()
-	if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil {
+	if err := os.MkdirAll(filepath.Join(home, "dir"), 0o700); err != nil {
 		return nil, err
 	}
 	for _, option := range options.DriverOptions {
@@ -79,7 +78,6 @@ type Driver struct {
 	name              string
 	home              string
 	additionalHomes   []string
-	idMappings        *idtools.IDMappings
 	ignoreChownErrors bool
 	naiveDiff         graphdriver.DiffDriver
 	updater           graphdriver.LayerIDMapUpdater
@@ -152,14 +150,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
 		return fmt.Errorf("--storage-opt is not supported for vfs")
 	}
 
-	idMappings := d.idMappings
+	var uidMaps []idtools.IDMap
+	var gidMaps []idtools.IDMap
+
 	if opts != nil && opts.IDMappings != nil {
-		idMappings = opts.IDMappings
+		uidMaps = opts.IDMappings.UIDs()
+		gidMaps = opts.IDMappings.GIDs()
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return err
 	}
 
 	dir := d.dir2(id, ro)
-	rootIDs := idMappings.RootPair()
-	if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
+	if err := os.MkdirAll(filepath.Dir(dir), 0o700); err != nil {
 		return err
 	}
 
@@ -174,21 +179,24 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
 		rootPerms = os.FileMode(0o700)
 	}
 
+	idPair := idtools.IDPair{UID: rootUID, GID: rootGID}
 	if parent != "" {
 		st, err := system.Stat(d.dir(parent))
 		if err != nil {
 			return err
 		}
 		rootPerms = os.FileMode(st.Mode())
-		rootIDs.UID = int(st.UID())
-		rootIDs.GID = int(st.GID())
+		idPair.UID = int(st.UID())
+		idPair.GID = int(st.GID())
 	}
-	if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil {
+	if err := idtools.MkdirAllAndChownNew(dir, rootPerms, idPair); err != nil {
 		return err
 	}
 	labelOpts := []string{"level:s0"}
 	if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
-		label.SetFileLabel(dir, mountLabel)
+		if err := label.SetFileLabel(dir, mountLabel); err != nil {
+			logrus.Debugf("Set %s label to %q file ended with error: %v", mountLabel, dir, err)
+		}
 	}
 	if parent != "" {
 		parentDir, err := d.Get(parent, graphdriver.MountOpts{})
@@ -341,3 +349,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
 func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
 	return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
 }
+
+// Dedup performs deduplication of the driver's storage.
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	var dirs []string
+	for _, layer := range req.Layers {
+		dir := d.dir2(layer, false)
+		if err := fileutils.Exists(dir); err == nil {
+			dirs = append(dirs, dir)
+		}
+	}
+	r, err := dedup.DedupDirs(dirs, req.Options)
+	if err != nil {
+		return graphdriver.DedupResult{}, err
+	}
+	return graphdriver.DedupResult{Deduped: r.Deduped}, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 18f90fdc5..59ed9a756 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64,
 	buf := bufio.NewWriter(nil)
 	for err == nil {
 		base := path.Base(hdr.Name)
-		if strings.HasPrefix(base, archive.WhiteoutPrefix) {
-			name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
+		if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
+			name := path.Join(path.Dir(hdr.Name), rm)
 			err = w.Remove(filepath.FromSlash(name))
 			if err != nil {
 				return 0, err
@@ -975,6 +975,11 @@ func (d *Driver) AdditionalImageStores() []string {
 	return nil
 }
 
+// Dedup performs deduplication of the driver's storage.
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	return graphdriver.DedupResult{}, nil
+}
+
 // UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
 // matching those in toContainer to matching those in toHost.
 func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index e02289784..f53b0e1b6 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -1,5 +1,4 @@
 //go:build linux || freebsd
-// +build linux freebsd
 
 package zfs
 
@@ -106,11 +105,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
 		return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
 	}
 
-	rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
-	}
-	if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(base, 0o700); err != nil {
 		return nil, fmt.Errorf("failed to create '%s': %w", base, err)
 	}
 
@@ -118,8 +113,6 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
 		dataset:          rootDataset,
 		options:          options,
 		filesystemsCache: filesystemsCache,
-		uidMaps:          opt.UIDMaps,
-		gidMaps:          opt.GIDMaps,
 		ctr:              graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
 	}
 	return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
@@ -177,8 +170,6 @@ type Driver struct {
 	options          zfsOptions
 	sync.Mutex       // protects filesystem cache against concurrent access
 	filesystemsCache map[string]bool
-	uidMaps          []idtools.IDMap
-	gidMaps          []idtools.IDMap
 	ctr              *graphdriver.RefCounter
 }
 
@@ -248,7 +239,9 @@ func (d *Driver) cloneFilesystem(name, parentName string) error {
 	}
 
 	if err != nil {
-		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		if err1 := snapshot.Destroy(zfs.DestroyDeferDeletion); err1 != nil {
+			logrus.Warnf("Destroy zfs.DestroyDeferDeletion: %v", err1)
+		}
 		return err
 	}
 	return snapshot.Destroy(zfs.DestroyDeferDeletion)
@@ -399,12 +392,18 @@ func (d *Driver) Remove(id string) error {
 	name := d.zfsPath(id)
 	dataset := zfs.Dataset{Name: name}
 	err := dataset.Destroy(zfs.DestroyRecursive)
-	if err == nil {
-		d.Lock()
-		delete(d.filesystemsCache, name)
-		d.Unlock()
+	if err != nil {
+		// We must be tolerant in case the image has already been removed,
+		// for example, accidentally by hand.
+		if _, err1 := zfs.GetDataset(name); err1 == nil {
+			return err
+		}
+		logrus.WithField("storage-driver", "zfs").Debugf("Layer %s has already been removed; ignore it and continue to delete the cache", id)
 	}
-	return err
+	d.Lock()
+	delete(d.filesystemsCache, name)
+	d.Unlock()
+	return nil
 }
 
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
@@ -448,12 +447,8 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
 	opts := label.FormatMountLabel(mountOptions, options.MountLabel)
 	logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
 
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return "", err
-	}
 	// Create the target directories if they don't exist
-	if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil {
+	if err := os.MkdirAll(mountpoint, 0o755); err != nil {
 		return "", err
 	}
 
@@ -516,3 +511,8 @@ func (d *Driver) ListLayers() ([]string, error) {
 func (d *Driver) AdditionalImageStores() []string {
 	return nil
 }
+
+// Dedup performs deduplication of the driver's storage.
+func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
+	return graphdriver.DedupResult{}, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
index 738b0ae1b..15a1447ac 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
@@ -1,4 +1,3 @@
 //go:build !linux && !freebsd
-// +build !linux,!freebsd
 
 package zfs
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index d71eab08b..5c9127ede 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -181,18 +182,18 @@ func copyImage(i *Image) *Image {
 	return &Image{
 		ID:              i.ID,
 		Digest:          i.Digest,
-		Digests:         copyDigestSlice(i.Digests),
-		Names:           copyStringSlice(i.Names),
-		NamesHistory:    copyStringSlice(i.NamesHistory),
+		Digests:         copySlicePreferringNil(i.Digests),
+		Names:           copySlicePreferringNil(i.Names),
+		NamesHistory:    copySlicePreferringNil(i.NamesHistory),
 		TopLayer:        i.TopLayer,
-		MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+		MappedTopLayers: copySlicePreferringNil(i.MappedTopLayers),
 		Metadata:        i.Metadata,
-		BigDataNames:    copyStringSlice(i.BigDataNames),
-		BigDataSizes:    copyStringInt64Map(i.BigDataSizes),
-		BigDataDigests:  copyStringDigestMap(i.BigDataDigests),
+		BigDataNames:    copySlicePreferringNil(i.BigDataNames),
+		BigDataSizes:    copyMapPreferringNil(i.BigDataSizes),
+		BigDataDigests:  copyMapPreferringNil(i.BigDataDigests),
 		Created:         i.Created,
 		ReadOnly:        i.ReadOnly,
-		Flags:           copyStringInterfaceMap(i.Flags),
+		Flags:           copyMapPreferringNil(i.Flags),
 	}
 }
 
@@ -716,14 +717,14 @@ func (r *imageStore) create(id string, names []string, layer string, options Ima
 		Digest:         options.Digest,
 		Digests:        dedupeDigests(options.Digests),
 		Names:          names,
-		NamesHistory:   copyStringSlice(options.NamesHistory),
+		NamesHistory:   copySlicePreferringNil(options.NamesHistory),
 		TopLayer:       layer,
 		Metadata:       options.Metadata,
 		BigDataNames:   []string{},
 		BigDataSizes:   make(map[string]int64),
 		BigDataDigests: make(map[string]digest.Digest),
 		Created:        options.CreationDate,
-		Flags:          copyStringInterfaceMap(options.Flags),
+		Flags:          newMapFrom(options.Flags),
 	}
 	if image.Created.IsZero() {
 		image.Created = time.Now().UTC()
@@ -863,12 +864,6 @@ func (r *imageStore) Delete(id string) error {
 		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
 	id = image.ID
-	toDeleteIndex := -1
-	for i, candidate := range r.images {
-		if candidate.ID == id {
-			toDeleteIndex = i
-		}
-	}
 	delete(r.byid, id)
 	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
 	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -877,21 +872,18 @@ func (r *imageStore) Delete(id string) error {
 		delete(r.byname, name)
 	}
 	for _, digest := range image.Digests {
-		prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+		prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
+			return i == image
+		})
 		if len(prunedList) == 0 {
 			delete(r.bydigest, digest)
 		} else {
 			r.bydigest[digest] = prunedList
 		}
 	}
-	if toDeleteIndex != -1 {
-		// delete the image at toDeleteIndex
-		if toDeleteIndex == len(r.images)-1 {
-			r.images = r.images[:len(r.images)-1]
-		} else {
-			r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
-		}
-	}
+	r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
+		return candidate.ID == id
+	})
 	if err := r.Save(); err != nil {
 		return err
 	}
@@ -974,18 +966,7 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
 	if !ok {
 		return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
-	return copyStringSlice(image.BigDataNames), nil
-}
-
-func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
-	modified := make([]*Image, 0, len(slice))
-	for _, v := range slice {
-		if v == value {
-			continue
-		}
-		modified = append(modified, v)
-	}
-	return modified
+	return copySlicePreferringNil(image.BigDataNames), nil
 }
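
The hand-rolled imageSliceWithoutValue helper gives way to the standard
library's slices.DeleteFunc (Go 1.21). DeleteFunc filters in place and returns
the shortened slice, so the result must be assigned back, as the Delete path
above does:

    names := []string{"a", "b", "a", "c"}
    names = slices.DeleteFunc(names, func(s string) bool { return s == "a" })
    // names == ["b" "c"]; the backing array is reused, not copied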
 
 // Requires startWriting.
@@ -1037,21 +1018,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 		if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 			save = true
 		}
-		addName := true
-		for _, name := range image.BigDataNames {
-			if name == key {
-				addName = false
-				break
-			}
-		}
-		if addName {
+		if !slices.Contains(image.BigDataNames, key) {
 			image.BigDataNames = append(image.BigDataNames, key)
 			save = true
 		}
 		for _, oldDigest := range image.Digests {
 			// remove the image from the list of images in the digest-based index
 			if list, ok := r.bydigest[oldDigest]; ok {
-				prunedList := imageSliceWithoutValue(list, image)
+				prunedList := slices.DeleteFunc(list, func(i *Image) bool {
+					return i == image
+				})
 				if len(prunedList) == 0 {
 					delete(r.bydigest, oldDigest)
 				} else {
@@ -1066,9 +1042,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 			// add the image to the list of images in the digest-based index which
 			// corresponds to the new digest for this item, unless it's already there
 			list := r.bydigest[newDigest]
-			if len(list) == len(imageSliceWithoutValue(list, image)) {
-				// the list isn't shortened by trying to prune this image from it,
-				// so it's not in there yet
+			if !slices.Contains(list, image) {
 				r.bydigest[newDigest] = append(list, image)
 			}
 		}
diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup.go b/vendor/github.com/containers/storage/internal/dedup/dedup.go
new file mode 100644
index 000000000..59fcd0d23
--- /dev/null
+++ b/vendor/github.com/containers/storage/internal/dedup/dedup.go
@@ -0,0 +1,163 @@
+package dedup
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc64"
+	"io/fs"
+	"sync"
+
+	"github.com/opencontainers/selinux/pkg/pwalkdir"
+	"github.com/sirupsen/logrus"
+)
+
+var notSupported = errors.New("reflinks are not supported on this platform")
+
+const (
+	DedupHashInvalid DedupHashMethod = iota
+	DedupHashCRC
+	DedupHashFileSize
+	DedupHashSHA256
+)
+
+type DedupHashMethod int
+
+type DedupOptions struct {
+	// HashMethod is the hash function to use to find identical files
+	HashMethod DedupHashMethod
+}
+
+type DedupResult struct {
+	// Deduped represents the total number of bytes saved by deduplication.
+	// This value also accounts for all previously deduplicated data, not only the savings
+	// from the last run.
+	Deduped uint64
+}
+
+func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) {
+	switch hashMethod {
+	case DedupHashInvalid:
+		return "", fmt.Errorf("invalid hash method: %v", hashMethod)
+	case DedupHashFileSize:
+		return fmt.Sprintf("%v", info.Size()), nil
+	case DedupHashSHA256:
+		return readAllFile(path, info, func(buf []byte) (string, error) {
+			h := sha256.New()
+			if _, err := h.Write(buf); err != nil {
+				return "", err
+			}
+			return string(h.Sum(nil)), nil
+		})
+	case DedupHashCRC:
+		return readAllFile(path, info, func(buf []byte) (string, error) {
+			c := crc64.New(crc64.MakeTable(crc64.ECMA))
+			if _, err := c.Write(buf); err != nil {
+				return "", err
+			}
+			bufRet := make([]byte, 8)
+			binary.BigEndian.PutUint64(bufRet, c.Sum64())
+			return string(bufRet), nil
+		})
+	default:
+		return "", fmt.Errorf("unknown hash method: %v", hashMethod)
+	}
+}
+
+type pathsLocked struct {
+	paths []string
+	lock  sync.Mutex
+}
+
+func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
+	res := DedupResult{}
+	hashToPaths := make(map[string]*pathsLocked)
+	lock := sync.Mutex{} // protects `hashToPaths` and `res`
+
+	dedup, err := newDedupFiles()
+	if err != nil {
+		return res, err
+	}
+
+	for _, dir := range dirs {
+		logrus.Debugf("Deduping directory %s", dir)
+		if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error {
+			if err != nil {
+				return err
+			}
+			if !d.Type().IsRegular() {
+				return nil
+			}
+			info, err := d.Info()
+			if err != nil {
+				return err
+			}
+			size := uint64(info.Size())
+			if size == 0 {
+				// do not bother with empty files
+				return nil
+			}
+
+			// the file was already deduplicated
+			if visited, err := dedup.isFirstVisitOf(info); err != nil {
+				return err
+			} else if visited {
+				return nil
+			}
+
+			h, err := getFileChecksum(options.HashMethod, path, info)
+			if err != nil {
+				return err
+			}
+
+			lock.Lock()
+			item, foundItem := hashToPaths[h]
+			if !foundItem {
+				item = &pathsLocked{paths: []string{path}}
+				hashToPaths[h] = item
+				lock.Unlock()
+				return nil
+			}
+			item.lock.Lock()
+			lock.Unlock()
+
+			dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement
+				defer item.lock.Unlock()
+
+				var dedupBytes uint64
+				for _, src := range item.paths {
+					deduped, err := dedup.dedup(src, path, info)
+					if err == nil && deduped > 0 {
+						logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped)
+						dedupBytes += deduped
+						break
+					}
+					logrus.Debugf("Failed to deduplicate: %v", err)
+					if errors.Is(err, notSupported) {
+						return dedupBytes, err
+					}
+				}
+				if dedupBytes == 0 {
+					item.paths = append(item.paths, path)
+				}
+				return dedupBytes, nil
+			}()
+			if err != nil {
+				return err
+			}
+
+			lock.Lock()
+			res.Deduped += dedupBytes
+			lock.Unlock()
+			return nil
+		}); err != nil {
+			// if reflinks are not supported, return immediately without errors
+			if errors.Is(err, notSupported) {
+				return res, nil
+			}
+			return res, err
+		}
+	}
+	return res, nil
+}
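
For orientation, this is roughly how a caller inside c/storage might drive the new package (the package is internal, so outside callers cannot import it; the directory path below is a placeholder). A weak hash method is safe here because the actual sharing is delegated to the kernel, which verifies the ranges byte for byte before deduplicating:

package main

import (
	"fmt"

	"github.com/containers/storage/internal/dedup" // internal: importable only within c/storage
)

func main() {
	// SHA256 is exact; CRC and FileSize are faster pre-filters that rely on
	// the kernel's own byte-for-byte comparison to reject false matches.
	res, err := dedup.DedupDirs([]string{"/var/lib/containers/storage/overlay"}, dedup.DedupOptions{
		HashMethod: dedup.DedupHashSHA256,
	})
	if err != nil {
		fmt.Println("dedup failed:", err)
		return
	}
	fmt.Printf("deduplicated %d bytes\n", res.Deduped)
}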
diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
new file mode 100644
index 000000000..90ccb5f31
--- /dev/null
+++ b/vendor/github.com/containers/storage/internal/dedup/dedup_linux.go
@@ -0,0 +1,139 @@
+package dedup
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"sync"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+type deviceInodePair struct {
+	dev uint64
+	ino uint64
+}
+
+type dedupFiles struct {
+	lock          sync.Mutex
+	visitedInodes map[deviceInodePair]struct{}
+}
+
+func newDedupFiles() (*dedupFiles, error) {
+	return &dedupFiles{
+		visitedInodes: make(map[deviceInodePair]struct{}),
+	}, nil
+}
+
+func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	di := deviceInodePair{
+		dev: dev,
+		ino: ino,
+	}
+
+	_, visited := d.visitedInodes[di]
+	d.visitedInodes[di] = struct{}{}
+	return visited, nil
+}
+
+// isFirstVisitOf records that the file is being processed.  Returns true if the file was already visited.
+func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
+	st, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
+	}
+	return d.recordInode(uint64(st.Dev), st.Ino)
+}
+
+// dedup deduplicates the file at src path to dst path
+func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
+	srcFile, err := os.OpenFile(src, os.O_RDONLY, 0)
+	if err != nil {
+		return 0, fmt.Errorf("failed to open source file: %w", err)
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0)
+	if err != nil {
+		return 0, fmt.Errorf("failed to open destination file: %w", err)
+	}
+	defer dstFile.Close()
+
+	stSrc, err := srcFile.Stat()
+	if err != nil {
+		return 0, fmt.Errorf("failed to stat source file: %w", err)
+	}
+	sSrc, ok := stSrc.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
+	}
+	sDest, ok := fiDst.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
+	}
+	if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino {
+		// same inode, we are dealing with a hard link, no need to deduplicate
+		return 0, nil
+	}
+
+	value := unix.FileDedupeRange{
+		Src_offset: 0,
+		Src_length: uint64(stSrc.Size()),
+		Info: []unix.FileDedupeRangeInfo{
+			{
+				Dest_fd:     int64(dstFile.Fd()),
+				Dest_offset: 0,
+			},
+		},
+	}
+	err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
+	if err == nil {
+		return uint64(value.Info[0].Bytes_deduped), nil
+	}
+
+	if errors.Is(err, unix.ENOTSUP) {
+		return 0, notSupported
+	}
+	return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
+}
+
+func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
+	size := info.Size()
+	if size == 0 {
+		return fn(nil)
+	}
+
+	file, err := os.Open(path)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	if size < 4096 {
+		// small file, read it all
+		data := make([]byte, size)
+		_, err = io.ReadFull(file, data)
+		if err != nil {
+			return "", err
+		}
+		return fn(data)
+	}
+
+	mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE)
+	if err != nil {
+		return "", fmt.Errorf("failed to mmap file: %w", err)
+	}
+	defer func() {
+		_ = unix.Munmap(mmap)
+	}()
+
+	_ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL)
+
+	return fn(mmap)
+}
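
The primitive underneath dedup() is the Linux FIDEDUPERANGE ioctl, which asks the filesystem to share extents only after confirming the two ranges are byte-identical, so even a hash collision in the caller cannot corrupt data. A standalone sketch of the same call (Linux only; the /tmp paths are placeholders for two files with identical content):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("/tmp/a")
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.OpenFile("/tmp/b", os.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	fi, err := src.Stat()
	if err != nil {
		panic(err)
	}
	rng := unix.FileDedupeRange{
		Src_length: uint64(fi.Size()),
		Info:       []unix.FileDedupeRangeInfo{{Dest_fd: int64(dst.Fd())}},
	}
	if err := unix.IoctlFileDedupeRange(int(src.Fd()), &rng); err != nil {
		// ENOTSUP is expected on filesystems without reflink support.
		fmt.Println("dedupe not performed:", err)
		return
	}
	fmt.Printf("shared %d bytes\n", rng.Info[0].Bytes_deduped)
}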
diff --git a/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
new file mode 100644
index 000000000..cfadf8326
--- /dev/null
+++ b/vendor/github.com/containers/storage/internal/dedup/dedup_unsupported.go
@@ -0,0 +1,27 @@
+//go:build !linux
+
+package dedup
+
+import (
+	"io/fs"
+)
+
+type dedupFiles struct{}
+
+func newDedupFiles() (*dedupFiles, error) {
+	return nil, notSupported
+}
+
+// isFirstVisitOf records that the file is being processed.  Returns true if the file was already visited.
+func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
+	return false, notSupported
+}
+
+// dedup deduplicates the file at src path to dst path
+func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
+	return 0, notSupported
+}
+
+func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
+	return "", notSupported
+}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index f1325262b..1f8203fbf 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -5,10 +5,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -134,9 +136,12 @@ type Layer struct {
 	TOCDigest digest.Digest `json:"toc-digest,omitempty"`
 
 	// UncompressedSize is the length of the blob that was last passed to
-	// ApplyDiff() or create(), after we decompressed it.  If
-	// UncompressedDigest is not set, this should be treated as if it were
-	// an uninitialized value.
+	// ApplyDiff() or create(), after we decompressed it.
+	//
+	//   - If UncompressedDigest is set, this must be set to a valid value.
+	//   - Otherwise, if TOCDigest is set, this is either valid or -1.
+	//   - If neither of these digests is set, this should be treated as if it were
+	//     an uninitialized value.
 	UncompressedSize int64 `json:"diff-size,omitempty"`
 
 	// CompressionType is the type of compression which we detected on the blob
@@ -312,9 +317,8 @@ type rwLayerStore interface {
 	// applies its changes to a specified layer.
 	ApplyDiff(to string, diff io.Reader) (int64, error)
 
-	// ApplyDiffWithDiffer applies the changes through the differ callback function.
-	// If to is the empty string, then a staging directory is created by the driver.
-	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+	// applyDiffWithDifferNoLock applies the changes through the differ callback function.
+	applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
 
 	// CleanupStagingDirectory cleanups the staging directory.  It can be used to cleanup the staging directory on errors
 	CleanupStagingDirectory(stagingDirectory string) error
@@ -332,6 +336,9 @@ type rwLayerStore interface {
 
 	// Clean up unreferenced layers
 	GarbageCollect() error
+
+	// Dedup deduplicates layers in the store.
+	dedup(drivers.DedupArgs) (drivers.DedupResult, error)
 }
 
 type multipleLockFile struct {
@@ -435,7 +442,7 @@ func layerLocation(l *Layer) layerLocations {
 func copyLayer(l *Layer) *Layer {
 	return &Layer{
 		ID:                 l.ID,
-		Names:              copyStringSlice(l.Names),
+		Names:              copySlicePreferringNil(l.Names),
 		Parent:             l.Parent,
 		Metadata:           l.Metadata,
 		MountLabel:         l.MountLabel,
@@ -450,12 +457,12 @@ func copyLayer(l *Layer) *Layer {
 		CompressionType:    l.CompressionType,
 		ReadOnly:           l.ReadOnly,
 		volatileStore:      l.volatileStore,
-		BigDataNames:       copyStringSlice(l.BigDataNames),
-		Flags:              copyStringInterfaceMap(l.Flags),
-		UIDMap:             copyIDMap(l.UIDMap),
-		GIDMap:             copyIDMap(l.GIDMap),
-		UIDs:               copyUint32Slice(l.UIDs),
-		GIDs:               copyUint32Slice(l.GIDs),
+		BigDataNames:       copySlicePreferringNil(l.BigDataNames),
+		Flags:              copyMapPreferringNil(l.Flags),
+		UIDMap:             copySlicePreferringNil(l.UIDMap),
+		GIDMap:             copySlicePreferringNil(l.GIDMap),
+		UIDs:               copySlicePreferringNil(l.UIDs),
+		GIDs:               copySlicePreferringNil(l.GIDs),
 	}
 }
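
The copyStringSlice/copyUint32Slice/copyIDMap helpers collapse into a single generic copySlicePreferringNil. Its definition is outside this hunk; a plausible reconstruction (an assumption, not quoted from the patch) is that it preserves nil-ness so that omitempty JSON fields of a copied Layer stay absent rather than serializing as empty arrays:

package storage // hypothetical placement; the real helper lives elsewhere in this package

import "slices"

// copySlicePreferringNil returns nil for empty input (keeping omitempty
// semantics intact) and an independent copy otherwise. Reconstruction only.
func copySlicePreferringNil[T any](s []T) []T {
	if len(s) == 0 {
		return nil
	}
	return slices.Clone(s)
}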
 
@@ -909,22 +916,31 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 		// user of this storage area marked for deletion but didn't manage to
 		// actually delete.
 		var incompleteDeletionErrors error // = nil
+		var layersToDelete []*Layer
 		for _, layer := range r.layers {
 			if layer.Flags == nil {
 				layer.Flags = make(map[string]interface{})
 			}
 			if layerHasIncompleteFlag(layer) {
-				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
-				err := r.deleteInternal(layer.ID)
-				if err != nil {
-					// Don't return the error immediately, because deleteInternal does not saveLayers();
-					// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
-					// deleted incomplete layers have their metadata correctly removed.
-					incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
-						fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
-				}
-				modifiedLocations |= layerLocation(layer)
+				// Important: Do not call r.deleteInternal() here. It modifies r.layers
+				// which causes unexpected side effects while iterating over r.layers here.
+				// The range loop has no idea that the underlying elements were shifted
+				// around.
+				layersToDelete = append(layersToDelete, layer)
+			}
+		}
+		// Now actually delete the layers
+		for _, layer := range layersToDelete {
+			logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
+			err := r.deleteInternal(layer.ID)
+			if err != nil {
+				// Don't return the error immediately, because deleteInternal does not saveLayers();
+				// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
+				// deleted incomplete layers have their metadata correctly removed.
+				incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
+					fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
 			}
+			modifiedLocations |= layerLocation(layer)
 		}
 		if err := r.saveLayers(modifiedLocations); err != nil {
 			return false, err
@@ -1213,8 +1229,8 @@ func (r *layerStore) Size(name string) (int64, error) {
 	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
 	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
 	// created by a version of this library that didn't keep track of digest and size information).
-	if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
-		return layer.UncompressedSize, nil
+	if layer.UncompressedDigest != "" || layer.TOCDigest != "" {
+		return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set
 	}
 	return -1, nil
 }
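
With the clarified comment above, callers of Size must tolerate -1 even when a digest is recorded (a TOC-only layer whose uncompressed size was never measured). A hedged sketch of the resulting calling convention, against a minimal stand-in interface:

package example

import "fmt"

// sizer is a stand-in for any layer store exposing Size; illustrative only.
type sizer interface {
	Size(name string) (int64, error)
}

func reportSize(r sizer, name string) {
	size, err := r.Size(name)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	if size == -1 {
		// Unknown: never recorded, or a TOC-based layer without a
		// measured uncompressed size.
		fmt.Println("size unknown")
		return
	}
	fmt.Printf("%d bytes\n", size)
}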
@@ -1372,7 +1388,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
 		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
 		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
 		templateCompressionType = templateLayer.CompressionType
-		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+		templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs)
 		templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
 		if err != nil && !errors.Is(err, os.ErrNotExist) {
 			return nil, -1, err
@@ -1402,9 +1418,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
 		CompressionType:    templateCompressionType,
 		UIDs:               templateUIDs,
 		GIDs:               templateGIDs,
-		Flags:              copyStringInterfaceMap(moreOptions.Flags),
-		UIDMap:             copyIDMap(moreOptions.UIDMap),
-		GIDMap:             copyIDMap(moreOptions.GIDMap),
+		Flags:              newMapFrom(moreOptions.Flags),
+		UIDMap:             copySlicePreferringNil(moreOptions.UIDMap),
+		GIDMap:             copySlicePreferringNil(moreOptions.GIDMap),
 		BigDataNames:       []string{},
 		volatileStore:      moreOptions.Volatile,
 	}
@@ -1564,19 +1580,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 	// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
 	// - r.bymount (via loadMounts / saveMounts)
 
-	// check whether options include ro option
-	hasReadOnlyOpt := func(opts []string) bool {
-		for _, item := range opts {
-			if item == "ro" {
-				return true
-			}
-		}
-		return false
-	}
-
 	// You are not allowed to mount layers from readonly stores if they
 	// are not mounted read/only.
-	if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+	if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
 		return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 	}
 	r.mountsLockfile.Lock()
@@ -1836,14 +1842,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
 		return fmt.Errorf("closing bigdata file for the layer: %w", err)
 	}
 
-	addName := true
-	for _, name := range layer.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(layer.BigDataNames, key) {
 		layer.BigDataNames = append(layer.BigDataNames, key)
 		return r.saveFor(layer)
 	}
@@ -1856,7 +1855,7 @@ func (r *layerStore) BigDataNames(id string) ([]string, error) {
 	if !ok {
 		return nil, fmt.Errorf("locating layer with ID %q to retrieve bigdata names: %w", id, ErrImageUnknown)
 	}
-	return copyStringSlice(layer.BigDataNames), nil
+	return copySlicePreferringNil(layer.BigDataNames), nil
 }
 
 // Requires startReading or startWriting.
@@ -1938,32 +1937,13 @@ func (r *layerStore) deleteInternal(id string) error {
 		delete(r.bymount, layer.MountPoint)
 	}
 	r.deleteInDigestMap(id)
-	toDeleteIndex := -1
-	for i, candidate := range r.layers {
-		if candidate.ID == id {
-			toDeleteIndex = i
-			break
-		}
-	}
-	if toDeleteIndex != -1 {
-		// delete the layer at toDeleteIndex
-		if toDeleteIndex == len(r.layers)-1 {
-			r.layers = r.layers[:len(r.layers)-1]
-		} else {
-			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
-		}
-	}
-	if mountLabel != "" {
-		var found bool
-		for _, candidate := range r.layers {
-			if candidate.MountLabel == mountLabel {
-				found = true
-				break
-			}
-		}
-		if !found {
-			selinux.ReleaseLabel(mountLabel)
-		}
+	r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
+		return candidate.ID == id
+	})
+	if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
+		return candidate.MountLabel == mountLabel
+	}) {
+		selinux.ReleaseLabel(mountLabel)
 	}
 	return nil
 }
@@ -1971,21 +1951,15 @@ func (r *layerStore) deleteInternal(id string) error {
 // Requires startWriting.
 func (r *layerStore) deleteInDigestMap(id string) {
 	for digest, layers := range r.bycompressedsum {
-		for i, layerID := range layers {
-			if layerID == id {
-				layers = append(layers[:i], layers[i+1:]...)
-				r.bycompressedsum[digest] = layers
-				break
-			}
+		if i := slices.Index(layers, id); i != -1 {
+			layers = slices.Delete(layers, i, i+1)
+			r.bycompressedsum[digest] = layers
 		}
 	}
 	for digest, layers := range r.byuncompressedsum {
-		for i, layerID := range layers {
-			if layerID == id {
-				layers = append(layers[:i], layers[i+1:]...)
-				r.byuncompressedsum[digest] = layers
-				break
-			}
+		if i := slices.Index(layers, id); i != -1 {
+			layers = slices.Delete(layers, i, i+1)
+			r.byuncompressedsum[digest] = layers
 		}
 	}
 }
@@ -2095,6 +2069,9 @@ func (r *layerStore) layerMappings(layer *Layer) *idtools.IDMappings {
 }
 
 // Requires startReading or startWriting.
+//
+// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store,
+// see drivers/overlay.Driver.getMergedDir.
 func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
 	from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to)
 	if err != nil {
@@ -2161,6 +2138,9 @@ func writeCompressedDataGoroutine(pwriter *io.PipeWriter, compressor io.WriteClo
 }
 
 // Requires startReading or startWriting.
+//
+// NOTE: Overlay’s implementation assumes use of an exclusive lock over the primary layer store,
+// see drivers/overlay.Driver.getMergedDir.
 func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
 	var metadata storage.Unpacker
 
@@ -2529,7 +2509,9 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 	layer.GIDs = diffOutput.GIDs
 	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
 	layer.UncompressedDigest = diffOutput.UncompressedDigest
-	updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
+	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, diffOutput.CompressedDigest, layer.ID)
+	layer.CompressedDigest = diffOutput.CompressedDigest
+	updateDigestMap(&r.bytocsum, layer.TOCDigest, diffOutput.TOCDigest, layer.ID)
 	layer.TOCDigest = diffOutput.TOCDigest
 	layer.UncompressedSize = diffOutput.Size
 	layer.Metadata = diffOutput.Metadata
@@ -2537,15 +2519,13 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 		if layer.Flags == nil {
 			layer.Flags = make(map[string]interface{})
 		}
-		for k, v := range options.Flags {
-			layer.Flags[k] = v
-		}
+		maps.Copy(layer.Flags, options.Flags)
 	}
 	if err = r.saveFor(layer); err != nil {
 		return err
 	}
 
-	if len(diffOutput.TarSplit) != 0 {
+	if diffOutput.TarSplit != nil {
 		tsdata := bytes.Buffer{}
 		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
 		if err != nil {
@@ -2577,37 +2557,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 	return err
 }
 
-// Requires startWriting.
-func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+// applyDiffWithDifferNoLock must be called without any c/storage locks held to allow differ to make c/storage calls.
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
 	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
 	if !ok {
 		return nil, ErrNotSupported
 	}
 
-	if to == "" {
-		output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
-		return &output, err
-	}
-
-	layer, ok := r.lookup(to)
-	if !ok {
-		return nil, ErrLayerUnknown
-	}
-	if options == nil {
-		options = &drivers.ApplyDiffWithDifferOpts{
-			ApplyDiffOpts: drivers.ApplyDiffOpts{
-				Mappings:   r.layerMappings(layer),
-				MountLabel: layer.MountLabel,
-			},
-		}
-	}
-	output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
-	if err != nil {
-		return nil, err
-	}
-	layer.UIDs = output.UIDs
-	layer.GIDs = output.GIDs
-	err = r.saveFor(layer)
+	output, err := ddriver.ApplyDiffWithDiffer(options, differ)
 	return &output, err
 }
 
@@ -2647,6 +2604,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
 	return r.layersByDigestMap(r.bytocsum, d)
 }
 
+// Requires startWriting.
+func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) {
+	return r.driver.Dedup(req)
+}
+
 func closeAll(closes ...func() error) (rErr error) {
 	for _, f := range closes {
 		if err := f(); err != nil {
diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go
index 640203881..ec98b40ce 100644
--- a/vendor/github.com/containers/storage/lockfile_compat.go
+++ b/vendor/github.com/containers/storage/lockfile_compat.go
@@ -5,7 +5,7 @@ import (
 )
 
 // Deprecated: Use lockfile.*LockFile.
-type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated
+type Locker = lockfile.Locker //nolint:staticcheck // SA1019 lockfile.Locker is deprecated
 
 // Deprecated: Use lockfile.GetLockFile.
 func GetLockfile(path string) (lockfile.Locker, error) {
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 77c9c818c..41daad853 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -70,12 +70,15 @@ type (
 	}
 )
 
+const PaxSchilyXattr = "SCHILY.xattr."
+
 const (
 	tarExt  = "tar"
 	solaris = "solaris"
 	windows = "windows"
 	darwin  = "darwin"
 	freebsd = "freebsd"
+	linux   = "linux"
 )
 
 var xattrsToIgnore = map[string]interface{}{
@@ -169,10 +172,17 @@ func DetectCompression(source []byte) Compression {
 }
 
 // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
 	p := pools.BufioReader32KPool
 	buf := p.Get(archive)
 	bs, err := buf.Peek(10)
+
+	defer func() {
+		if Err != nil {
+			p.Put(buf)
+		}
+	}()
+
 	if err != nil && err != io.EOF {
 		// Note: we'll ignore any io.EOF error because there are some odd
 		// cases where the layer.tar file will be empty (zero bytes) and
@@ -189,6 +199,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
 		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
 		return readBufWrapper, nil
 	case Gzip:
+		cleanup := func() {
+			p.Put(buf)
+		}
+		if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse {
+			return rc, nil
+		}
 		gzReader, err := gzip.NewReader(buf)
 		if err != nil {
 			return nil, err
@@ -207,6 +223,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
 		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
 		return readBufWrapper, nil
 	case Zstd:
+		cleanup := func() {
+			p.Put(buf)
+		}
+		if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse {
+			return rc, nil
+		}
 		return zstdReader(buf)
 	default:
 		return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
@@ -214,9 +236,16 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
 }
 
 // CompressStream compresses the dest with specified compression algorithm.
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) {
 	p := pools.BufioWriter32KPool
 	buf := p.Get(dest)
+
+	defer func() {
+		if Err != nil {
+			p.Put(buf)
+		}
+	}()
+
 	switch compression {
 	case Uncompressed:
 		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
@@ -391,28 +420,28 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
 	return hdr, nil
 }
 
-// ReadSecurityXattrToTarHeader reads security.capability, security,image
+// readSecurityXattrToTarHeader reads security.capability and security.ima
 // xattrs from filesystem to a tar header
-func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
-	if hdr.Xattrs == nil {
-		hdr.Xattrs = make(map[string]string)
+func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+	if hdr.PAXRecords == nil {
+		hdr.PAXRecords = make(map[string]string)
 	}
 	for _, xattr := range []string{"security.capability", "security.ima"} {
 		capability, err := system.Lgetxattr(path, xattr)
-		if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+		if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
 			return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
 		}
 		if capability != nil {
-			hdr.Xattrs[xattr] = string(capability)
+			hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
 		}
 	}
 	return nil
 }
 
-// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
-func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
+// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
+func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
 	xattrs, err := system.Llistxattr(path)
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
+	if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
 		return err
 	}
 	for _, key := range xattrs {
@@ -425,10 +454,10 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
 				}
 				return err
 			}
-			if hdr.Xattrs == nil {
-				hdr.Xattrs = make(map[string]string)
+			if hdr.PAXRecords == nil {
+				hdr.PAXRecords = make(map[string]string)
 			}
-			hdr.Xattrs[key] = string(value)
+			hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
 		}
 	}
 	return nil
@@ -516,10 +545,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	if err != nil {
 		return err
 	}
-	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+	if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
 		return err
 	}
-	if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
+	if err := readUserXattrToTarHeader(path, hdr); err != nil {
 		return err
 	}
 	if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
@@ -627,12 +656,20 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
 	hdrInfo := hdr.FileInfo()
 
+	typeFlag := hdr.Typeflag
 	mask := hdrInfo.Mode()
+
+	// update also the implementation of ForceMask in pkg/chunked
 	if forceMask != nil {
 		mask = *forceMask
+		// If we have a forceMask, force the real type to either be a directory,
+		// a link, or a regular file.
+		if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink {
+			typeFlag = tar.TypeReg
+		}
 	}
 
-	switch hdr.Typeflag {
+	switch typeFlag {
 	case tar.TypeDir:
 		// Create directory unless it exists as a directory already.
 		// In that case we just want to merge the two
@@ -642,7 +679,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 			}
 		}
 
-	case tar.TypeReg, tar.TypeRegA:
+	case tar.TypeReg:
 		// Source is regular file. We use system.OpenFileSequential to use sequential
 		// file access to avoid depleting the standby list on Windows.
 		// On Linux, this equates to a regular os.OpenFile
@@ -700,13 +737,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
 	}
 
-	if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
-		value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777)
-		if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
-			return err
-		}
-	}
-
 	// Lchown is not supported on Windows.
 	if Lchown && runtime.GOOS != windows {
 		if chownOpts == nil {
@@ -753,23 +783,39 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	}
 
 	var errs []string
-	for key, value := range hdr.Xattrs {
-		if _, found := xattrsToIgnore[key]; found {
+	for key, value := range hdr.PAXRecords {
+		xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr)
+		if !ok {
+			continue
+		}
+		if _, found := xattrsToIgnore[xattrKey]; found {
 			continue
 		}
-		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
-			if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
-				// We ignore errors here because not all graphdrivers support
-				// xattrs *cough* old versions of AUFS *cough*. However only
-				// ENOTSUP should be emitted in that case, otherwise we still
-				// bail.  We also ignore EPERM errors if we are running in a
-				// user namespace.
+		if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
+			if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
+				// Ignore specific error cases:
+				// - ENOTSUP: Expected for graphdrivers lacking extended attribute support:
+				//   - Legacy AUFS versions
+				//   - FreeBSD with unsupported namespaces (trusted, security)
+				// - EPERM: Expected when operating within a user namespace
+				// All other errors will cause a failure.
 				errs = append(errs, err.Error())
 				continue
 			}
 			return err
 		}
+	}
 
+	if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") {
+		value := idtools.Stat{
+			IDs:   idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
+			Mode:  hdrInfo.Mode(),
+			Major: int(hdr.Devmajor),
+			Minor: int(hdr.Devminor),
+		}
+		if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
+			return err
+		}
 	}
 
 	// We defer setting flags on directories until the end of
@@ -1113,9 +1159,14 @@ loop:
 		}
 	}
 
-	if options.ForceMask != nil && rootHdr != nil {
-		value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode)
-		if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
+	if options.ForceMask != nil {
+		value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
+		if rootHdr != nil {
+			value.IDs.UID = rootHdr.Uid
+			value.IDs.GID = rootHdr.Gid
+			value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
+		}
+		if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
 			return err
 		}
 	}
@@ -1337,9 +1388,9 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
 			}
 		} else if runtime.GOOS == darwin {
 			uid, gid = hdr.Uid, hdr.Gid
-			if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
+			if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
 				attrs := strings.Split(string(xstat), ":")
-				if len(attrs) == 3 {
+				if len(attrs) >= 3 {
 					val, err := strconv.ParseUint(attrs[0], 10, 32)
 					if err != nil {
 						uid = int(val)
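
The migration above moves xattrs from the deprecated tar.Header.Xattrs field to PAXRecords keyed with the "SCHILY.xattr." prefix, which is the representation archive/tar itself uses when writing PAX headers. A minimal sketch of storing and recovering an xattr through PAXRecords:

package main

import (
	"archive/tar"
	"fmt"
	"strings"
)

// Same prefix as the PaxSchilyXattr constant exported above.
const paxSchilyXattr = "SCHILY.xattr."

func main() {
	hdr := &tar.Header{Name: "bin/app", Mode: 0o755, Format: tar.FormatPAX}
	if hdr.PAXRecords == nil {
		hdr.PAXRecords = map[string]string{}
	}
	hdr.PAXRecords[paxSchilyXattr+"user.note"] = "hello"

	// Reading back: strip the prefix to recover the xattr name,
	// mirroring the strings.CutPrefix loop in createTarFile.
	for key, value := range hdr.PAXRecords {
		if name, ok := strings.CutPrefix(key, paxSchilyXattr); ok {
			fmt.Printf("xattr %s=%q\n", name, value)
		}
	}
}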
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
index eab9da51a..db614cdd6 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_110.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
@@ -1,5 +1,4 @@
 //go:build go1.10
-// +build go1.10
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
index f591bf389..304464fe7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_19.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
@@ -1,5 +1,4 @@
 //go:build !go1.10
-// +build !go1.10
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
index 4d362f075..74e62331a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_bsd.go
@@ -1,5 +1,4 @@
-//go:build freebsd || darwin
-// +build freebsd darwin
+//go:build netbsd || freebsd || darwin
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 02995d767..b9d718b60 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -48,8 +48,8 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
 			return nil, err
 		}
 		if len(opaque) == 1 && opaque[0] == 'y' {
-			if hdr.Xattrs != nil {
-				delete(hdr.Xattrs, getOverlayOpaqueXattrName())
+			if hdr.PAXRecords != nil {
+				delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName())
 			}
 			// If there are no lower layers, then it can't have been deleted in this layer.
 			if len(o.rolayers) == 0 {
@@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
 	}
 
 	// if a file was deleted and we are using overlay, we need to create a character device
-	if strings.HasPrefix(base, WhiteoutPrefix) {
-		originalBase := base[len(WhiteoutPrefix):]
+	if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
 		originalPath := filepath.Join(dir, originalBase)
 
 		if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
index 2468ab3ca..b342ff75e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index c6811031f..56f2086bc 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index 85a5b3a5d..6db31cf4c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index 448784549..2b5265493 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
 	f := filepath.Base(path)
 
 	// If there is a whiteout, then the file was removed
-	if strings.HasPrefix(f, WhiteoutPrefix) {
-		originalFile := f[len(WhiteoutPrefix):]
+	if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
 		return filepath.Join(filepath.Dir(path), originalFile), nil
 	}
 
@@ -270,6 +270,7 @@ type FileInfo struct {
 	capability []byte
 	added      bool
 	xattrs     map[string]string
+	target     string
 }
 
 // LookUp looks up the file information of a file.
@@ -319,9 +320,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 	// otherwise any previous delete/change is considered recursive
 	oldChildren := make(map[string]*FileInfo)
 	if oldInfo != nil && info.isDir() {
-		for k, v := range oldInfo.children {
-			oldChildren[k] = v
-		}
+		maps.Copy(oldChildren, oldInfo.children)
 	}
 
 	for name, newChild := range info.children {
@@ -338,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 			// back mtime
 			if statDifferent(oldStat, oldInfo, newStat, info) ||
 				!bytes.Equal(oldChild.capability, newChild.capability) ||
+				oldChild.target != newChild.target ||
 				!reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) {
 				change := Change{
 					Path: newChild.path(),
@@ -392,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
 		name:       string(os.PathSeparator),
 		idMappings: idMappings,
 		children:   make(map[string]*FileInfo),
+		target:     "",
 	}
 	return root
 }
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
index f8414717b..42e77c4de 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 		children:   make(map[string]*FileInfo),
 		parent:     parent,
 		idMappings: root.idMappings,
+		target:     "",
 	}
 	cpath := filepath.Join(dir, path)
 	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
@@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 	}
 	info.stat = stat
 	info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
 	xattrs, err := system.Llistxattr(cpath)
-	if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
+	if err != nil && !errors.Is(err, system.ENOTSUP) {
 		return err
 	}
 	for _, key := range xattrs {
@@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
 			info.xattrs[key] = string(value)
 		}
 	}
+	if fi.Mode()&os.ModeSymlink != 0 {
+		info.target, err = os.Readlink(cpath)
+		if err != nil {
+			return err
+		}
+	}
 	parent.children[info.name] = info
 	return nil
 }
@@ -316,7 +323,11 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
 // with respect to the parent layers
 func OverlayChanges(layers []string, rw string) ([]Change, error) {
 	dc := func(root, path string, fi os.FileInfo) (string, error) {
-		return overlayDeletedFile(layers, root, path, fi)
+		r, err := overlayDeletedFile(layers, root, path, fi)
+		if err != nil {
+			return "", fmt.Errorf("overlay deleted file query: %w", err)
+		}
+		return r, nil
 	}
 	return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout)
 }
@@ -351,7 +362,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
 	// If the directory isn't marked as opaque, then it's just a normal directory.
 	opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName())
 	if err != nil {
-		return "", err
+		return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err)
 	}
 	if len(opaque) != 1 || opaque[0] != 'y' {
 		return "", err
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
index ca272e68f..2965ccc9f 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package archive
 
@@ -31,7 +30,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
 	}()
 
 	// block until both routines have returned
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		if err := <-errs; err != nil {
 			return nil, nil, err
 		}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
index 6b2e59380..fb2cb70c2 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package archive
 
@@ -31,9 +30,9 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
 		ownerChanged ||
 		oldStat.Rdev() != newStat.Rdev() ||
 		oldStat.Flags() != newStat.Flags() ||
+		!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
 		// Don't look at size for dirs, it's not a good measure of change
-		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
-			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) {
 		return true
 	}
 	return false
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
index d6c5fd98b..f57928244 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
index 92b8d05ed..4da1ced3e 100644
--- a/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_bsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package archive
 
@@ -80,9 +79,9 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
 	var set, clear uint32 = 0, 0
 	for _, fflag := range strings.Split(fflags, ",") {
 		isClear := false
-		if strings.HasPrefix(fflag, "no") {
+		if clean, ok := strings.CutPrefix(fflag, "no"); ok {
 			isClear = true
-			fflag = strings.TrimPrefix(fflag, "no")
+			fflag = clean
 		}
 		if value, ok := flagNameToValue[fflag]; ok {
 			if isClear {
diff --git a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
index 27a0bae2b..5ad480f7c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/archive/fflags_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !freebsd
-// +build !freebsd
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/archive/filter.go b/vendor/github.com/containers/storage/pkg/archive/filter.go
new file mode 100644
index 000000000..9902a1ef5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/filter.go
@@ -0,0 +1,73 @@
+package archive
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os/exec"
+	"strings"
+	"sync"
+)
+
+var filterPath sync.Map
+
+func getFilterPath(name string) string {
+	path, ok := filterPath.Load(name)
+	if ok {
+		return path.(string)
+	}
+
+	path, err := exec.LookPath(name)
+	if err != nil {
+		path = ""
+	}
+
+	filterPath.Store(name, path)
+	return path.(string)
+}
+
+type errorRecordingReader struct {
+	r   io.Reader
+	err error
+}
+
+func (r *errorRecordingReader) Read(p []byte) (int, error) {
+	n, err := r.r.Read(p)
+	if r.err == nil && err != io.EOF {
+		r.err = err
+	}
+	return n, err
+}
+
+// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout.
+// cleanup() is a caller provided function that will be called when the command finishes running, regardless of
+// whether it succeeds or fails.
+// If the command is not found, it returns (nil, false) and the cleanup function is not called.
+func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) {
+	path := getFilterPath(args[0])
+	if path == "" {
+		return nil, false
+	}
+
+	var stderrBuf bytes.Buffer
+
+	inputWithError := &errorRecordingReader{r: input}
+
+	r, w := io.Pipe()
+	cmd := exec.Command(path, args[1:]...)
+	cmd.Stdin = inputWithError
+	cmd.Stdout = w
+	cmd.Stderr = &stderrBuf
+	go func() {
+		err := cmd.Run()
+		// if there is an error reading from input, prefer to return that error
+		if inputWithError.err != nil {
+			err = inputWithError.err
+		} else if err != nil && stderrBuf.Len() > 0 {
+			err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err)
+		}
+		w.CloseWithError(err) // CloseWithErr(nil) == Close()
+		cleanup()
+	}()
+	return r, true
+}
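
The effect of this new file is that DecompressStream transparently prefers an external pigz or zstd process when one is on PATH and silently falls back to in-process decompression otherwise; callers see the same io.ReadCloser either way. A usage sketch (the file name is a placeholder):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Pipes through "pigz -d" if available, else uses the built-in
	// gzip reader; the caller just reads and closes either way.
	rc, err := archive.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	n, err := io.Copy(io.Discard, rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes\n", n)
}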
diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
index 8db64e804..3555d753a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package archive
 
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index 5ff9f6b51..5a1617814 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -47,7 +47,7 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
 // This should be used to prevent a potential attacker from manipulating `dest`
 // such that it would provide access to files outside of `dest` through things
 // like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
-// sanitizing symlinks in this manner is inherrently racey:
+// sanitizing symlinks in this manner is inherently racy:
 // ref: CVE-2018-15664
 func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
 	return untarHandler(tarArchive, dest, options, true, root)
@@ -83,6 +83,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 		}
 	}
 
+	destVal, err := newUnpackDestination(root, dest)
+	if err != nil {
+		return err
+	}
+	defer destVal.Close()
+
 	r := tarArchive
 	if decompress {
 		decompressedArchive, err := archive.DecompressStream(tarArchive)
@@ -93,7 +99,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 		r = decompressedArchive
 	}
 
-	return invokeUnpack(r, dest, options, root)
+	return invokeUnpack(r, destVal, options)
 }
 
 // Tar tars the requested path while chrooted to the specified root.
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
index f7a16e9f9..caf348493 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go
@@ -6,13 +6,29 @@ import (
 	"github.com/containers/storage/pkg/archive"
 )
 
+type unpackDestination struct {
+	dest string
+}
+
+func (dst *unpackDestination) Close() error {
+	return nil
+}
+
+// newUnpackDestination is a no-op on this platform
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
+	return &unpackDestination{
+		dest: dest,
+	}, nil
+}
+
 func invokeUnpack(decompressedArchive io.Reader,
-	dest string,
-	options *archive.TarOptions, root string,
+	dest *unpackDestination,
+	options *archive.TarOptions,
 ) error {
-	return archive.Unpack(decompressedArchive, dest, options)
+	return archive.Unpack(decompressedArchive, dest.dest, options)
 }
 
 func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	_ = root // Restricting the operation to this root is not implemented on macOS
 	return archive.TarWithOptions(srcPath, options)
 }
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 259f8c99a..88df9e56f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package chrootarchive
 
@@ -9,15 +8,41 @@ import (
 	"flag"
 	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
 
+	"golang.org/x/sys/unix"
+
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/reexec"
 )
 
+type unpackDestination struct {
+	root *os.File
+	dest string
+}
+
+func (dst *unpackDestination) Close() error {
+	return dst.root.Close()
+}
+
+// tarOptionsDescriptor is passed as an extra file
+const tarOptionsDescriptor = 3
+
+// rootFileDescriptor is passed as an extra file
+const rootFileDescriptor = 4
+
+// procPathForFd gives us a string for a descriptor.
+// Note that while Linux supports actually *reading* this
+// path, FreeBSD and other platforms don't; but in this codebase
+// we only compare strings.
+func procPathForFd(fd int) string {
+	return fmt.Sprintf("/proc/self/fd/%d", fd)
+}
+
 // untar is the entry-point for storage-untar on re-exec. This is not used on
 // Windows as it does not support chroot, hence no point sandboxing through
 // chroot and rexec.
@@ -28,7 +53,7 @@ func untar() {
 	var options archive.TarOptions
 
 	// read the options from the pipe "ExtraFiles"
-	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+	if err := json.NewDecoder(os.NewFile(tarOptionsDescriptor, "options")).Decode(&options); err != nil {
 		fatal(err)
 	}
 
@@ -38,7 +63,17 @@ func untar() {
 		root = flag.Arg(1)
 	}
 
-	if root == "" {
+	// FreeBSD doesn't have proc/self, but we can handle it here
+	if root == procPathForFd(rootFileDescriptor) {
+		// Take ownership to ensure it's closed; no need to leak
+		// this afterwards.
+		rootFd := os.NewFile(rootFileDescriptor, "tar-root")
+		defer rootFd.Close()
+		if err := unix.Fchdir(int(rootFd.Fd())); err != nil {
+			fatal(err)
+		}
+		root = "."
+	} else if root == "" {
 		root = dst
 	}
 
@@ -57,11 +92,35 @@ func untar() {
 	os.Exit(0)
 }
 
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+// newUnpackDestination takes a root directory and a destination which
+// must be underneath it, and returns an object that can unpack
+// in the target root using a file descriptor.
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
 	if root == "" {
-		return errors.New("must specify a root to chroot to")
+		return nil, errors.New("must specify a root to chroot to")
+	}
+	relDest, err := filepath.Rel(root, dest)
+	if err != nil {
+		return nil, err
+	}
+	if relDest == "." {
+		relDest = "/"
+	}
+	if relDest[0] != '/' {
+		relDest = "/" + relDest
 	}
 
+	rootfdRaw, err := unix.Open(root, unix.O_RDONLY|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, &fs.PathError{Op: "open", Path: root, Err: err}
+	}
+	return &unpackDestination{
+		root: os.NewFile(uintptr(rootfdRaw), "rootfs"),
+		dest: relDest,
+	}, nil
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest *unpackDestination, options *archive.TarOptions) error {
 	// We can't pass a potentially large exclude list directly via cmd line
 	// because we easily overrun the kernel's max argument/environment size
 	// when the full image list is passed (e.g. when this is used by
@@ -72,24 +131,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 		return fmt.Errorf("untar pipe failure: %w", err)
 	}
 
-	if root != "" {
-		relDest, err := filepath.Rel(root, dest)
-		if err != nil {
-			return err
-		}
-		if relDest == "." {
-			relDest = "/"
-		}
-		if relDest[0] != '/' {
-			relDest = "/" + relDest
-		}
-		dest = relDest
-	}
-
-	cmd := reexec.Command("storage-untar", dest, root)
+	cmd := reexec.Command("storage-untar", dest.dest, procPathForFd(rootFileDescriptor))
 	cmd.Stdin = decompressedArchive
 
-	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	// If you change this, change tarOptionsDescriptor above
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r) // fd 3
+	// If you change this, change rootFileDescriptor above too
+	cmd.ExtraFiles = append(cmd.ExtraFiles, dest.root) // fd 4
 	output := bytes.NewBuffer(nil)
 	cmd.Stdout = output
 	cmd.Stderr = output
@@ -107,12 +155,15 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	w.Close()
 
 	if err := cmd.Wait(); err != nil {
+		errorOut := fmt.Errorf("unpacking failed (error: %w; output: %s)", err, output)
 		// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
 		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
 		// pending on write pipe forever
-		io.Copy(io.Discard, decompressedArchive)
+		if _, err := io.Copy(io.Discard, decompressedArchive); err != nil {
+			return fmt.Errorf("%w\nexhausting input failed (error: %w)", errorOut, err)
+		}
 
-		return fmt.Errorf("processing tar file(%s): %w", output, err)
+		return errorOut
 	}
 	return nil
 }
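
The tarOptionsDescriptor and rootFileDescriptor constants lean on the os/exec contract that ExtraFiles[i] becomes file descriptor 3+i in the child, descriptors 0 through 2 being stdio. A minimal demonstration of that contract, unrelated to the tar code itself (POSIX sh assumed):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command("sh", "-c", "cat <&3") // the child reads from inherited fd 3
	cmd.ExtraFiles = []*os.File{r}             // ExtraFiles[0] maps to fd 3 in the child
	cmd.Stdout = os.Stdout

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	r.Close() // the child holds its own duplicate
	fmt.Fprintln(w, "hello over fd 3")
	w.Close() // EOF for the child's cat
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}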
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
index 745502204..6611cbade 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -7,19 +7,34 @@ import (
 	"github.com/containers/storage/pkg/longpath"
 )
 
+type unpackDestination struct {
+	dest string
+}
+
+func (dst *unpackDestination) Close() error {
+	return nil
+}
+
+// newUnpackDestination is a no-op on this platform
+func newUnpackDestination(root, dest string) (*unpackDestination, error) {
+	return &unpackDestination{
+		dest: dest,
+	}, nil
+}
+
 // chroot is not supported by Windows
 func chroot(path string) error {
 	return nil
 }
 
 func invokeUnpack(decompressedArchive io.Reader,
-	dest string,
-	options *archive.TarOptions, root string,
+	dest *unpackDestination,
+	options *archive.TarOptions,
 ) error {
 	// Windows is different to Linux here because Windows does not support
 	// chroot. Hence there is no point sandboxing a chrooted process to
 	// do the unpack. We call inline instead within the daemon process.
-	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest.dest), options)
 }
 
 func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
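
On Windows the unpack still runs in-process through archive.Unpack, with the destination wrapped by longpath.AddPrefix. A stand-in re-implementation, for illustration only and not the real helper, of what that prefixing is assumed to do (extended-length \\?\ paths that can exceed MAX_PATH):

```go
package main

import (
	"fmt"
	"strings"
)

// addLongPathPrefix sketches the assumed behavior of longpath.AddPrefix.
func addLongPathPrefix(path string) string {
	const prefix = `\\?\`
	if strings.HasPrefix(path, prefix) {
		return path
	}
	if strings.HasPrefix(path, `\\`) {
		// UNC path: \\server\share becomes \\?\UNC\server\share
		return prefix + `UNC` + path[1:]
	}
	return prefix + path
}

func main() {
	fmt.Println(addLongPathPrefix(`C:\very\deep\layer\directory`))
	fmt.Println(addLongPathPrefix(`\\server\share\layer`))
}
```
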
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 09ef6d5de..3ca99a2c2 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -8,7 +8,7 @@ import (
 	"path/filepath"
 
 	"github.com/containers/storage/pkg/mount"
-	"github.com/syndtr/gocapability/capability"
+	"github.com/moby/sys/capability"
 	"golang.org/x/sys/unix"
 )
 
@@ -19,10 +19,13 @@ import (
 // Old root is removed after the call to pivot_root so it is no longer available under the new root.
 // This is similar to how libcontainer sets up a container's rootfs
 func chroot(path string) (err error) {
-	caps, err := capability.NewPid(0)
+	caps, err := capability.NewPid2(0)
 	if err != nil {
 		return err
 	}
+	if err := caps.Load(); err != nil {
+		return err
+	}
 
 	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
 	// environment not in the chroot from untrusted files.
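
The switch from gocapability to moby/sys/capability changes the constructor contract: NewPid2 does not read the capability state implicitly, which is why the explicit caps.Load() call is now required before querying. A minimal sketch of the pattern; CAP_SYS_ADMIN is only an example capability to query:

```go
package main

import (
	"fmt"

	"github.com/moby/sys/capability"
)

func main() {
	caps, err := capability.NewPid2(0) // 0 means the current process
	if err != nil {
		panic(err)
	}
	// Unlike the old NewPid, NewPid2 does not load state on construction.
	if err := caps.Load(); err != nil {
		panic(err)
	}
	fmt.Println("CAP_SYS_ADMIN effective:",
		caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN))
}
```
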
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
index b03e97460..0aab11d22 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !linux && !darwin
-// +build !windows,!linux,!darwin
 
 package chrootarchive
 
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 71ed094d1..ee215ce4f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package chrootarchive
 
@@ -40,11 +39,13 @@ func applyLayer() {
 	}
 
 	// We need to be able to set any perms
-	oldmask, err := system.Umask(0)
-	defer system.Umask(oldmask)
+	oldMask, err := system.Umask(0)
 	if err != nil {
 		fatal(err)
 	}
+	defer func() {
+		_, _ = system.Umask(oldMask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
+	}()
 
 	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
 		fatal(err)
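
The reordering above only restores the umask once the initial call has succeeded, and deliberately ignores the restore error. A standalone sketch of the same save-and-restore pattern using the raw unix.Umask, which cannot fail:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	oldMask := unix.Umask(0) // clear the umask so any permissions can be set
	defer unix.Umask(oldMask)

	// With the umask cleared, the requested mode is applied verbatim.
	if err := os.Mkdir("/tmp/example-0777", 0o777); err != nil && !os.IsExist(err) {
		panic(err)
	}
	fmt.Println("directory created with mode 0777 regardless of the inherited umask")
}
```
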
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index 274a946e2..0fd96190a 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package chrootarchive
 
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
index 40eec4dc0..2245f894e 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go
@@ -1,5 +1,4 @@
 //go:build !windows && !darwin
-// +build !windows,!darwin
 
 package chrootarchive
 
diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go
similarity index 87%
rename from vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go
rename to vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go
index 45d76ec30..09e75680a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go
@@ -2,10 +2,15 @@ package chunked
 
 import (
 	"encoding/binary"
+	"fmt"
 	"hash/crc32"
 	"io"
+
+	"github.com/docker/go-units"
 )
 
+const bloomFilterMaxLength = 100 * units.MB // max size for bloom filter
+
 type bloomFilter struct {
 	bitArray []uint64
 	k        uint32
@@ -79,6 +84,10 @@ func readBloomFilter(reader io.Reader) (*bloomFilter, error) {
 	if err := binary.Read(reader, binary.LittleEndian, &k); err != nil {
 		return nil, err
 	}
+	// sanity check
+	if bloomFilterLen > bloomFilterMaxLength {
+		return nil, fmt.Errorf("bloom filter length %d exceeds max length %d", bloomFilterLen, bloomFilterMaxLength)
+	}
 	bloomFilterArray := make([]uint64, bloomFilterLen)
 	if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil {
 		return nil, err
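
The new check bounds bloomFilterLen before make([]uint64, bloomFilterLen), so a corrupt or hostile length field cannot force a huge allocation. The same validate-before-allocate pattern in a standalone sketch; the cap value mirrors bloomFilterMaxLength:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const maxLen = 100 * 1024 * 1024 // cap lengths read from untrusted input

func readLengthPrefixed(r *bytes.Reader) ([]uint64, error) {
	var n uint64
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return nil, err
	}
	if n > maxLen { // validate before allocating
		return nil, fmt.Errorf("length %d exceeds max length %d", n, maxLen)
	}
	out := make([]uint64, n)
	if err := binary.Read(r, binary.LittleEndian, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	var buf bytes.Buffer
	_ = binary.Write(&buf, binary.LittleEndian, uint64(2))
	_ = binary.Write(&buf, binary.LittleEndian, []uint64{7, 9})
	v, err := readLengthPrefixed(bytes.NewReader(buf.Bytes()))
	fmt.Println(v, err) // [7 9] <nil>
}
```
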
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index 34d1b92f4..0e49ddd88 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -16,8 +16,9 @@ import (
 
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
+	"github.com/docker/go-units"
 	jsoniter "github.com/json-iterator/go"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
@@ -34,6 +35,8 @@ const (
 	// https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
 	bloomFilterScale  = 10 // how much bigger is the bloom filter than the number of entries
 	bloomFilterHashes = 3  // number of hash functions for the bloom filter
+
+	maxTagsLen = 100 * units.MB // maximum size of the tags section
 )
 
 type cacheFile struct {
@@ -62,11 +65,10 @@ type layer struct {
 }
 
 type layersCache struct {
-	layers  []*layer
-	refs    int
-	store   storage.Store
-	mutex   sync.RWMutex
-	created time.Time
+	layers []*layer
+	refs   int
+	store  storage.Store
+	mutex  sync.RWMutex
 }
 
 var (
@@ -77,7 +79,10 @@ var (
 func (c *layer) release() {
 	runtime.SetFinalizer(c, nil)
 	if c.mmapBuffer != nil {
-		unix.Munmap(c.mmapBuffer)
+		if err := unix.Munmap(c.mmapBuffer); err != nil {
+			logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
+		}
+		c.mmapBuffer = nil
 	}
 }
 
@@ -102,14 +107,13 @@ func (c *layersCache) release() {
 func getLayersCacheRef(store storage.Store) *layersCache {
 	cacheMutex.Lock()
 	defer cacheMutex.Unlock()
-	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+	if cache != nil && cache.store == store {
 		cache.refs++
 		return cache
 	}
-	cache := &layersCache{
-		store:   store,
-		refs:    1,
-		created: time.Now(),
+	cache = &layersCache{
+		store: store,
+		refs:  1,
 	}
 	return cache
 }
@@ -178,6 +182,9 @@ func makeBinaryDigest(stringDigest string) ([]byte, error) {
 	return buf, nil
 }
 
+// loadLayerCache attempts to load the cache file for the specified layer.
+// If the cache file is not present or it is using a different cache file version, then
+// the function returns (nil, nil).
 func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
@@ -189,13 +196,18 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	}
 	defer func() {
 		if errRet != nil && mmapBuffer != nil {
-			unix.Munmap(mmapBuffer)
+			if err := unix.Munmap(mmapBuffer); err != nil {
+				logrus.Warnf("Error Munmap: layer %q: %v", layerID, err)
+			}
 		}
 	}()
 	cacheFile, err := readCacheFileFromMemory(buffer)
 	if err != nil {
 		return nil, err
 	}
+	if cacheFile == nil {
+		return nil, nil
+	}
 	return c.createLayer(layerID, cacheFile, mmapBuffer)
 }
 
@@ -262,7 +274,7 @@ func (c *layersCache) load() error {
 	var newLayers []*layer
 	for _, r := range allLayers {
 		// The layer is present in the store and it is already loaded.  Attempt to
-		// re-use it if mmap'ed.
+		// reuse it if mmap'ed.
 		if l, found := loadedLayers[r.ID]; found {
 			// If the layer is not marked for re-load, move it to newLayers.
 			if !l.reloadWithMmap {
@@ -280,9 +292,18 @@ func (c *layersCache) load() error {
 			newLayers = append(newLayers, l)
 			continue
 		}
+
+		if r.ReadOnly {
+			// If the layer is coming from a read-only store, do not attempt
+			// to write to it.
+			// Therefore, we won’t find any matches in read-only-store layers,
+			// unless the read-only store layer comes prepopulated with cacheKey data.
+			continue
+		}
+
 		// the cache file is either not present or broken.  Try to generate it from the TOC.
 		l, err = c.createCacheFileFromTOC(r.ID)
-		if err != nil {
+		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 			logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err)
 		}
 		if l != nil {
@@ -603,6 +624,8 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
 	}, nil
 }
 
+// readCacheFileFromMemory reads a cache file from a buffer.
+// It can return (nil, nil) if the cache file uses a different file version than the one currently supported.
 func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
 	bigData := bytes.NewReader(bigDataBuffer)
 
@@ -635,6 +658,14 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
 	if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil {
 		return nil, err
 	}
+
+	if tagsLen > maxTagsLen {
+		return nil, fmt.Errorf("tags len %d exceeds the maximum allowed size %d", tagsLen, maxTagsLen)
+	}
+	if digestLen > tagLen {
+		return nil, fmt.Errorf("digest len %d exceeds the tag len %d", digestLen, tagLen)
+	}
+
 	tags := make([]byte, tagsLen)
 	if _, err := bigData.Read(tags); err != nil {
 		return nil, err
@@ -643,6 +674,10 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
 	// retrieve the unread part of the buffer.
 	remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():]
 
+	if vdataLen >= uint64(len(remaining)) {
+		return nil, fmt.Errorf("vdata len %d exceeds the remaining buffer size %d", vdataLen, len(remaining))
+	}
+
 	vdata := remaining[:vdataLen]
 	fnames := remaining[vdataLen:]
 
@@ -675,7 +710,7 @@ func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([
 	switch format {
 	case graphdriver.DifferOutputFormatDir:
 	case graphdriver.DifferOutputFormatFlat:
-		entries, err = makeEntriesFlat(entries)
+		entries, err = makeEntriesFlat(entries, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -753,14 +788,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
 		return "", "", -1, nil
 	}
 
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
 	binaryDigest, err := makeBinaryDigest(digest)
 	if err != nil {
 		return "", "", 0, err
 	}
 
-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
 	for _, layer := range c.layers {
 		if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
 			continue
@@ -813,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo
 	return "", "", nil
 }
 
-func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) {
 	return c.findDigestInternal(chunk.ChunkDigest)
 }
 
-func unmarshalToc(manifest []byte) (*internal.TOC, error) {
-	var toc internal.TOC
+func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
+	var toc minimal.TOC
 
 	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
 
@@ -829,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 
 		case "entries":
 			for iter.ReadArray() {
-				var m internal.FileMetadata
+				var m minimal.FileMetadata
 				for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
 					switch strings.ToLower(field) {
 					case "type":
@@ -901,7 +936,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 			s := iter.ReadString()
 			d, err := digest.Parse(s)
 			if err != nil {
-				return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err)
+				return nil, fmt.Errorf("invalid tarSplitDigest %q: %w", s, err)
 			}
 			toc.TarSplitDigest = d
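
The layersCache changes drop the 10-minute time-based expiry: the shared instance now lives purely by reference counting. A condensed, illustrative sketch of that refcounted-singleton pattern (the release side is simplified; the real one also frees per-layer resources):

```go
package main

import (
	"fmt"
	"sync"
)

type sharedCache struct{ refs int }

var (
	cacheMutex sync.Mutex
	cache      *sharedCache
)

func getRef() *sharedCache {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	if cache != nil {
		cache.refs++
		return cache
	}
	cache = &sharedCache{refs: 1}
	return cache
}

func release(c *sharedCache) {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	c.refs--
	if c.refs == 0 && cache == c {
		cache = nil // drop the singleton once the last user is done
	}
}

func main() {
	a := getRef()
	b := getRef()
	fmt.Println(a == b) // true: both callers share one instance
	release(b)
	release(a)
}
```
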
 
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index e828d479f..564efc8bf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -4,18 +4,18 @@ import (
 	"io"
 
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 )
 
 const (
-	TypeReg     = internal.TypeReg
-	TypeChunk   = internal.TypeChunk
-	TypeLink    = internal.TypeLink
-	TypeChar    = internal.TypeChar
-	TypeBlock   = internal.TypeBlock
-	TypeDir     = internal.TypeDir
-	TypeFifo    = internal.TypeFifo
-	TypeSymlink = internal.TypeSymlink
+	TypeReg     = minimal.TypeReg
+	TypeChunk   = minimal.TypeChunk
+	TypeLink    = minimal.TypeLink
+	TypeChar    = minimal.TypeChar
+	TypeBlock   = minimal.TypeBlock
+	TypeDir     = minimal.TypeDir
+	TypeFifo    = minimal.TypeFifo
+	TypeSymlink = minimal.TypeSymlink
 )
 
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
index 7b3879a99..67cc6cf08 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go
@@ -2,16 +2,28 @@ package chunked
 
 import (
 	archivetar "archive/tar"
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"strconv"
+	"time"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/klauspost/compress/zstd"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+	expMaps "golang.org/x/exp/maps"
+)
+
+const (
+	// maxTocSize is the maximum size of a blob that we will attempt to process.
+	// It is used to prevent DoS attacks from layers that embed a very large TOC file.
+	maxTocSize = (1 << 20) * 150
 )
 
 var typesToTar = map[string]byte{
@@ -32,31 +44,33 @@ func typeToTarType(t string) (byte, error) {
 	return r, nil
 }
 
+// readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
 func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) {
 	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
 	footerSize := int64(51)
 	if blobSize <= footerSize {
 		return nil, 0, errors.New("blob too small")
 	}
-	chunk := ImageSourceChunk{
-		Offset: uint64(blobSize - footerSize),
-		Length: uint64(footerSize),
-	}
-	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+
+	footer := make([]byte, footerSize)
+	streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)})
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
-	var reader io.ReadCloser
-	select {
-	case r := <-parts:
-		reader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer reader.Close()
-	footer := make([]byte, footerSize)
-	if _, err := io.ReadFull(reader, footer); err != nil {
-		return nil, 0, err
+
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_, err = io.ReadFull(soe.stream, footer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
 	}
 
 	/* Read the ToC offset:
@@ -75,48 +89,59 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 
 	size := int64(blobSize - footerSize - tocOffset)
 	// set a reasonable limit
-	if size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
+	if size > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz manifest too big to process in memory (%d bytes)", size))
 	}
 
-	chunk = ImageSourceChunk{
-		Offset: uint64(tocOffset),
-		Length: uint64(size),
-	}
-	parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
+	streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)})
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, 0, err
 	}
 
-	var tocReader io.ReadCloser
-	select {
-	case r := <-parts:
-		tocReader = r
-	case err := <-errs:
-		return nil, 0, err
-	}
-	defer tocReader.Close()
+	var manifestUncompressed []byte
 
-	r, err := pgzip.NewReader(tocReader)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer r.Close()
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			err1 := func() error {
+				defer soe.stream.Close()
 
-	aTar := archivetar.NewReader(r)
+				r, err := pgzip.NewReader(soe.stream)
+				if err != nil {
+					return err
+				}
+				defer r.Close()
 
-	header, err := aTar.Next()
-	if err != nil {
-		return nil, 0, err
-	}
-	// set a reasonable limit
-	if header.Size > (1<<20)*50 {
-		return nil, 0, errors.New("manifest too big")
-	}
+				aTar := archivetar.NewReader(r)
 
-	manifestUncompressed := make([]byte, header.Size)
-	if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
-		return nil, 0, err
+				header, err := aTar.Next()
+				if err != nil {
+					return err
+				}
+				// set a reasonable limit
+				if header.Size > maxTocSize {
+					return errors.New("manifest too big")
+				}
+
+				manifestUncompressed = make([]byte, header.Size)
+				if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
+					return err
+				}
+				return nil
+			}()
+			if err == nil {
+				err = err1
+			}
+		} else if err == nil {
+			err = soe.err
+		}
+	}
+	if manifestUncompressed == nil {
+		return nil, 0, errors.New("manifest not found")
 	}
 
 	manifestDigester := digest.Canonical.Digester()
@@ -133,11 +158,12 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 }
 
 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
-// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
-	offsetMetadata := annotations[internal.ManifestInfoKey]
+// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
+	offsetMetadata := annotations[minimal.ManifestInfoKey]
 	if offsetMetadata == "" {
-		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
+		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)
 	}
 	var manifestChunk ImageSourceChunk
 	var manifestLengthUncompressed, manifestType uint64
@@ -147,48 +173,59 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 	// The tarSplit… values are valid if tarSplitChunk.Offset > 0
 	var tarSplitChunk ImageSourceChunk
 	var tarSplitLengthUncompressed uint64
-	if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
+	if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found {
 		if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
 			return nil, nil, nil, 0, err
 		}
 	}
 
-	if manifestType != internal.ManifestTypeCRFS {
+	if manifestType != minimal.ManifestTypeCRFS {
 		return nil, nil, nil, 0, errors.New("invalid manifest type")
 	}
 
 	// set a reasonable limit
-	if manifestChunk.Length > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestChunk.Length > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes compressed)", manifestChunk.Length))
 	}
-	if manifestLengthUncompressed > (1<<20)*50 {
-		return nil, nil, nil, 0, errors.New("manifest too big")
+	if manifestLengthUncompressed > maxTocSize {
+		// Not errFallbackCanConvert: we would still use too much memory.
+		return nil, nil, nil, 0, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked manifest too big to process in memory (%d bytes uncompressed)", manifestLengthUncompressed))
 	}
 
 	chunks := []ImageSourceChunk{manifestChunk}
 	if tarSplitChunk.Offset > 0 {
 		chunks = append(chunks, tarSplitChunk)
 	}
-	parts, errs, err := blobStream.GetBlobAt(chunks)
+
+	streamsOrErrors, err := getBlobAt(blobStream, chunks...)
 	if err != nil {
+		var badRequestErr ErrBadRequest
+		if errors.As(err, &badRequestErr) {
+			err = errFallbackCanConvert{newErrFallbackToOrdinaryLayerDownload(err)}
+		}
 		return nil, nil, nil, 0, err
 	}
 
+	defer func() {
+		err := ensureAllBlobsDone(streamsOrErrors)
+		if retErr == nil {
+			retErr = err
+		}
+	}()
+
 	readBlob := func(len uint64) ([]byte, error) {
-		var reader io.ReadCloser
-		select {
-		case r := <-parts:
-			reader = r
-		case err := <-errs:
-			return nil, err
+		soe, ok := <-streamsOrErrors
+		if !ok {
+			return nil, errors.New("stream closed")
+		}
+		if soe.err != nil {
+			return nil, soe.err
 		}
+		defer soe.stream.Close()
 
 		blob := make([]byte, len)
-		if _, err := io.ReadFull(reader, blob); err != nil {
-			reader.Close()
-			return nil, err
-		}
-		if err := reader.Close(); err != nil {
+		if _, err := io.ReadFull(soe.stream, blob); err != nil {
 			return nil, err
 		}
 		return blob, nil
@@ -208,21 +245,194 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 		return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
 	}
 
-	decodedTarSplit := []byte{}
-	if tarSplitChunk.Offset > 0 {
+	var decodedTarSplit []byte = nil
+	if toc.TarSplitDigest != "" {
+		if tarSplitChunk.Offset <= 0 {
+			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)
+		}
 		tarSplit, err := readBlob(tarSplitChunk.Length)
 		if err != nil {
 			return nil, nil, nil, 0, err
 		}
-
 		decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
 		if err != nil {
 			return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
 		}
+		// We use the TOC for creating on-disk files, but the tar-split for creating metadata
+		// when exporting the layer contents. Ensure the two match, otherwise local inspection of a container
+		// might be misleading about the exported contents.
+		if err := ensureTOCMatchesTarSplit(toc, decodedTarSplit); err != nil {
+			return nil, nil, nil, 0, fmt.Errorf("tar-split and TOC data is inconsistent: %w", err)
+		}
+	} else if tarSplitChunk.Offset > 0 {
+		// We must ignore the tar-split when the digest is not present in the TOC, because we can’t authenticate it.
+		//
+		// But if we asked for the chunk, now we must consume the data to not block the producer.
+		// Ideally the GetBlobAt API should be changed so that this is not necessary.
+		_, err := readBlob(tarSplitChunk.Length)
+		if err != nil {
+			return nil, nil, nil, 0, err
+		}
 	}
 	return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
 }
 
+// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
+func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
+	pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
+	for i := range toc.Entries {
+		e := &toc.Entries[i]
+		if e.Type != minimal.TypeChunk {
+			if _, ok := pendingFiles[e.Name]; ok {
+				return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
+			}
+			pendingFiles[e.Name] = e
+		}
+	}
+
+	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+	if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
+		e, ok := pendingFiles[hdr.Name]
+		if !ok {
+			return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
+		}
+		delete(pendingFiles, hdr.Name)
+		expected, err := minimal.NewFileMetadata(hdr)
+		if err != nil {
+			return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
+		}
+		if err := ensureFileMetadataAttributesMatch(e, &expected); err != nil {
+			return fmt.Errorf("TOC and tar-split metadata doesn’t match: %w", err)
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+	if len(pendingFiles) != 0 {
+		remaining := expMaps.Keys(pendingFiles)
+		if len(remaining) > 5 {
+			remaining = remaining[:5] // Just to limit the size of the output.
+		}
+		return fmt.Errorf("TOC contains entries not present in tar-split, incl. %q", remaining)
+	}
+	return nil
+}
+
+// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
+func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
+	var res int64 = 0
+
+	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+	for {
+		entry, err := unpacker.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return -1, fmt.Errorf("reading tar-split entries: %w", err)
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			res += int64(len(entry.Payload))
+		case storage.FileType:
+			// entry.Size is the “logical size”, which might not be the physical size for sparse entries;
+			// but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents,
+			// sparse files are not supported.
+			// Also https://github.com/opencontainers/image-spec/blob/main/layer.md says
+			// > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations.
+			res += entry.Size
+		default:
+			return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type)
+		}
+	}
+	return res, nil
+}
+
+// ensureTimePointersMatch ensures that a and b are equal
+func ensureTimePointersMatch(a, b *time.Time) error {
+	// We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
+// The archive/tar code turns time.IsZero() timestamps into a Unix timestamp of 0 when writing, but turns a Unix timestamp of 0
+// when reading into a (local-timezone) Jan 1 1970, which is not IsZero(). So, treat that the same as IsZero as well.
+	unixZero := time.Unix(0, 0)
+	if a != nil && (a.IsZero() || a.Equal(unixZero)) {
+		a = nil
+	}
+	if b != nil && (b.IsZero() || b.Equal(unixZero)) {
+		b = nil
+	}
+	switch {
+	case a == nil && b == nil:
+		return nil
+	case a == nil:
+		return fmt.Errorf("nil != %v", *b)
+	case b == nil:
+		return fmt.Errorf("%v != nil", *a)
+	default:
+		if a.Equal(*b) {
+			return nil
+		}
+		return fmt.Errorf("%v != %v", *a, *b)
+	}
+}
+
+// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
+// in the tar stream or matching contents)
+func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
+	// Keep this in sync with minimal.FileMetadata!
+
+	if a.Type != b.Type {
+		return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
+	}
+	if a.Name != b.Name {
+		return fmt.Errorf("mismatch of Name: %q != %q", a.Name, b.Name)
+	}
+	if a.Linkname != b.Linkname {
+		return fmt.Errorf("mismatch of Linkname: %q != %q", a.Linkname, b.Linkname)
+	}
+	if a.Mode != b.Mode {
+		return fmt.Errorf("mismatch of Mode: %q != %q", a.Mode, b.Mode)
+	}
+	if a.Size != b.Size {
+		return fmt.Errorf("mismatch of Size: %q != %q", a.Size, b.Size)
+	}
+	if a.UID != b.UID {
+		return fmt.Errorf("mismatch of UID: %q != %q", a.UID, b.UID)
+	}
+	if a.GID != b.GID {
+		return fmt.Errorf("mismatch of GID: %q != %q", a.GID, b.GID)
+	}
+
+	if err := ensureTimePointersMatch(a.ModTime, b.ModTime); err != nil {
+		return fmt.Errorf("mismatch of ModTime: %w", err)
+	}
+	if err := ensureTimePointersMatch(a.AccessTime, b.AccessTime); err != nil {
+		return fmt.Errorf("mismatch of AccessTime: %w", err)
+	}
+	if err := ensureTimePointersMatch(a.ChangeTime, b.ChangeTime); err != nil {
+		return fmt.Errorf("mismatch of ChangeTime: %w", err)
+	}
+	if a.Devmajor != b.Devmajor {
+		return fmt.Errorf("mismatch of Devmajor: %q != %q", a.Devmajor, b.Devmajor)
+	}
+	if a.Devminor != b.Devminor {
+		return fmt.Errorf("mismatch of Devminor: %q != %q", a.Devminor, b.Devminor)
+	}
+	if !maps.Equal(a.Xattrs, b.Xattrs) {
+		return fmt.Errorf("mismatch of Xattrs: %q != %q", a.Xattrs, b.Xattrs)
+	}
+
+	// Digest is not compared
+	// Offset is not compared
+	// EndOffset is not compared
+
+	// ChunkSize is not compared
+	// ChunkOffset is not compared
+	// ChunkDigest is not compared
+	// ChunkType is not compared
+	return nil
+}
+
 func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
 	d, err := digest.Parse(expectedCompressedChecksum)
 	if err != nil {
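
ensureTimePointersMatch treats three representations as equally unset: a nil pointer, a zero time.Time, and a Unix timestamp of 0 (which archive/tar round-trips into a non-zero, local-timezone Jan 1 1970). A standalone demonstration of that normalization:

```go
package main

import (
	"fmt"
	"time"
)

func normalize(t *time.Time) *time.Time {
	unixZero := time.Unix(0, 0)
	if t != nil && (t.IsZero() || t.Equal(unixZero)) {
		return nil
	}
	return t
}

func main() {
	var zero time.Time
	uz := time.Unix(0, 0)
	real := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)

	fmt.Println(normalize(nil) == nil)   // true
	fmt.Println(normalize(&zero) == nil) // true
	fmt.Println(normalize(&uz) == nil)   // true: Jan 1 1970 counts as unset
	fmt.Println(normalize(&real) == nil) // false
}
```
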
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index a2fd904ca..56ae4c770 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -7,10 +7,9 @@ package compressor
 import (
 	"bufio"
 	"bytes"
-	"encoding/base64"
 	"io"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
@@ -214,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) {
 	compressed := bytes.NewBuffer(nil)
 	digester := digest.Canonical.Digester()
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
 	if err != nil {
 		return nil, err
 	}
@@ -255,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 	buf := make([]byte, 4096)
 
-	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
 	if err != nil {
 		return err
 	}
@@ -277,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		return offset, nil
 	}
 
-	var metadata []internal.FileMetadata
+	var metadata []minimal.FileMetadata
 	for {
 		hdr, err := tr.Next()
 		if err != nil {
@@ -342,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 
 				chunkSize := rcReader.WrittenOut - lastChunkOffset
 				if chunkSize > 0 {
-					chunkType := internal.ChunkTypeData
+					chunkType := minimal.ChunkTypeData
 					if rcReader.IsLastChunkZeros {
-						chunkType = internal.ChunkTypeZeros
+						chunkType = minimal.ChunkTypeZeros
 					}
 
 					chunks = append(chunks, chunk{
@@ -369,37 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 			}
 		}
 
-		typ, err := internal.GetType(hdr.Typeflag)
+		mainEntry, err := minimal.NewFileMetadata(hdr)
 		if err != nil {
 			return err
 		}
-		xattrs := make(map[string]string)
-		for k, v := range hdr.Xattrs {
-			xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
-		}
-		entries := []internal.FileMetadata{
-			{
-				Type:       typ,
-				Name:       hdr.Name,
-				Linkname:   hdr.Linkname,
-				Mode:       hdr.Mode,
-				Size:       hdr.Size,
-				UID:        hdr.Uid,
-				GID:        hdr.Gid,
-				ModTime:    &hdr.ModTime,
-				AccessTime: &hdr.AccessTime,
-				ChangeTime: &hdr.ChangeTime,
-				Devmajor:   hdr.Devmajor,
-				Devminor:   hdr.Devminor,
-				Xattrs:     xattrs,
-				Digest:     checksum,
-				Offset:     startOffset,
-				EndOffset:  lastOffset,
-			},
-		}
+		mainEntry.Digest = checksum
+		mainEntry.Offset = startOffset
+		mainEntry.EndOffset = lastOffset
+		entries := []minimal.FileMetadata{mainEntry}
 		for i := 1; i < len(chunks); i++ {
-			entries = append(entries, internal.FileMetadata{
-				Type:        internal.TypeChunk,
+			entries = append(entries, minimal.FileMetadata{
+				Type:        minimal.TypeChunk,
 				Name:        hdr.Name,
 				ChunkOffset: chunks[i].ChunkOffset,
 			})
@@ -445,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 	}
 	tarSplitData.zstd = nil
 
-	ts := internal.TarSplitData{
+	ts := minimal.TarSplitData{
 		Data:             tarSplitData.compressed.Bytes(),
 		Digest:           tarSplitData.digester.Digest(),
 		UncompressedSize: tarSplitData.uncompressedCounter.Count,
 	}
 
-	return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
+	return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
 }
 
 type zstdChunkedWriter struct {
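
The compressor refactor replaces the hand-built internal.FileMetadata literal, including manual base64-encoding of xattr values, with minimal.NewFileMetadata(hdr) plus the digest and offset fields. A standalone sketch of the xattr encoding convention that moved into the helper; values are base64-encoded because xattrs may contain arbitrary bytes:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// encodeXattrs mirrors the convention the removed literal implemented by hand.
func encodeXattrs(raw map[string]string) map[string]string {
	out := make(map[string]string, len(raw))
	for k, v := range raw {
		out[k] = base64.StdEncoding.EncodeToString([]byte(v))
	}
	return out
}

func main() {
	enc := encodeXattrs(map[string]string{"user.comment": "hello\x00world"})
	fmt.Println(enc["user.comment"]) // aGVsbG8Ad29ybGQ=
}
```
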
diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
index 701b6aa53..0e673f3f2 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
@@ -1,15 +1,19 @@
+//go:build unix
+
 package dump
 
 import (
 	"bufio"
+	"encoding/base64"
 	"fmt"
 	"io"
 	"path/filepath"
-	"strings"
+	"reflect"
 	"time"
-	"unicode"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
+	"github.com/opencontainers/go-digest"
 	"golang.org/x/sys/unix"
 )
 
@@ -20,20 +24,26 @@ const (
 	ESCAPE_LONE_DASH
 )
 
-func escaped(val string, escape int) string {
+func escaped(val []byte, escape int) string {
 	noescapeSpace := escape&NOESCAPE_SPACE != 0
 	escapeEqual := escape&ESCAPE_EQUAL != 0
 	escapeLoneDash := escape&ESCAPE_LONE_DASH != 0
 
-	length := len(val)
-
-	if escapeLoneDash && val == "-" {
+	if escapeLoneDash && len(val) == 1 && val[0] == '-' {
 		return fmt.Sprintf("\\x%.2x", val[0])
 	}
 
+	// This is intended to match the C isprint API with LC_CTYPE=C
+	isprint := func(c byte) bool {
+		return c >= 32 && c < 127
+	}
+	// This is intended to match the C isgraph API with LC_CTYPE=C
+	isgraph := func(c byte) bool {
+		return c > 32 && c < 127
+	}
+
 	var result string
-	for i := 0; i < length; i++ {
-		c := val[i]
+	for _, c := range []byte(val) {
 		hexEscape := false
 		var special string
 
@@ -50,9 +60,9 @@ func escaped(val string, escape int) string {
 			hexEscape = escapeEqual
 		default:
 			if noescapeSpace {
-				hexEscape = !unicode.IsPrint(rune(c))
+				hexEscape = !isprint(c)
 			} else {
-				hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c))
+				hexEscape = !isgraph(c)
 			}
 		}
 
@@ -67,8 +77,8 @@ func escaped(val string, escape int) string {
 	return result
 }
 
-func escapedOptional(val string, escape int) string {
-	if val == "" {
+func escapedOptional(val []byte, escape int) string {
+	if len(val) == 0 {
 		return "-"
 	}
 	return escaped(val, escape)
@@ -76,17 +86,17 @@ func escapedOptional(val string, escape int) string {
 
 func getStMode(mode uint32, typ string) (uint32, error) {
 	switch typ {
-	case internal.TypeReg, internal.TypeLink:
+	case minimal.TypeReg, minimal.TypeLink:
 		mode |= unix.S_IFREG
-	case internal.TypeChar:
+	case minimal.TypeChar:
 		mode |= unix.S_IFCHR
-	case internal.TypeBlock:
+	case minimal.TypeBlock:
 		mode |= unix.S_IFBLK
-	case internal.TypeDir:
+	case minimal.TypeDir:
 		mode |= unix.S_IFDIR
-	case internal.TypeFifo:
+	case minimal.TypeFifo:
 		mode |= unix.S_IFIFO
-	case internal.TypeSymlink:
+	case minimal.TypeSymlink:
 		mode |= unix.S_IFLNK
 	default:
 		return 0, fmt.Errorf("unknown type %s", typ)
@@ -94,26 +104,37 @@ func getStMode(mode uint32, typ string) (uint32, error) {
 	return mode, nil
 }
 
-func sanitizeName(name string) string {
-	path := filepath.Clean(name)
-	if path == "." {
-		path = "/"
-	} else if path[0] != '/' {
-		path = "/" + path
-	}
-	return path
-}
+func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error {
+	path := storagePath.CleanAbsPath(entry.Name)
+
+	parent := filepath.Dir(path)
+	if _, found := added[parent]; !found && path != "/" {
+		parentEntry := &minimal.FileMetadata{
+			Name: parent,
+			Type: minimal.TypeDir,
+			Mode: 0o755,
+		}
+		if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
+			return err
+		}
 
-func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
-	path := sanitizeName(entry.Name)
+	}
+	if e, found := added[path]; found {
+		// if the entry was already added, make sure it has the same data
+		if !reflect.DeepEqual(*e, *entry) {
+			return fmt.Errorf("entry %q already added with different data", path)
+		}
+		return nil
+	}
+	added[path] = entry
 
-	if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
+	if _, err := fmt.Fprint(out, escaped([]byte(path), ESCAPE_STANDARD)); err != nil {
 		return err
 	}
 
 	nlinks := links[entry.Name] + links[entry.Linkname] + 1
 	link := ""
-	if entry.Type == internal.TypeLink {
+	if entry.Type == minimal.TypeLink {
 		link = "@"
 	}
 
@@ -139,19 +160,24 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
 
 	var payload string
 	if entry.Linkname != "" {
-		if entry.Type == internal.TypeSymlink {
+		if entry.Type == minimal.TypeSymlink {
 			payload = entry.Linkname
 		} else {
-			payload = sanitizeName(entry.Linkname)
+			payload = storagePath.CleanAbsPath(entry.Linkname)
 		}
-	} else {
-		if len(entry.Digest) > 10 {
-			d := strings.Replace(entry.Digest, "sha256:", "", 1)
-			payload = d[:2] + "/" + d[2:]
+	} else if entry.Digest != "" {
+		d, err := digest.Parse(entry.Digest)
+		if err != nil {
+			return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err)
 		}
+		path, err := storagePath.RegularFilePathForValidatedDigest(d)
+		if err != nil {
+			return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err)
+		}
+		payload = path
 	}
 
-	if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil {
+	if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
 		return err
 	}
 
@@ -165,14 +191,18 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
 		return err
 	}
 	digest := verityDigests[payload]
-	if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil {
+	if _, err := fmt.Fprint(out, escapedOptional([]byte(digest), ESCAPE_LONE_DASH)); err != nil {
 		return err
 	}
 
-	for k, v := range entry.Xattrs {
-		name := escaped(k, ESCAPE_EQUAL)
-		value := escaped(v, ESCAPE_EQUAL)
+	for k, vEncoded := range entry.Xattrs {
+		v, err := base64.StdEncoding.DecodeString(vEncoded)
+		if err != nil {
+			return fmt.Errorf("decode xattr %q: %w", k, err)
+		}
+		name := escaped([]byte(k), ESCAPE_EQUAL)
 
+		value := escaped(v, ESCAPE_EQUAL)
 		if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil {
 			return err
 		}
@@ -185,7 +215,7 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
 
 // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
 func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
-	toc, ok := tocI.(*internal.TOC)
+	toc, ok := tocI.(*minimal.TOC)
 	if !ok {
 		return nil, fmt.Errorf("invalid TOC type")
 	}
@@ -201,24 +231,25 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 		}()
 
 		links := make(map[string]int)
+		added := make(map[string]*minimal.FileMetadata)
 		for _, e := range toc.Entries {
 			if e.Linkname == "" {
 				continue
 			}
-			if e.Type == internal.TypeSymlink {
+			if e.Type == minimal.TypeSymlink {
 				continue
 			}
 			links[e.Linkname] = links[e.Linkname] + 1
 		}
 
-		if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
-			root := &internal.FileMetadata{
+		if len(toc.Entries) == 0 {
+			root := &minimal.FileMetadata{
 				Name: "/",
-				Type: internal.TypeDir,
+				Type: minimal.TypeDir,
 				Mode: 0o755,
 			}
 
-			if err := dumpNode(w, links, verityDigests, root); err != nil {
+			if err := dumpNode(w, added, links, verityDigests, root); err != nil {
 				pipeW.CloseWithError(err)
 				closed = true
 				return
@@ -226,10 +257,10 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 		}
 
 		for _, e := range toc.Entries {
-			if e.Type == internal.TypeChunk {
+			if e.Type == minimal.TypeChunk {
 				continue
 			}
-			if err := dumpNode(w, links, verityDigests, &e); err != nil {
+			if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
 				pipeW.CloseWithError(err)
 				closed = true
 				return
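
The dump rewrite swaps unicode.IsPrint and unicode.IsSpace for byte-level, C-locale isprint/isgraph checks, so multi-byte UTF-8 sequences are now hex-escaped byte by byte instead of passing through as printable runes. A simplified standalone sketch of the ESCAPE_STANDARD path, with the special-character handling condensed:

```go
package main

import "fmt"

func escaped(val []byte) string {
	isgraph := func(c byte) bool { return c > 32 && c < 127 } // C isgraph, LC_CTYPE=C
	var out string
	for _, c := range val {
		switch c {
		case '\\', '\n', '\r', '\t':
			out += fmt.Sprintf("\\x%.2x", c)
		default:
			if !isgraph(c) { // spaces and non-ASCII bytes get hex-escaped too
				out += fmt.Sprintf("\\x%.2x", c)
			} else {
				out += string(c)
			}
		}
	}
	return out
}

func main() {
	fmt.Println(escaped([]byte("a b")))   // a\x20b
	fmt.Println(escaped([]byte("naïve"))) // na\xc3\xafve
}
```
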
diff --git a/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
new file mode 100644
index 000000000..82685e9c9
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/filesystem_linux.go
@@ -0,0 +1,646 @@
+package chunked
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	driversCopy "github.com/containers/storage/drivers/copy"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	storagePath "github.com/containers/storage/pkg/chunked/internal/path"
+	securejoin "github.com/cyphar/filepath-securejoin"
+	"github.com/vbatts/tar-split/archive/tar"
+	"golang.org/x/sys/unix"
+)
+
+// procPathForFile returns an absolute path in /proc which
+// refers to the file; see procPathForFd.
+func procPathForFile(f *os.File) string {
+	return procPathForFd(int(f.Fd()))
+}
+
+// procPathForFd returns an absolute path in /proc which
+// refers to the file; this allows passing a file descriptor
+// in places that don't accept a file descriptor.
+func procPathForFd(fd int) string {
+	return fmt.Sprintf("/proc/self/fd/%d", fd)
+}
+
+// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
+// are not part of the TOC document.
+// Type: TypeChunk entries are stored in chunks; the primary [fileMetadata] entries never use TypeChunk.
+type fileMetadata struct {
+	minimal.FileMetadata
+
+	// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
+	chunks []*minimal.FileMetadata
+
+	// skipSetAttrs is set when the file attributes must not be
+	// modified, e.g. it is a hard link from a different source,
+	// or a composefs file.
+	skipSetAttrs bool
+}
+
+// splitPath takes a file path as input and returns two components: dir and base.
+// Unlike filepath.Split(), this function handles some edge cases.
+// If the path refers to a file in the root directory, the returned dir is "/".
+// The returned base value is never empty, never contains a slash, and is
+// never "..".
+func splitPath(path string) (string, string, error) {
+	path = storagePath.CleanAbsPath(path)
+	dir, base := filepath.Split(path)
+	if base == "" {
+		base = "."
+	}
+	// Remove trailing slashes from dir, but make sure that "/" is preserved.
+	dir = strings.TrimSuffix(dir, "/")
+	if dir == "" {
+		dir = "/"
+	}
+
+	if strings.Contains(base, "/") {
+		// This should never happen, but be safe as the base is passed to *at syscalls.
+		return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
+	}
+	return dir, base, nil
+}
+
+func doHardLink(dirfd, srcFd int, destFile string) error {
+	destDir, destBase, err := splitPath(destFile)
+	if err != nil {
+		return err
+	}
+	destDirFd := dirfd
+	if destDir != "/" {
+		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		destDirFd = int(f.Fd())
+	}
+
+	doLink := func() error {
+		// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
+		// /proc/self/fd doesn't and can be used with rootless.
+		srcPath := procPathForFd(srcFd)
+		err := unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
+		if err != nil {
+			return &fs.PathError{Op: "linkat", Path: destFile, Err: err}
+		}
+		return nil
+	}
+
+	err = doLink()
+
+	// if the destination exists, unlink it first and try again
+	if err != nil && os.IsExist(err) {
+		if err := unix.Unlinkat(destDirFd, destBase, 0); err != nil {
+			return err
+		}
+		return doLink()
+	}
+	return err
+}
+
+func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
+	destFile := fileMetadata.Name
+	src := procPathForFd(srcFd)
+	st, err := os.Stat(src)
+	if err != nil {
+		return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
+	}
+
+	copyWithFileRange, copyWithFileClone := true, true
+
+	if useHardLinks {
+		err := doHardLink(dirfd, srcFd, destFile)
+		if err == nil {
+			// if the file was deduplicated with a hard link, skip overriding file metadata.
+			fileMetadata.skipSetAttrs = true
+			return nil, st.Size(), nil
+		}
+	}
+
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := openFileUnderRoot(dirfd, destFile, newFileFlags, mode)
+	if err != nil {
+		return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
+	}
+
+	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
+	if err != nil {
+		dstFile.Close()
+		return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
+	}
+	return dstFile, st.Size(), nil
+}
+
+func timeToTimespec(time *time.Time) (ts unix.Timespec) {
+	if time == nil || time.IsZero() {
+		// Return UTIME_OMIT special value
+		ts.Sec = 0
+		ts.Nsec = ((1 << 30) - 2)
+		return
+	}
+	return unix.NsecToTimespec(time.UnixNano())
+}
+
+// chown changes the owner and group of the file at the specified path under the directory
+// pointed by dirfd.
+// If nofollow is true, the function will not follow symlinks.
+// If path is empty, the function will change the owner and group of the file descriptor.
+// absolutePath is the absolute path of the file, used only for error messages.
+func chown(dirfd int, path string, uid, gid int, nofollow bool, absolutePath string) error {
+	var err error
+	flags := 0
+	if nofollow {
+		flags |= unix.AT_SYMLINK_NOFOLLOW
+	} else if path == "" {
+		flags |= unix.AT_EMPTY_PATH
+	}
+	err = unix.Fchownat(dirfd, path, uid, gid, flags)
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, syscall.EINVAL) {
+		return fmt.Errorf(`potentially insufficient UIDs or GIDs available in the user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, path, err)
+	}
+	return &fs.PathError{Op: "fchownat", Path: absolutePath, Err: err}
+}
+
+// setFileAttrs sets the file attributes for file given metadata
+func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
+	if metadata.skipSetAttrs {
+		return nil
+	}
+	if file == nil {
+		return errors.New("invalid file")
+	}
+	fd := int(file.Fd())
+
+	t, err := typeToTarType(metadata.Type)
+	if err != nil {
+		return err
+	}
+
+	// If it is a symlink, force to use the path
+	if t == tar.TypeSymlink {
+		usePath = true
+	}
+
+	baseName := ""
+	if usePath {
+		dirName := filepath.Dir(metadata.Name)
+		if dirName != "" {
+			parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
+			if err != nil {
+				return err
+			}
+			defer parentFd.Close()
+
+			dirfd = int(parentFd.Fd())
+		}
+		baseName = filepath.Base(metadata.Name)
+	}
+
+	doChown := func() error {
+		var err error
+		if usePath {
+			err = chown(dirfd, baseName, metadata.UID, metadata.GID, true, metadata.Name)
+		} else {
+			err = chown(fd, "", metadata.UID, metadata.GID, false, metadata.Name)
+		}
+		if options.IgnoreChownErrors {
+			return nil
+		}
+		return err
+	}
+
+	doSetXattr := func(k string, v []byte) error {
+		err := unix.Fsetxattr(fd, k, v, 0)
+		if err != nil {
+			return &fs.PathError{Op: "fsetxattr", Path: metadata.Name, Err: err}
+		}
+		return nil
+	}
+
+	doUtimes := func() error {
+		ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
+		var err error
+		if usePath {
+			err = unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
+		} else {
+			err = unix.UtimesNanoAt(unix.AT_FDCWD, procPathForFd(fd), ts, 0)
+		}
+		if err != nil {
+			return &fs.PathError{Op: "utimensat", Path: metadata.Name, Err: err}
+		}
+		return nil
+	}
+
+	doChmod := func() error {
+		var err error
+		op := ""
+		if usePath {
+			err = unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+			op = "fchmodat"
+		} else {
+			err = unix.Fchmod(fd, uint32(mode))
+			op = "fchmod"
+		}
+		if err != nil {
+			return &fs.PathError{Op: op, Path: metadata.Name, Err: err}
+		}
+		return nil
+	}
+
+	if err := doChown(); err != nil {
+		return err
+	}
+
+	canIgnore := func(err error) bool {
+		return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
+	}
+
+	for k, v := range metadata.Xattrs {
+		if _, found := xattrsToIgnore[k]; found {
+			continue
+		}
+		data, err := base64.StdEncoding.DecodeString(v)
+		if err != nil {
+			return fmt.Errorf("decode xattr %q: %w", v, err)
+		}
+		if err := doSetXattr(k, data); !canIgnore(err) {
+			return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
+		}
+	}
+
+	if err := doUtimes(); !canIgnore(err) {
+		return err
+	}
+
+	if err := doChmod(); !canIgnore(err) {
+		return err
+	}
+	return nil
+}
+
+func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	root := procPathForFd(dirfd)
+
+	targetRoot, err := os.Readlink(root)
+	if err != nil {
+		return -1, err
+	}
+
+	hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
+
+	var fd int
+	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
+	// last component as the path to openat().
+	if hasNoFollow {
+		dirName, baseName, err := splitPath(name)
+		if err != nil {
+			return -1, err
+		}
+		if dirName != "/" {
+			newRoot, err := securejoin.SecureJoin(root, dirName)
+			if err != nil {
+				return -1, err
+			}
+			root = newRoot
+		}
+
+		parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
+		if err != nil {
+			return -1, &fs.PathError{Op: "open", Path: root, Err: err}
+		}
+		defer unix.Close(parentDirfd)
+
+		fd, err = unix.Openat(parentDirfd, baseName, int(flags), uint32(mode))
+		if err != nil {
+			return -1, &fs.PathError{Op: "openat", Path: name, Err: err}
+		}
+	} else {
+		newPath, err := securejoin.SecureJoin(root, name)
+		if err != nil {
+			return -1, err
+		}
+		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+		if err != nil {
+			return -1, &fs.PathError{Op: "openat", Path: newPath, Err: err}
+		}
+	}
+
+	target, err := os.Readlink(procPathForFd(fd))
+	if err != nil {
+		unix.Close(fd)
+		return -1, err
+	}
+
+	// Add an additional check to make sure the opened fd is inside the rootfs
+	if !strings.HasPrefix(target, targetRoot) {
+		unix.Close(fd)
+		return -1, fmt.Errorf("while resolving %q.  It resolves outside the root directory", name)
+	}
+
+	return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	how := unix.OpenHow{
+		Flags:   flags,
+		Mode:    uint64(mode & 0o7777),
+		Resolve: unix.RESOLVE_IN_ROOT,
+	}
+	fd, err := unix.Openat2(dirfd, name, &how)
+	if err != nil {
+		return -1, &fs.PathError{Op: "openat2", Path: name, Err: err}
+	}
+	return fd, nil
+}
+
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel,
+// to avoid trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if it is not supported, falls back to a
+// userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	var fd int
+	var err error
+	if name == "" {
+		fd, err := unix.Dup(dirfd)
+		if err != nil {
+			return -1, fmt.Errorf("failed to duplicate file descriptor %d: %w", dirfd, err)
+		}
+		return fd, nil
+	}
+	if atomic.LoadInt32(&skipOpenat2) > 0 {
+		fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+	} else {
+		fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+		// If the function failed with ENOSYS, switch off the support for openat2
+		// and fall back to using securejoin.
+		if err != nil && errors.Is(err, unix.ENOSYS) {
+			atomic.StoreInt32(&skipOpenat2, 1)
+			fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+		}
+	}
+	return fd, err
+}
+
+// openFileUnderRoot safely opens a file under the specified root directory using openat2
+// dirfd is an open file descriptor to the target checkout directory.
+// name is the path to open relative to dirfd.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
+func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) (*os.File, error) {
+	fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
+	if err == nil {
+		return os.NewFile(uintptr(fd), name), nil
+	}
+
+	hasCreate := (flags & unix.O_CREAT) != 0
+	if errors.Is(err, unix.ENOENT) && hasCreate {
+		parent := filepath.Dir(name)
+		if parent != "" {
+			newDirfd, err2 := openOrCreateDirUnderRoot(dirfd, parent, 0)
+			if err2 == nil {
+				defer newDirfd.Close()
+				fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
+				if err == nil {
+					return os.NewFile(uintptr(fd), name), nil
+				}
+			}
+		}
+	}
+	return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
+}
+
+// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing.
+// dirfd is an open file descriptor to the target checkout directory.
+// name is the path to open relative to dirfd.
+// mode specifies the mode to use for newly created files.
+func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) {
+	fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	if err == nil {
+		return os.NewFile(uintptr(fd), name), nil
+	}
+
+	if errors.Is(err, unix.ENOENT) {
+		parent := filepath.Dir(name)
+		// do not create the root directory; it should always exist
+		if parent != name {
+			pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
+			if err2 != nil {
+				return nil, err
+			}
+			defer pDir.Close()
+
+			baseName := filepath.Base(name)
+
+			if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, uint32(mode)); err2 != nil {
+				return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2}
+			}
+
+			fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+			if err == nil {
+				return os.NewFile(uintptr(fd), name), nil
+			}
+		}
+	}
+	return nil, err
+}
+
+// appendHole creates a hole with the specified size at the open fd.
+// fd is the open file descriptor.
+// name is the path to use for error messages.
+// size is the size of the hole to create.
+func appendHole(fd int, name string, size int64) error {
+	off, err := unix.Seek(fd, size, unix.SEEK_CUR)
+	if err != nil {
+		return &fs.PathError{Op: "seek", Path: name, Err: err}
+	}
+	// Make sure the file size is changed: this might be the last hole, with no other data written afterwards.
+	if err := unix.Ftruncate(fd, off); err != nil {
+		return &fs.PathError{Op: "ftruncate", Path: name, Err: err}
+	}
+	return nil
+}
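
To make the technique concrete, a standalone sketch (the file path is made up) showing that seeking past the end creates a hole, and that the final Ftruncate preserves it even when nothing is written afterwards:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp/sparse.bin", unix.O_CREAT|unix.O_TRUNC|unix.O_WRONLY|unix.O_CLOEXEC, 0o644)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Seek 1 MiB past the current offset without writing: a hole.
	off, err := unix.Seek(fd, 1<<20, unix.SEEK_CUR)
	if err != nil {
		panic(err)
	}
	// Extend the file size so the trailing hole is not lost.
	if err := unix.Ftruncate(fd, off); err != nil {
		panic(err)
	}
	fmt.Println("apparent size:", off) // 1 MiB, with no data blocks allocated
}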
+
+func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
+	parent, base, err := splitPath(name)
+	if err != nil {
+		return err
+	}
+	parentFd := dirfd
+	if parent != "/" {
+		parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
+		if err != nil {
+			return err
+		}
+		defer parentFile.Close()
+		parentFd = int(parentFile.Fd())
+	}
+
+	if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
+		if !os.IsExist(err) {
+			return &fs.PathError{Op: "mkdirat", Path: name, Err: err}
+		}
+	}
+
+	file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	return setFileAttrs(dirfd, file, mode, metadata, options, false)
+}
+
+func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
+	sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return err
+	}
+	defer sourceFile.Close()
+
+	err = doHardLink(dirfd, int(sourceFile.Fd()), metadata.Name)
+	if err != nil {
+		return err
+	}
+
+	newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
+	if err != nil {
+		// If the target is a symlink, open the file with O_PATH.
+		if errors.Is(err, unix.ELOOP) {
+			newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_PATH|unix.O_NOFOLLOW, 0)
+			if err != nil {
+				return err
+			}
+			defer newFile.Close()
+
+			return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
+		}
+		return err
+	}
+	defer newFile.Close()
+
+	return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
+}
+
+func safeSymlink(dirfd int, metadata *fileMetadata) error {
+	destDir, destBase, err := splitPath(metadata.Name)
+	if err != nil {
+		return err
+	}
+	destDirFd := dirfd
+	if destDir != "/" {
+		f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		destDirFd = int(f.Fd())
+	}
+
+	if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
+		return &fs.PathError{Op: "symlinkat", Path: metadata.Name, Err: err}
+	}
+	return nil
+}
+
+type whiteoutHandler struct {
+	Dirfd int
+	Root  string
+}
+
+func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
+	file, err := openOrCreateDirUnderRoot(d.Dirfd, path, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
+		return &fs.PathError{Op: "fsetxattr", Path: path, Err: err}
+	}
+	return nil
+}
+
+func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
+	dir, base, err := splitPath(path)
+	if err != nil {
+		return err
+	}
+	dirfd := d.Dirfd
+	if dir != "/" {
+		dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
+		if err != nil {
+			return err
+		}
+		defer dir.Close()
+
+		dirfd = int(dir.Fd())
+	}
+
+	if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
+		return &fs.PathError{Op: "mknodat", Path: path, Err: err}
+	}
+
+	return nil
+}
+
+func (d whiteoutHandler) Chown(path string, uid, gid int) error {
+	file, err := openFileUnderRoot(d.Dirfd, path, unix.O_PATH, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	return chown(int(file.Fd()), "", uid, gid, false, path)
+}
+
+type readerAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+// seekableFile wraps a readerAtCloser to provide an ImageSourceSeekable.
+type seekableFile struct {
+	reader readerAtCloser
+}
+
+func (f *seekableFile) Close() error {
+	return f.reader.Close()
+}
+
+func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	streams := make(chan io.ReadCloser)
+	errs := make(chan error)
+
+	go func() {
+		for _, chunk := range chunks {
+			streams <- io.NopCloser(io.NewSectionReader(f.reader, int64(chunk.Offset), int64(chunk.Length)))
+		}
+		close(streams)
+		close(errs)
+	}()
+
+	return streams, errs, nil
+}
+
+func newSeekableFile(reader readerAtCloser) *seekableFile {
+	return &seekableFile{reader: reader}
+}
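
A sketch of how a caller might consume the two channels returned by the GetBlobAt implementation above (chunk offsets are illustrative, and an io import is assumed); the streams channel is drained before the errors channel is checked:

// consumeChunks drains streams and then checks errs; sf is assumed to
// wrap an already-open file (see newSeekableFile above).
func consumeChunks(sf *seekableFile) error {
	streams, errs, err := sf.GetBlobAt([]ImageSourceChunk{
		{Offset: 0, Length: 512},
		{Offset: 4096, Length: 1024},
	})
	if err != nil {
		return err
	}
	for stream := range streams {
		_, copyErr := io.Copy(io.Discard, stream)
		stream.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	// This implementation closes errs without sending; a network-backed
	// ImageSourceSeekable may deliver per-request errors here instead.
	for err := range errs {
		return err
	}
	return nil
}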
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go
similarity index 59%
rename from vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
rename to vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go
index 5decbfb63..377ece2e8 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go
@@ -1,29 +1,61 @@
-package internal
+package minimal
 
 // NOTE: This is used from github.com/containers/image by callers that
 // don't otherwise use containers/storage, so don't make this depend on any
 // larger software like the graph drivers.
 
 import (
-	"archive/tar"
 	"bytes"
+	"encoding/base64"
 	"encoding/binary"
 	"fmt"
 	"io"
+	"strings"
 	"time"
 
+	"github.com/containers/storage/pkg/archive"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
 )
 
+// TOC is short for Table of Contents and is used by the zstd:chunked
+// file format to effectively add an overall index into the contents
+// of a tarball; it also includes file metadata.
 type TOC struct {
-	Version        int            `json:"version"`
-	Entries        []FileMetadata `json:"entries"`
-	TarSplitDigest digest.Digest  `json:"tarSplitDigest,omitempty"`
+	// Version is currently expected to be 1
+	Version int `json:"version"`
+	// Entries is the list of file metadata in this TOC.
+	// The ordering in this array currently defaults to being the same
+	// as that of the tar stream; however, this should not be relied on.
+	Entries []FileMetadata `json:"entries"`
+	// TarSplitDigest is the checksum of the "tar-split" data which
+	// is included as a distinct skippable zstd frame before the TOC.
+	TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
 }
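
For orientation, a hedged in-package sketch of parsing a TOC document fetched from the skippable zstd frame (encoding/json is used here for brevity, although the package itself decodes with jsoniter):

func parseTOC(tocJSON []byte) (*TOC, error) {
	var toc TOC
	if err := json.Unmarshal(tocJSON, &toc); err != nil {
		return nil, fmt.Errorf("unmarshaling TOC: %w", err)
	}
	// As documented above, Version is currently expected to be 1.
	if toc.Version != 1 {
		return nil, fmt.Errorf("unsupported TOC version %d", toc.Version)
	}
	return &toc, nil
}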
 
+// FileMetadata is an entry in the TOC that includes both generic file metadata
+// that duplicates what can be found in the tar header (and should match), and
+// special/custom content (see below).
+//
+// Regular files may optionally be represented as a sequence of “chunks”,
+// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
+// are heuristically determined to increase the chance of chunk matching / reuse,
+// similar to rsync). In that case, the regular file is represented
+// as an initial TypeReg entry (with all metadata for the file as a whole)
+// immediately followed by zero or more TypeChunk entries (containing only Type,
+// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
+// fields are relevant in all of these entries, including the initial
+// TypeReg one.
+//
+// Note that the metadata here, when fetched by a zstd:chunked aware client,
+// is used instead of that in the tar stream.  The contents of the tar stream
+// are not used in this scenario.
 type FileMetadata struct {
+	// If you add any fields, update ensureFileMetadataMatches as well!
+
+	// The metadata below largely duplicates that in the tar headers.
 	Type       string            `json:"type"`
 	Name       string            `json:"name"`
 	Linkname   string            `json:"linkName,omitempty"`
@@ -37,9 +69,11 @@ type FileMetadata struct {
 	Devmajor   int64             `json:"devMajor,omitempty"`
 	Devminor   int64             `json:"devMinor,omitempty"`
 	Xattrs     map[string]string `json:"xattrs,omitempty"`
-	Digest     string            `json:"digest,omitempty"`
-	Offset     int64             `json:"offset,omitempty"`
-	EndOffset  int64             `json:"endOffset,omitempty"`
+	// Digest is a hexadecimal sha256 checksum of the file contents; it
+	// is empty for empty files
+	Digest    string `json:"digest,omitempty"`
+	Offset    int64  `json:"offset,omitempty"`
+	EndOffset int64  `json:"endOffset,omitempty"`
 
 	ChunkSize   int64  `json:"chunkSize,omitempty"`
 	ChunkOffset int64  `json:"chunkOffset,omitempty"`
@@ -53,19 +87,23 @@ const (
 )
 
 const (
+	// The following types correspond to regular types of entries that can
+	// appear in a tar archive.
 	TypeReg     = "reg"
-	TypeChunk   = "chunk"
 	TypeLink    = "hardlink"
 	TypeChar    = "char"
 	TypeBlock   = "block"
 	TypeDir     = "dir"
 	TypeFifo    = "fifo"
 	TypeSymlink = "symlink"
+	// TypeChunk is special; in zstd:chunked, not only are files individually
+	// compressed and indexable, but a "rolling checksum" is also used to compute
+	// "chunks" of individual file contents, which are added to the TOC as well.
+	TypeChunk = "chunk"
 )
 
 var TarTypes = map[byte]string{
 	tar.TypeReg:     TypeReg,
-	tar.TypeRegA:    TypeReg,
 	tar.TypeLink:    TypeLink,
 	tar.TypeChar:    TypeChar,
 	tar.TypeBlock:   TypeBlock,
@@ -83,11 +121,23 @@ func GetType(t byte) (string, error) {
 }
 
 const (
+	// ManifestChecksumKey is an annotation containing the hexadecimal sha256 digest of the compressed manifest (TOC).
 	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
-	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
-	TarSplitInfoKey     = "io.github.containers.zstd-chunked.tarsplit-position"
-
-	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
+	// ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
+	// contents which are embedded as a skippable zstd frame.  It has a format of
+	// four decimal integers separated by `:` as follows:
+	// <offset>:<length>:<uncompressed length>:<type>
+	// The <type> is ManifestTypeCRFS which should have the value `1`.
+	ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
+	// TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
+	// contents which are embedded as a skippable zstd frame.  It has a format of
+	// three decimal integers separated by `:` as follows:
+	// <offset>:<length>:<uncompressed length>
+	TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
+
+	// TarSplitChecksumKey is no longer used; it has been replaced by the TOC.TarSplitDigest field.
+	// The value is kept here, commented out, as a historical reference for older zstd:chunked images.
+	// TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
 
 	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
 	ManifestTypeCRFS = 1
@@ -232,3 +282,43 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
 
 	return manifestDataLE
 }
+
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+	if t == nil || t.IsZero() {
+		return nil
+	}
+	return t
+}
+
+// NewFileMetadata creates a basic FileMetadata entry for hdr.
+// The caller must set Digest, Offset/EndOffset, and the Chunk* values separately.
+func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) {
+	typ, err := GetType(hdr.Typeflag)
+	if err != nil {
+		return FileMetadata{}, err
+	}
+	xattrs := make(map[string]string)
+	for k, v := range hdr.PAXRecords {
+		xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr)
+		if !ok {
+			continue
+		}
+		xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v))
+	}
+	return FileMetadata{
+		Type:       typ,
+		Name:       hdr.Name,
+		Linkname:   hdr.Linkname,
+		Mode:       hdr.Mode,
+		Size:       hdr.Size,
+		UID:        hdr.Uid,
+		GID:        hdr.Gid,
+		ModTime:    timeIfNotZero(&hdr.ModTime),
+		AccessTime: timeIfNotZero(&hdr.AccessTime),
+		ChangeTime: timeIfNotZero(&hdr.ChangeTime),
+		Devmajor:   hdr.Devmajor,
+		Devminor:   hdr.Devminor,
+		Xattrs:     xattrs,
+	}, nil
+}
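
A short in-package usage sketch for NewFileMetadata (header values are illustrative); note how an xattr arrives via a PAX record and is stored base64-encoded:

func ExampleNewFileMetadata() {
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "etc/hostname",
		Mode:     0o644,
		Size:     5,
		PAXRecords: map[string]string{
			archive.PaxSchilyXattr + "user.comment": "hello",
		},
	}
	m, err := NewFileMetadata(hdr)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Type, m.Xattrs["user.comment"])
	// Output: reg aGVsbG8=
}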
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
new file mode 100644
index 000000000..55ba74550
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/path/path.go
@@ -0,0 +1,27 @@
+package path
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// CleanAbsPath removes any ".." and "." from the path
+// and ensures it starts with a "/".  If the path refers to the root
+// directory, it returns "/".
+func CleanAbsPath(path string) string {
+	return filepath.Clean("/" + path)
+}
+
+// RegularFilePathForValidatedDigest returns the path used in the composefs backing store for a
+// regular file with the provided content digest.
+//
+// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries)
+func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) {
+	if algo := d.Algorithm(); algo != digest.SHA256 {
+		return "", fmt.Errorf("unexpected digest algorithm %q", algo)
+	}
+	e := d.Encoded()
+	return e[0:2] + "/" + e[2:], nil
+}
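
A brief in-package illustration of the resulting fan-out layout (the digest is the well-known sha256 of empty input, used purely as an example; the fmt import is assumed): the first two hex characters become a directory and the remainder the file name.

func ExampleRegularFilePathForValidatedDigest() {
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	p, err := RegularFilePathForValidatedDigest(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
	// Output: e3/b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}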
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go
index 752ee2520..cc9ee85bf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go
@@ -23,3 +23,22 @@ type ErrBadRequest struct { //nolint: errname
 func (e ErrBadRequest) Error() string {
 	return "bad request"
 }
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type that
+// suggests to the caller that a fallback mechanism can be used
+// instead of a hard failure.
+type ErrFallbackToOrdinaryLayerDownload struct {
+	Err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+	return c.Err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+	return c.Err
+}
+
+func newErrFallbackToOrdinaryLayerDownload(err error) error {
+	return ErrFallbackToOrdinaryLayerDownload{Err: err}
+}
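
A hedged sketch of how a caller outside this package might react to the new error type; pullPartial and pullFull are hypothetical stand-ins for the caller's two download paths:

import (
	"errors"

	"github.com/containers/storage/pkg/chunked"
)

func applyOrFallback(pullPartial, pullFull func() error) error {
	err := pullPartial()
	if err == nil {
		return nil
	}
	var fallback chunked.ErrFallbackToOrdinaryLayerDownload
	if errors.As(err, &fallback) {
		// The environment cannot satisfy a partial pull, but an
		// ordinary layer download is still expected to work.
		return pullFull()
	}
	return err // hard failure; retrying differently would not help
}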
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index e001022cb..8f4679851 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -2,34 +2,33 @@ package chunked
 
 import (
 	archivetar "archive/tar"
+	"bytes"
 	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
 	"hash"
 	"io"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"reflect"
 	"sort"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"syscall"
-	"time"
 
 	"github.com/containerd/stargz-snapshotter/estargz"
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
-	driversCopy "github.com/containers/storage/drivers/copy"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/compressor"
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
+	path "github.com/containers/storage/pkg/chunked/internal/path"
 	"github.com/containers/storage/pkg/chunked/toc"
 	"github.com/containers/storage/pkg/fsverity"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
-	"github.com/containers/storage/types"
 	securejoin "github.com/cyphar/filepath-securejoin"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
@@ -37,14 +36,15 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	tsStorage "github.com/vbatts/tar-split/tar/storage"
 	"golang.org/x/sys/unix"
 )
 
 const (
 	maxNumberMissingChunks  = 1024
-	autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
+	autoMergePartsThreshold = 1024 // if the gap between two ranges is below this threshold, automatically merge them.
 	newFileFlags            = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
-	containersOverrideXattr = "user.containers.override_stat"
 	bigDataKey              = "zstd-chunked-manifest"
 	chunkedData             = "zstd-chunked-data"
 	chunkedLayerDataKey     = "zstd-chunked-layer-data"
@@ -59,63 +59,56 @@ const (
 	copyGoRoutines = 32
 )
 
-// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
-// are not part of the TOC document.
-// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
-type fileMetadata struct {
-	internal.FileMetadata
-
-	// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
-	chunks []*internal.FileMetadata
-
-	// skipSetAttrs is set when the file attributes must not be
-	// modified, e.g. it is a hard link from a different source,
-	// or a composefs file.
-	skipSetAttrs bool
-}
-
 type compressedFileType int
 
 type chunkedDiffer struct {
+	// Initial parameters, used throughout and never modified
+	// ==========
+	pullOptions pullOptions
 	stream      ImageSourceSeekable
-	manifest    []byte
-	toc         *internal.TOC // The parsed contents of manifest, or nil if not yet available
-	tarSplit    []byte
-	layersCache *layersCache
-	tocOffset   int64
-	fileType    compressedFileType
-
-	copyBuffer []byte
-
-	gzipReader *pgzip.Reader
-	zstdReader *zstd.Decoder
-	rawReader  io.Reader
-
-	// tocDigest is the digest of the TOC document when the layer
-	// is partially pulled.
-	tocDigest digest.Digest
+	// blobDigest is the digest of the whole compressed layer.  It is used,
+	// if convertToZstdChunked is set, to validate the layer when it is
+	// converted, since there is no TOC referenced by the manifest.
+	blobDigest digest.Digest
+	blobSize   int64
 
+	// Input format
+	// ==========
+	fileType compressedFileType
 	// convertedToZstdChunked is set to true if the layer needs to
 	// be converted to the zstd:chunked format before it can be
 	// handled.
 	convertToZstdChunked bool
 
+	// Chunked metadata
+	// This is usually set in GetDiffer, but if convertToZstdChunked is set, it is only computed in chunkedDiffer.ApplyDiff
+	// ==========
+	// tocDigest is the digest of the TOC document when the layer
+	// is partially pulled, or "" if not relevant to consumers.
+	tocDigest           digest.Digest
+	tocOffset           int64
+	manifest            []byte
+	toc                 *minimal.TOC // The parsed contents of manifest, or nil if not yet available
+	tarSplit            []byte
+	uncompressedTarSize int64 // -1 if unknown
 	// skipValidation is set to true if the individual files in
 	// the layer are trusted and should not be validated.
 	skipValidation bool
 
-	// blobDigest is the digest of the whole compressed layer.  It is used if
-	// convertToZstdChunked to validate a layer when it is converted since there
-	// is no TOC referenced by the manifest.
-	blobDigest digest.Digest
-
-	blobSize int64
-
-	storeOpts *types.StoreOptions
-
-	useFsVerity     graphdriver.DifferFsVerity
+	// Long-term caches
+	// This is set in GetDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
+	// ==========
+	layersCache     *layersCache
+	copyBuffer      []byte
+	fsVerityMutex   sync.Mutex // protects fsVerityDigests
 	fsVerityDigests map[string]string
-	fsVerityMutex   sync.Mutex
+
+	// Private state of .ApplyDiff
+	// ==========
+	gzipReader  *pgzip.Reader
+	zstdReader  *zstd.Decoder
+	rawReader   io.Reader
+	useFsVerity graphdriver.DifferFsVerity
 }
 
 var xattrsToIgnore = map[string]interface{}{
@@ -127,98 +120,43 @@ type chunkedLayerData struct {
 	Format graphdriver.DifferOutputFormat `json:"format"`
 }
 
-func timeToTimespec(time *time.Time) (ts unix.Timespec) {
-	if time == nil || time.IsZero() {
-		// Return UTIME_OMIT special value
-		ts.Sec = 0
-		ts.Nsec = ((1 << 30) - 2)
-		return
-	}
-	return unix.NsecToTimespec(time.UnixNano())
-}
-
-func doHardLink(srcFd int, destDirFd int, destBase string) error {
-	doLink := func() error {
-		// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
-		// /proc/self/fd doesn't and can be used with rootless.
-		srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
-		return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
-	}
-
-	err := doLink()
-
-	// if the destination exists, unlink it first and try again
-	if err != nil && os.IsExist(err) {
-		unix.Unlinkat(destDirFd, destBase, 0)
-		return doLink()
-	}
-	return err
-}
-
-func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
-	destFile := fileMetadata.Name
-	src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
-	st, err := os.Stat(src)
-	if err != nil {
-		return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
-	}
-
-	copyWithFileRange, copyWithFileClone := true, true
-
-	if useHardLinks {
-		destDirPath := filepath.Dir(destFile)
-		destBase := filepath.Base(destFile)
-		destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
-		if err == nil {
-			defer destDir.Close()
-
-			err := doHardLink(srcFd, int(destDir.Fd()), destBase)
-			if err == nil {
-				// if the file was deduplicated with a hard link, skip overriding file metadata.
-				fileMetadata.skipSetAttrs = true
-				return nil, st.Size(), nil
-			}
+// pullOptions contains parsed data from storage.Store.PullOptions.
+// TODO: ideally this should be parsed along with the rest of the config file into StoreOptions directly
+// (and then storage.Store.PullOptions would need to be somehow simulated).
+type pullOptions struct {
+	enablePartialImages                     bool     // enable_partial_images
+	convertImages                           bool     // convert_images
+	useHardLinks                            bool     // use_hard_links
+	insecureAllowUnpredictableImageContents bool     // insecure_allow_unpredictable_image_contents
+	ostreeRepos                             []string // ostree_repos
+}
+
+func parsePullOptions(store storage.Store) pullOptions {
+	options := store.PullOptions()
+
+	res := pullOptions{}
+	for _, e := range []struct {
+		dest         *bool
+		name         string
+		defaultValue bool
+	}{
+		{&res.enablePartialImages, "enable_partial_images", false},
+		{&res.convertImages, "convert_images", false},
+		{&res.useHardLinks, "use_hard_links", false},
+		{&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false},
+	} {
+		if value, ok := options[e.name]; ok {
+			*e.dest = strings.ToLower(value) == "true"
+		} else {
+			*e.dest = e.defaultValue
 		}
 	}
+	res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
 
-	// If the destination file already exists, we shouldn't blow it away
-	dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
-	if err != nil {
-		return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
-	}
-
-	err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
-	if err != nil {
-		dstFile.Close()
-		return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
-	}
-	return dstFile, st.Size(), nil
-}
-
-type seekableFile struct {
-	file *os.File
-}
-
-func (f *seekableFile) Close() error {
-	return f.file.Close()
-}
-
-func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
-	streams := make(chan io.ReadCloser)
-	errs := make(chan error)
-
-	go func() {
-		for _, chunk := range chunks {
-			streams <- io.NopCloser(io.NewSectionReader(f.file, int64(chunk.Offset), int64(chunk.Length)))
-		}
-		close(streams)
-		close(errs)
-	}()
-
-	return streams, errs, nil
+	return res
 }
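
For reference, a small sketch of the same parsing applied to a plain map (keys mirror the pull_options table in storage.conf). One inherited quirk worth noting: when ostree_repos is absent, strings.Split("", ":") yields a one-element slice containing the empty string.

options := map[string]string{
	"enable_partial_images": "true",
	"ostree_repos":          "/repo1:/repo2",
}
enable := strings.ToLower(options["enable_partial_images"]) == "true" // true
repos := strings.Split(options["ostree_repos"], ":")                  // []string{"/repo1", "/repo2"}
_, _ = enable, repos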
 
-func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
+func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
 	diff, err := archive.DecompressStream(payload)
 	if err != nil {
 		return 0, nil, "", nil, err
@@ -226,7 +164,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
 
 	fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
 	if err != nil {
-		return 0, nil, "", nil, err
+		return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
 	}
 
 	f := os.NewFile(uintptr(fd), destDirectory)
@@ -240,7 +178,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
 	}
 
 	convertedOutputDigester := digest.Canonical.Digester()
-	copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff)
+	copied, err := io.CopyBuffer(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff, c.copyBuffer)
 	if err != nil {
 		f.Close()
 		return 0, nil, "", nil, err
@@ -249,100 +187,203 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
 		f.Close()
 		return 0, nil, "", nil, err
 	}
-	is := seekableFile{
-		file: f,
-	}
 
-	return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil
+	return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil
 }
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
+// retry the operation with a different method.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	storeOpts, err := types.DefaultStoreOptions()
+	pullOptions := parsePullOptions(store)
+
+	if !pullOptions.enablePartialImages {
+		// If pullOptions.convertImages is set, the two options disagree about whether a fallback is permissible.
+		// Right now, we enable it, but that’s not a promise; rather, such a configuration should ideally be rejected.
+		return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
+	}
+	// pullOptions.convertImages also serves as a “must not fall back to a non-partial pull” option (?!)
+
+	graphDriver, err := store.GraphDriver()
 	if err != nil {
 		return nil, err
 	}
+	if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
+		if pullOptions.convertImages {
+			return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
+		}
+		return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
+	}
 
-	if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
-		return nil, errors.New("enable_partial_images not configured")
+	differ, err := getProperDiffer(store, blobDigest, blobSize, annotations, iss, pullOptions)
+	if err != nil {
+		var fallbackErr ErrFallbackToOrdinaryLayerDownload
+		if !errors.As(err, &fallbackErr) {
+			return nil, err
+		}
+		// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
+		if !pullOptions.convertImages {
+			return nil, err
+		}
+		var canConvertErr errFallbackCanConvert
+		if !errors.As(err, &canConvertErr) {
+			// We are supposed to use makeConvertFromRawDiffer, but that would not work.
+			// Fail, and make sure the error does _not_ match ErrFallbackToOrdinaryLayerDownload: use only the error text,
+			// discard all type information.
+			return nil, fmt.Errorf("neither a partial pull nor convert_images is possible: %s", err.Error())
+		}
+		logrus.Debugf("Created differ to convert blob %q", blobDigest)
+		return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
 	}
 
-	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
+	return differ, nil
+}
+
+// errFallbackCanConvert is an error type _accompanying_ ErrFallbackToOrdinaryLayerDownload
+// within getProperDiffer, to mark that using makeConvertFromRawDiffer makes sense.
+// This is used to distinguish cases where the environment does not support partial pulls
+// (e.g. a registry does not support range requests) and convert_images is still possible
+// from cases where the image content is unacceptable for partial pulls (e.g. exceeds memory limits)
+// and convert_images would not help.
+type errFallbackCanConvert struct {
+	err error
+}
+
+func (e errFallbackCanConvert) Error() string {
+	return e.err.Error()
+}
+
+func (e errFallbackCanConvert) Unwrap() error {
+	return e.err
+}
+
+// getProperDiffer is an implementation detail of GetDiffer.
+// It returns a “proper” differ (not a convert_images one) if possible.
+// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative
+// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible.
+func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, error) {
+	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey]
 	estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
 
-	if hasZstdChunkedTOC && hasEstargzTOC {
+	switch {
+	case hasZstdChunkedTOC && hasEstargzTOC:
 		return nil, errors.New("both zstd:chunked and eStargz TOC found")
-	}
 
-	if hasZstdChunkedTOC {
+	case hasZstdChunkedTOC:
 		zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
 		if err != nil {
-			return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
+			return nil, err
 		}
-		return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts)
-	}
-	if hasEstargzTOC {
-		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
+		differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
 		if err != nil {
-			return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
+			logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
+			return nil, err
 		}
-		return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts)
-	}
+		logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
+		return differ, nil
 
-	return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
-}
+	case hasEstargzTOC:
+		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
+		if err != nil {
+			return nil, err
+		}
+		differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
+		if err != nil {
+			logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
+			return nil, err
+		}
+		logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
+		return differ, nil
 
-func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
-	if !parseBooleanPullOption(storeOpts, "convert_images", false) {
-		return nil, errors.New("convert_images not configured")
+	default: // no TOC
+		message := "no TOC found"
+		if !pullOptions.convertImages {
+			message = "no TOC found and convert_images is not configured"
+		}
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(errors.New(message)),
+		}
 	}
+}
 
+func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		fsVerityDigests:      make(map[string]string),
-		blobDigest:           blobDigest,
-		blobSize:             blobSize,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobDigest:  blobDigest,
+		blobSize:    blobSize,
+
 		convertToZstdChunked: true,
-		copyBuffer:           makeCopyBuffer(),
-		layersCache:          layersCache,
-		storeOpts:            storeOpts,
-		stream:               iss,
+
+		uncompressedTarSize: -1, // Will be computed later
+
+		layersCache:     layersCache,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
 
-func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
+// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
 	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
-	if err != nil {
+	if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
+
+	var uncompressedTarSize int64 = -1
+	if tarSplit != nil {
+		uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
+		if err != nil {
+			return nil, fmt.Errorf("computing size from tar-split: %w", err)
+		}
+	} else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")),
+		}
+	}
+
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		fsVerityDigests: make(map[string]string),
-		blobSize:        blobSize,
-		tocDigest:       tocDigest,
-		copyBuffer:      makeCopyBuffer(),
-		fileType:        fileTypeZstdChunked,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobSize:    blobSize,
+
+		fileType: fileTypeZstdChunked,
+
+		tocDigest:           tocDigest,
+		tocOffset:           tocOffset,
+		manifest:            manifest,
+		toc:                 toc,
+		tarSplit:            tarSplit,
+		uncompressedTarSize: uncompressedTarSize,
+
 		layersCache:     layersCache,
-		manifest:        manifest,
-		toc:             toc,
-		storeOpts:       storeOpts,
-		stream:          iss,
-		tarSplit:        tarSplit,
-		tocOffset:       tocOffset,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
 
-func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
+// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer.
+// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
+func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
+	if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
+		return nil, errFallbackCanConvert{
+			newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")),
+		}
+	}
+
 	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
-	if err != nil {
+	if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
 	layersCache, err := getLayersCache(store)
@@ -351,16 +392,20 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
 	}
 
 	return &chunkedDiffer{
-		fsVerityDigests: make(map[string]string),
-		blobSize:        blobSize,
-		tocDigest:       tocDigest,
-		copyBuffer:      makeCopyBuffer(),
-		fileType:        fileTypeEstargz,
+		pullOptions: pullOptions,
+		stream:      iss,
+		blobSize:    blobSize,
+
+		fileType: fileTypeEstargz,
+
+		tocDigest:           tocDigest,
+		tocOffset:           tocOffset,
+		manifest:            manifest,
+		uncompressedTarSize: -1, // We would have to read and decompress the whole layer
+
 		layersCache:     layersCache,
-		manifest:        manifest,
-		storeOpts:       storeOpts,
-		stream:          iss,
-		tocOffset:       tocOffset,
+		copyBuffer:      makeCopyBuffer(),
+		fsVerityDigests: make(map[string]string),
 	}, nil
 }
 
@@ -375,15 +420,15 @@ func makeCopyBuffer() []byte {
 // dirfd is an open file descriptor to the destination root directory.
 // useHardLinks defines whether the deduplication can be performed using hard links.
 func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
-	srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
+	srcDirfd, err := unix.Open(source, unix.O_RDONLY|unix.O_CLOEXEC, 0)
 	if err != nil {
-		return false, nil, 0, fmt.Errorf("open source file: %w", err)
+		return false, nil, 0, &fs.PathError{Op: "open", Path: source, Err: err}
 	}
 	defer unix.Close(srcDirfd)
 
-	srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
+	srcFile, err := openFileUnderRoot(srcDirfd, name, unix.O_RDONLY|syscall.O_CLOEXEC, 0)
 	if err != nil {
-		return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err)
+		return false, nil, 0, err
 	}
 	defer srcFile.Close()
 
@@ -420,7 +465,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
 		return false
 	}
 
-	path := fmt.Sprintf("/proc/self/fd/%d", fd)
+	path := procPathForFd(fd)
 
 	listXattrs, err := system.Llistxattr(path)
 	if err != nil {
@@ -441,7 +486,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
 	}
 	// fill only the attributes used by canDedupMetadataWithHardLink.
 	otherFile := fileMetadata{
-		FileMetadata: internal.FileMetadata{
+		FileMetadata: minimal.FileMetadata{
 			UID:    int(st.Uid),
 			GID:    int(st.Gid),
 			Mode:   int64(st.Mode),
@@ -476,7 +521,7 @@ func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int,
 		if st.Size() != file.Size {
 			continue
 		}
-		fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+		fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK|unix.O_CLOEXEC, 0)
 		if err != nil {
 			logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err)
 			return false, nil, 0, nil
@@ -585,15 +630,15 @@ type missingPart struct {
 }
 
 func (o *originFile) OpenFile() (io.ReadCloser, error) {
-	srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0)
+	srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY|unix.O_CLOEXEC, 0)
 	if err != nil {
-		return nil, fmt.Errorf("open source file: %w", err)
+		return nil, &fs.PathError{Op: "open", Path: o.Root, Err: err}
 	}
 	defer unix.Close(srcDirfd)
 
-	srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0)
+	srcFile, err := openFileUnderRoot(srcDirfd, o.Path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
 	if err != nil {
-		return nil, fmt.Errorf("open source file under target rootfs: %w", err)
+		return nil, err
 	}
 
 	if _, err := srcFile.Seek(o.Offset, 0); err != nil {
@@ -603,253 +648,6 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) {
 	return srcFile, nil
 }
 
-// setFileAttrs sets the file attributes for file given metadata
-func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
-	if metadata.skipSetAttrs {
-		return nil
-	}
-	if file == nil || file.Fd() < 0 {
-		return errors.New("invalid file")
-	}
-	fd := int(file.Fd())
-
-	t, err := typeToTarType(metadata.Type)
-	if err != nil {
-		return err
-	}
-
-	// If it is a symlink, force to use the path
-	if t == tar.TypeSymlink {
-		usePath = true
-	}
-
-	baseName := ""
-	if usePath {
-		dirName := filepath.Dir(metadata.Name)
-		if dirName != "" {
-			parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0)
-			if err != nil {
-				return err
-			}
-			defer parentFd.Close()
-
-			dirfd = int(parentFd.Fd())
-		}
-		baseName = filepath.Base(metadata.Name)
-	}
-
-	doChown := func() error {
-		if usePath {
-			return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW)
-		}
-		return unix.Fchown(fd, metadata.UID, metadata.GID)
-	}
-
-	doSetXattr := func(k string, v []byte) error {
-		return unix.Fsetxattr(fd, k, v, 0)
-	}
-
-	doUtimes := func() error {
-		ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
-		if usePath {
-			return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
-		}
-		return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0)
-	}
-
-	doChmod := func() error {
-		if usePath {
-			return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
-		}
-		return unix.Fchmod(fd, uint32(mode))
-	}
-
-	if err := doChown(); err != nil {
-		if !options.IgnoreChownErrors {
-			return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err)
-		}
-	}
-
-	canIgnore := func(err error) bool {
-		return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
-	}
-
-	for k, v := range metadata.Xattrs {
-		if _, found := xattrsToIgnore[k]; found {
-			continue
-		}
-		data, err := base64.StdEncoding.DecodeString(v)
-		if err != nil {
-			return fmt.Errorf("decode xattr %q: %w", v, err)
-		}
-		if err := doSetXattr(k, data); !canIgnore(err) {
-			return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
-		}
-	}
-
-	if err := doUtimes(); !canIgnore(err) {
-		return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
-	}
-
-	if err := doChmod(); !canIgnore(err) {
-		return fmt.Errorf("chmod %q: %w", metadata.Name, err)
-	}
-	return nil
-}
-
-func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
-	root := fmt.Sprintf("/proc/self/fd/%d", dirfd)
-
-	targetRoot, err := os.Readlink(root)
-	if err != nil {
-		return -1, err
-	}
-
-	hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
-
-	var fd int
-	// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
-	// last component as the path to openat().
-	if hasNoFollow {
-		dirName := filepath.Dir(name)
-		if dirName != "" {
-			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
-			if err != nil {
-				return -1, err
-			}
-			root = newRoot
-		}
-
-		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
-		if err != nil {
-			return -1, err
-		}
-		defer unix.Close(parentDirfd)
-
-		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
-		if err != nil {
-			return -1, err
-		}
-	} else {
-		newPath, err := securejoin.SecureJoin(root, name)
-		if err != nil {
-			return -1, err
-		}
-		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
-		if err != nil {
-			return -1, err
-		}
-	}
-
-	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
-	if err != nil {
-		unix.Close(fd)
-		return -1, err
-	}
-
-	// Add an additional check to make sure the opened fd is inside the rootfs
-	if !strings.HasPrefix(target, targetRoot) {
-		unix.Close(fd)
-		return -1, fmt.Errorf("while resolving %q.  It resolves outside the root directory", name)
-	}
-
-	return fd, err
-}
-
-func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
-	how := unix.OpenHow{
-		Flags:   flags,
-		Mode:    uint64(mode & 0o7777),
-		Resolve: unix.RESOLVE_IN_ROOT,
-	}
-	return unix.Openat2(dirfd, name, &how)
-}
-
-// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid
-// using it again.
-var skipOpenat2 int32
-
-// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a
-// userspace lookup.
-func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
-	var fd int
-	var err error
-	if atomic.LoadInt32(&skipOpenat2) > 0 {
-		fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
-	} else {
-		fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
-		// If the function failed with ENOSYS, switch off the support for openat2
-		// and fallback to using safejoin.
-		if err != nil && errors.Is(err, unix.ENOSYS) {
-			atomic.StoreInt32(&skipOpenat2, 1)
-			fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
-		}
-	}
-	return fd, err
-}
-
-// openFileUnderRoot safely opens a file under the specified root directory using openat2
-// name is the path to open relative to dirfd.
-// dirfd is an open file descriptor to the target checkout directory.
-// flags are the flags to pass to the open syscall.
-// mode specifies the mode to use for newly created files.
-func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
-	fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
-	if err == nil {
-		return os.NewFile(uintptr(fd), name), nil
-	}
-
-	hasCreate := (flags & unix.O_CREAT) != 0
-	if errors.Is(err, unix.ENOENT) && hasCreate {
-		parent := filepath.Dir(name)
-		if parent != "" {
-			newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0)
-			if err2 == nil {
-				defer newDirfd.Close()
-				fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
-				if err == nil {
-					return os.NewFile(uintptr(fd), name), nil
-				}
-			}
-		}
-	}
-	return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
-}
-
-// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing.
-// name is the path to open relative to dirfd.
-// dirfd is an open file descriptor to the target checkout directory.
-// mode specifies the mode to use for newly created files.
-func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) {
-	fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode)
-	if err == nil {
-		return os.NewFile(uintptr(fd), name), nil
-	}
-
-	if errors.Is(err, unix.ENOENT) {
-		parent := filepath.Dir(name)
-		if parent != "" {
-			pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode)
-			if err2 != nil {
-				return nil, err
-			}
-			defer pDir.Close()
-
-			baseName := filepath.Base(name)
-
-			if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil {
-				return nil, err
-			}
-
-			fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode)
-			if err == nil {
-				return os.NewFile(uintptr(fd), name), nil
-			}
-		}
-	}
-	return nil, err
-}
-
 func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
 	switch {
 	case partCompression == fileTypeHole:
@@ -918,23 +716,14 @@ func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
 	return nil
 }
 
-// appendHole creates a hole with the specified size at the open fd.
-func appendHole(fd int, size int64) error {
-	off, err := unix.Seek(fd, size, unix.SEEK_CUR)
-	if err != nil {
-		return err
-	}
-	// Make sure the file size is changed.  It might be the last hole and no other data written afterwards.
-	if err := unix.Ftruncate(fd, off); err != nil {
-		return err
-	}
-	return nil
-}
-
 func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
 	switch compression {
 	case fileTypeZstdChunked:
-		defer c.zstdReader.Reset(nil)
+		defer func() {
+			if err := c.zstdReader.Reset(nil); err != nil {
+				logrus.Warnf("release of references to the previous zstd reader failed: %v", err)
+			}
+		}()
 		if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
 			return err
 		}
@@ -948,7 +737,7 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT
 			return err
 		}
 	case fileTypeHole:
-		if err := appendHole(int(destFile.file.Fd()), size); err != nil {
+		if err := appendHole(int(destFile.file.Fd()), destFile.metadata.Name, size); err != nil {
 			return err
 		}
 		if destFile.hash != nil {
@@ -977,7 +766,7 @@ type destinationFile struct {
 }
 
 func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
-	file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
+	file, err := openFileUnderRoot(dirfd, metadata.Name, newFileFlags, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -1041,7 +830,12 @@ func (d *destinationFile) Close() (Err error) {
 		}
 	}
 
-	return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
+	mode := os.FileMode(d.metadata.Mode)
+	if d.options.ForceMask != nil {
+		mode = *d.options.ForceMask
+	}
+
+	return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false)
 }
 
 func closeDestinationFiles(files chan *destinationFile, errors chan error) {
@@ -1080,7 +874,7 @@ func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
 	return nil
 }
 
-func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
 	var destFile *destinationFile
 
 	filesToClose := make(chan *destinationFile, 3)
@@ -1294,7 +1088,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 	return newMissingParts
 }
 
-func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
+func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
 	var chunksToRequest []ImageSourceChunk
 
 	calculateChunksToRequest := func() {
@@ -1320,11 +1114,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
 		}
 
 		if _, ok := err.(ErrBadRequest); ok {
-			// If the server cannot handle at least 64 chunks in a single request, just give up.
-			if len(chunksToRequest) < 64 {
+			if len(chunksToRequest) == 1 {
 				return err
 			}
-
 			// Merge more chunks to request
 			missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
 			calculateChunksToRequest()
@@ -1333,164 +1125,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
 		return err
 	}
 
-	if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
-		return err
-	}
-	return nil
-}
-
-func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
-	parent := filepath.Dir(name)
-	base := filepath.Base(name)
-
-	parentFd := dirfd
-	if parent != "." {
-		parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0)
-		if err != nil {
-			return err
-		}
-		defer parentFile.Close()
-		parentFd = int(parentFile.Fd())
-	}
-
-	if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
-		if !os.IsExist(err) {
-			return fmt.Errorf("mkdir %q: %w", name, err)
-		}
-	}
-
-	file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-
-	return setFileAttrs(dirfd, file, mode, metadata, options, false)
-}
-
-func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
-	sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
-	if err != nil {
-		return err
-	}
-	defer sourceFile.Close()
-
-	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
-	destDirFd := dirfd
-	if destDir != "." {
-		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-		destDirFd = int(f.Fd())
-	}
-
-	err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase)
-	if err != nil {
-		return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
-	}
-
-	newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
-	if err != nil {
-		// If the target is a symlink, open the file with O_PATH.
-		if errors.Is(err, unix.ELOOP) {
-			newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0)
-			if err != nil {
-				return err
-			}
-			defer newFile.Close()
-
-			return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
-		}
-		return err
-	}
-	defer newFile.Close()
-
-	return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
-}
-
-func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
-	destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
-	destDirFd := dirfd
-	if destDir != "." {
-		f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-		destDirFd = int(f.Fd())
-	}
-
-	if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
-		return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
-	}
-	return nil
-}
-
-type whiteoutHandler struct {
-	Dirfd int
-	Root  string
-}
-
-func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
-	file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-
-	if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
-		return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err)
-	}
-	return nil
-}
-
-func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
-	dir := filepath.Dir(path)
-	base := filepath.Base(path)
-
-	dirfd := d.Dirfd
-	if dir != "" {
-		dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0)
-		if err != nil {
-			return err
-		}
-		defer dir.Close()
-
-		dirfd = int(dir.Fd())
-	}
-
-	if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
-		return fmt.Errorf("mknod %q: %w", path, err)
-	}
-
-	return nil
-}
-
-func checkChownErr(err error, name string, uid, gid int) error {
-	if errors.Is(err, syscall.EINVAL) {
-		return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
-	}
-	return err
-}
-
-func (d whiteoutHandler) Chown(path string, uid, gid int) error {
-	file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0)
-	if err != nil {
+	if err := c.storeMissingFiles(streams, errs, dirfd, missingParts, options); err != nil {
 		return err
 	}
-	defer file.Close()
-
-	if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil {
-		var stat unix.Stat_t
-		if unix.Fstat(int(file.Fd()), &stat) == nil {
-			if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) {
-				return nil
-			}
-		}
-		return checkChownErr(err, path, uid, gid)
-	}
 	return nil
 }
 
@@ -1501,13 +1138,6 @@ type hardLinkToCreate struct {
 	metadata *fileMetadata
 }
 
-func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
-	if value, ok := storeOpts.PullOptions[name]; ok {
-		return strings.ToLower(value) == "true"
-	}
-	return def
-}
-
 type findAndCopyFileOptions struct {
 	useHardLinks bool
 	ostreeRepos  []string
@@ -1515,10 +1145,10 @@ type findAndCopyFileOptions struct {
 }
 
 func reopenFileReadOnly(f *os.File) (*os.File, error) {
-	path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
+	path := procPathForFile(f)
 	fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
 	if err != nil {
-		return nil, err
+		return nil, &fs.PathError{Op: "open", Path: path, Err: err}
 	}
 	return os.NewFile(uintptr(fd), f.Name()), nil
 }
@@ -1574,10 +1204,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions
 	return false, nil
 }
 
-func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
+// makeEntriesFlat collects the regular-file entries from mergedEntries, and produces a new list
+// where each file's content is represented only once, using composefs.RegularFilePathForValidatedDigest for its name.
+// If flatPathNameMap is not nil, this function writes into it a mapping from filepath.Clean(originalName) to the composefs name.
+func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
 	var new []fileMetadata
 
-	hashes := make(map[string]string)
+	knownFlatPaths := make(map[string]struct{})
 	for i := range mergedEntries {
 		if mergedEntries[i].Type != TypeReg {
 			continue
@@ -1587,16 +1220,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
 		}
 		digest, err := digest.Parse(mergedEntries[i].Digest)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err)
+		}
+		path, err := path.RegularFilePathForValidatedDigest(digest)
+		if err != nil {
+			return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err)
+		}
+		if flatPathNameMap != nil {
+			flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path
 		}
-		d := digest.Encoded()
 
-		if hashes[d] != "" {
+		if _, known := knownFlatPaths[path]; known {
 			continue
 		}
-		hashes[d] = d
+		knownFlatPaths[path] = struct{}{}
 
-		mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
+		mergedEntries[i].Name = path
 		mergedEntries[i].skipSetAttrs = true
 
 		new = append(new, mergedEntries[i])
@@ -1604,43 +1243,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
 	return new, nil
 }
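+
+// Illustrative sketch (hypothetical, truncated digest value): the flat layout places content
+// under a digest-derived name with exactly one directory component, e.g.
+//
+//	d, _ := digest.Parse("sha256:0123...") // hypothetical
+//	p, _ := path.RegularFilePathForValidatedDigest(d)
+//	// p is of the form "01/23...", so identical content shares one backing file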
 
-func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
-	var payload io.ReadCloser
-	var streams chan io.ReadCloser
-	var errs chan error
-	var err error
+type streamOrErr struct {
+	stream io.ReadCloser
+	err    error
+}
 
-	chunksToRequest := []ImageSourceChunk{
-		{
-			Offset: 0,
-			Length: uint64(c.blobSize),
-		},
+// ensureAllBlobsDone drains streamsOrErrors, closing every remaining stream, and returns the first error encountered.
+func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			_ = soe.stream.Close()
+		} else if retErr == nil {
+			retErr = soe.err
+		}
 	}
+	return
+}
 
-	streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
-	if err != nil {
-		return "", err
+// getBlobAtConverterGoroutine reads from the streams and errs channels and forwards
+// each item, as either a stream or an error, to the stream channel.  The stream channel
+// is closed once there are no more streams or errors to read.
+// It ensures that no more than maxStreams streams are forwarded, and that every item from the
+// streams and errs channels is consumed.
+func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) {
+	tooManyStreams := false
+	streamsSoFar := 0
+
+	err := errors.New("unexpected error in getBlobAtConverterGoroutine")
+
+	defer func() {
+		if err != nil {
+			stream <- streamOrErr{err: err}
+		}
+		close(stream)
+	}()
+
+loop:
+	for {
+		select {
+		case p, ok := <-streams:
+			if !ok {
+				streams = nil
+				break loop
+			}
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		case err, ok := <-errs:
+			if !ok {
+				errs = nil
+				break loop
+			}
+			stream <- streamOrErr{err: err}
+		}
 	}
-	select {
-	case p := <-streams:
-		payload = p
-	case err := <-errs:
-		return "", err
+	if streams != nil {
+		for p := range streams {
+			if streamsSoFar >= maxStreams {
+				tooManyStreams = true
+				_ = p.Close()
+				continue
+			}
+			streamsSoFar++
+			stream <- streamOrErr{stream: p}
+		}
+	}
+	if errs != nil {
+		for err := range errs {
+			stream <- streamOrErr{err: err}
+		}
 	}
-	if payload == nil {
-		return "", errors.New("invalid stream returned")
+	if tooManyStreams {
+		stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)}
 	}
+	err = nil
+}
 
-	originalRawDigester := digest.Canonical.Digester()
+// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt.
+// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel
+// always succeeds in select.
+// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`.
+func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) {
+	streams, errs, err := is.GetBlobAt(chunksToRequest)
+	if err != nil {
+		return nil, err
+	}
+	stream := make(chan streamOrErr)
+	go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest))
+	return stream, nil
+}
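+
+// A minimal consumption sketch (illustrative; is and process are hypothetical):
+//
+//	streamsOrErrors, err := getBlobAt(is, ImageSourceChunk{Offset: 0, Length: 512})
+//	if err != nil {
+//		return err
+//	}
+//	for soe := range streamsOrErrors {
+//		if soe.err != nil {
+//			_ = ensureAllBlobsDone(streamsOrErrors) // drain and close the rest
+//			return soe.err
+//		}
+//		process(soe.stream)
+//		_ = soe.stream.Close()
+//	}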
 
-	r := io.TeeReader(payload, originalRawDigester.Hash())
+func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
+	streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)})
+	if err != nil {
+		return "", err
+	}
 
-	// copy the entire tarball and compute its digest
-	_, err = io.Copy(destination, r)
+	originalRawDigester := digest.Canonical.Digester()
+	for soe := range streamsOrErrors {
+		if soe.stream != nil {
+			r := io.TeeReader(soe.stream, originalRawDigester.Hash())
 
+			// copy the entire tarball and compute its digest
+			_, err = io.CopyBuffer(destination, r, c.copyBuffer)
+			_ = soe.stream.Close()
+		}
+		if soe.err != nil && err == nil {
+			err = soe.err
+		}
+	}
 	return originalRawDigester.Digest(), err
 }
 
+func typeToOsMode(typ string) (os.FileMode, error) {
+	switch typ {
+	case TypeReg, TypeLink:
+		return 0, nil
+	case TypeSymlink:
+		return os.ModeSymlink, nil
+	case TypeDir:
+		return os.ModeDir, nil
+	case TypeChar:
+		return os.ModeDevice | os.ModeCharDevice, nil
+	case TypeBlock:
+		return os.ModeDevice, nil
+	case TypeFifo:
+		return os.ModeNamedPipe, nil
+	}
+	return 0, fmt.Errorf("unknown file type %q", typ)
+}
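+
+// Sketch of how this feeds the override xattr under ForceMask below (values hypothetical):
+//
+//	typeMode, _ := typeToOsMode(TypeChar) // os.ModeDevice | os.ModeCharDevice
+//	v := idtools.FormatContainersOverrideXattrDevice(0, 0, typeMode|0o600, 1, 3)
+//	// v == "0:0:0600:char-1-3"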
+
 func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
 	defer c.layersCache.release()
 	defer func() {
@@ -1654,13 +1390,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	// stream to use for reading the zstd:chunked or Estargz file.
 	stream := c.stream
 
+	var compressedDigest digest.Digest
 	var uncompressedDigest digest.Digest
-	var convertedBlobSize int64
 
 	if c.convertToZstdChunked {
 		fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
 		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
+			return graphdriver.DriverWithDifferOutput{}, &fs.PathError{Op: "open", Path: dest, Err: err}
 		}
 		blobFile := os.NewFile(uintptr(fd), "blob-file")
 		defer func() {
@@ -1670,7 +1406,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		}()
 
 		// calculate the checksum before accessing the file.
-		compressedDigest, err := c.copyAllBlobToFile(blobFile)
+		compressedDigest, err = c.copyAllBlobToFile(blobFile)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
@@ -1683,11 +1419,11 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
 
-		tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
+		tarSize, fileSource, diffID, annotations, err := c.convertTarToZstdChunked(dest, blobFile)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-		convertedBlobSize = tarSize
+		c.uncompressedTarSize = tarSize
 		// fileSource is a O_TMPFILE file descriptor, so we
 		// need to keep it open until the entire file is processed.
 		defer fileSource.Close()
@@ -1756,32 +1492,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		},
 		TOCDigest:          c.tocDigest,
 		UncompressedDigest: uncompressedDigest,
+		CompressedDigest:   compressedDigest,
+		Size:               c.uncompressedTarSize,
 	}
 
-	// When the hard links deduplication is used, file attributes are ignored because setting them
-	// modifies the source file as well.
-	useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false)
-
-	// List of OSTree repositories to use for deduplication
-	ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":")
-
 	whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 
 	var missingParts []missingPart
 
-	mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
+	mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
 	if err != nil {
 		return output, err
 	}
 
 	output.UIDs, output.GIDs = collectIDs(mergedEntries)
-	if convertedBlobSize > 0 {
-		// if the image was converted, store the original tar size, so that
-		// it can be recreated correctly.
-		output.Size = convertedBlobSize
-	} else {
-		output.Size = totalSizeFromTOC
-	}
 
 	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
 		return output, err
@@ -1790,29 +1514,38 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	if options.ForceMask != nil {
 		uid, gid, mode, err := archive.GetFileOwner(dest)
 		if err == nil {
-			value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
-			if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
+			value := idtools.Stat{
+				IDs:  idtools.IDPair{UID: int(uid), GID: int(gid)},
+				Mode: os.ModeDir | os.FileMode(mode),
+			}
+			if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
 				return output, err
 			}
 		}
 	}
 
-	dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
+	dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0)
 	if err != nil {
-		return output, fmt.Errorf("cannot open %q: %w", dest, err)
+		return output, &fs.PathError{Op: "open", Path: dest, Err: err}
 	}
-	defer unix.Close(dirfd)
+	dirFile := os.NewFile(uintptr(dirfd), dest)
+	defer dirFile.Close()
 
+	var flatPathNameMap map[string]string // = nil
 	if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
-		mergedEntries, err = makeEntriesFlat(mergedEntries)
+		flatPathNameMap = map[string]string{}
+		mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap)
 		if err != nil {
 			return output, err
 		}
 		createdDirs := make(map[string]struct{})
 		for _, e := range mergedEntries {
-			d := e.Name[0:2]
+			// This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component.
+			d := filepath.Dir(e.Name)
 			if _, found := createdDirs[d]; !found {
-				unix.Mkdirat(dirfd, d, 0o755)
+				if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
+					return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
+				}
 				createdDirs[d] = struct{}{}
 			}
 		}
@@ -1825,8 +1558,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	missingPartsSize, totalChunksSize := int64(0), int64(0)
 
 	copyOptions := findAndCopyFileOptions{
-		useHardLinks: useHardLinks,
-		ostreeRepos:  ostreeRepos,
+		// When the hard links deduplication is used, file attributes are ignored because setting them
+		// modifies the source file as well.
+		useHardLinks: c.pullOptions.useHardLinks,
+		ostreeRepos:  c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication
 		options:      options,
 	}
 
@@ -1852,7 +1587,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		wg.Wait()
 	}()
 
-	for i := 0; i < copyGoRoutines; i++ {
+	for range copyGoRoutines {
 		wg.Add(1)
 		jobs := copyFileJobs
 
@@ -1868,12 +1603,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	}
 
 	filesToWaitFor := 0
-	for i, r := range mergedEntries {
-		if options.ForceMask != nil {
-			value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777)
-			r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
-			r.Mode = int64(*options.ForceMask)
-		}
+	for i := range mergedEntries {
+		r := &mergedEntries[i]
 
 		mode := os.FileMode(r.Mode)
 
@@ -1882,10 +1613,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return output, err
 		}
 
-		r.Name = filepath.Clean(r.Name)
+		size := r.Size
+
+		// When changing this logic, update the ForceMask implementation in pkg/archive as well.
+		if options.ForceMask != nil {
+			mode = *options.ForceMask
+
+			// special files will be stored as regular files
+			if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
+				t = tar.TypeReg
+				size = 0
+			}
+
+			// if the entry will be stored as a directory or a regular file, store in a xattr the original
+			// owner and mode.
+			if t == tar.TypeDir || t == tar.TypeReg {
+				typeMode, err := typeToOsMode(r.Type)
+				if err != nil {
+					return output, err
+				}
+				value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor))
+				if r.Xattrs == nil {
+					r.Xattrs = make(map[string]string)
+				}
+				r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
+			}
+		}
+
+		r.Name = path.CleanAbsPath(r.Name)
 		// do not modify the value of symlinks
 		if r.Linkname != "" && t != tar.TypeSymlink {
-			r.Linkname = filepath.Clean(r.Linkname)
+			r.Linkname = path.CleanAbsPath(r.Linkname)
 		}
 
 		if whiteoutConverter != nil {
@@ -1893,8 +1651,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 				Typeflag: t,
 				Name:     r.Name,
 				Linkname: r.Linkname,
-				Size:     r.Size,
-				Mode:     r.Mode,
+				Size:     size,
+				Mode:     int64(mode),
 				Uid:      r.UID,
 				Gid:      r.GID,
 			}
@@ -1913,15 +1671,15 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		switch t {
 		case tar.TypeReg:
 			// Create directly empty files.
-			if r.Size == 0 {
+			if size == 0 {
 				// Used to have a scope for cleanup.
 				createEmptyFile := func() error {
-					file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
+					file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
 					if err != nil {
 						return err
 					}
 					defer file.Close()
-					if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
+					if err := setFileAttrs(dirfd, file, mode, r, options, false); err != nil {
 						return err
 					}
 					return nil
@@ -1933,10 +1691,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			}
 
 		case tar.TypeDir:
-			if r.Name == "" || r.Name == "." {
+			if r.Name == "/" {
 				output.RootDirMode = &mode
 			}
-			if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
+			if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
 				return output, err
 			}
 			continue
@@ -1950,12 +1708,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 				dest:     dest,
 				dirfd:    dirfd,
 				mode:     mode,
-				metadata: &r,
+				metadata: r,
 			})
 			continue
 
 		case tar.TypeSymlink:
-			if err := safeSymlink(dirfd, mode, &r, options); err != nil {
+			if err := safeSymlink(dirfd, r); err != nil {
 				return output, err
 			}
 			continue
@@ -1968,7 +1726,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			return output, fmt.Errorf("invalid type %q", t)
 		}
 
-		totalChunksSize += r.Size
+		totalChunksSize += size
 
 		if t == tar.TypeReg {
 			index := i
@@ -2031,7 +1789,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			}
 
 			switch chunk.ChunkType {
-			case internal.ChunkTypeData:
+			case minimal.ChunkTypeData:
 				root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
 				if err != nil {
 					return output, err
@@ -2044,7 +1802,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 						Offset: offset,
 					}
 				}
-			case internal.ChunkTypeZeros:
+			case minimal.ChunkTypeZeros:
 				missingPartsSize -= size
 				mp.Hole = true
 				// Mark all chunks belonging to the missing part as holes
@@ -2057,7 +1815,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	}
 	// There are some missing files.  Prepare a multirange request for the missing chunks.
 	if len(missingParts) > 0 {
-		if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
+		if err := c.retrieveMissingFiles(stream, dirfd, missingParts, options); err != nil {
 			return output, err
 		}
 	}
@@ -2068,6 +1826,39 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		}
 	}
 
+	// To ensure that consumers of the layer who decompress and read the full tar stream,
+	// and consumers who consume the data via the TOC, both see exactly the same data and metadata,
+	// compute the UncompressedDigest.
+	// c/image will then ensure that this value matches the value in the image config’s RootFS.DiffID, i.e. the image must commit
+	// to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID).
+	//
+	// c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs.
+	//
+	// This is, sadly, quite costly: up to now we might have had to write, and digest, only the new/modified files.
+	// Here we need to read, and digest, the whole layer, even if almost all of it was already present locally previously.
+	// So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents.
+	//
+	// Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that
+	// matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out
+	// via insecureAllowUnpredictableImageContents.
+	if output.UncompressedDigest == "" {
+		switch {
+		case c.pullOptions.insecureAllowUnpredictableImageContents:
+			// Oh well.  Skip the costly digest computation.
+		case output.TarSplit != nil:
+			metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
+			fg := newStagedFileGetter(dirFile, flatPathNameMap)
+			digester := digest.Canonical.Digester()
+			if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {
+				return output, fmt.Errorf("digesting staged uncompressed stream: %w", err)
+			}
+			output.UncompressedDigest = digester.Digest()
+		default:
+			// We are checking for this earlier in GetDiffer, so this should not be reachable.
+			return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
+		}
+	}
+
 	if totalChunksSize > 0 {
 		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
 	}
@@ -2077,7 +1868,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	return output, nil
 }
 
-func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
 	// ignore the metadata files for the estargz format.
 	if fileType != fileTypeEstargz {
 		return false
@@ -2090,9 +1881,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
 	return false
 }
 
-func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) {
-	var totalFilesSize int64
-
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) {
 	countNextChunks := func(start int) int {
 		count := 0
 		for _, e := range entries[start:] {
@@ -2122,16 +1911,14 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			continue
 		}
 
-		totalFilesSize += e.Size
-
 		if e.Type == TypeChunk {
-			return nil, -1, fmt.Errorf("chunk type without a regular file")
+			return nil, fmt.Errorf("chunk type without a regular file")
 		}
 
 		if e.Type == TypeReg {
 			nChunks := countNextChunks(i + 1)
 
-			e.chunks = make([]*internal.FileMetadata, nChunks+1)
+			e.chunks = make([]*minimal.FileMetadata, nChunks+1)
 			for j := 0; j <= nChunks; j++ {
 				// we need a copy here, otherwise we override the
 				// .Size later
@@ -2161,19 +1948,19 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			lastChunkOffset = mergedEntries[i].chunks[j].Offset
 		}
 	}
-	return mergedEntries, totalFilesSize, nil
+	return mergedEntries, nil
 }
 
 // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
 // same digest as chunk.ChunkDigest
-func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
-	parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+	parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
 	if err != nil {
 		return false
 	}
 	defer unix.Close(parentDirfd)
 
-	fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+	fd, err := openFileUnderRoot(parentDirfd, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
 	if err != nil {
 		return false
 	}
@@ -2197,3 +1984,33 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs
 
 	return digester.Digest() == digest
 }
+
+// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir.
+// If flatPathNameMap is not nil, it must be used to map logical file names to the backing file paths.
+func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter {
+	return &stagedFileGetter{
+		rootDir:         rootDir,
+		flatPathNameMap: flatPathNameMap,
+	}
+}
+
+type stagedFileGetter struct {
+	rootDir         *os.File
+	flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names
+}
+
+func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) {
+	if fg.flatPathNameMap != nil {
+		path, ok := fg.flatPathNameMap[filepath.Clean(filename)]
+		if !ok {
+			return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
+		}
+		filename = path
+	}
+	pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename)
+	if err != nil {
+		return nil, err
+	}
+	defer pathFD.Close()
+	return securejoin.Reopen(pathFD, unix.O_RDONLY)
+}
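+
+// A minimal usage sketch (hypothetical entry name; dirFile is an open staging-root *os.File):
+//
+//	fg := newStagedFileGetter(dirFile, nil) // nil map: tar names are used as-is
+//	rc, err := fg.Get("etc/passwd")
+//	if err == nil {
+//		defer rc.Close()
+//		// read the staged file contents from rc
+//	}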
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
index ac6bdfec7..fe3d36c76 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package chunked
 
@@ -14,5 +13,5 @@ import (
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
-	return nil, errors.New("format not supported on this system")
+	return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system"))
 }
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
index 6fbaa41b5..6f39b2ae2 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -3,7 +3,7 @@ package toc
 import (
 	"errors"
 
-	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/chunked/internal/minimal"
 	digest "github.com/opencontainers/go-digest"
 )
 
@@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
 // This is an experimental feature and may be changed/removed in the future.
 func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
 	d1, ok1 := annotations[tocJSONDigestAnnotation]
-	d2, ok2 := annotations[internal.ManifestChecksumKey]
+	d2, ok2 := annotations[minimal.ManifestChecksumKey]
 	switch {
 	case ok1 && ok2:
 		return nil, errors.New("both zstd:chunked and eStargz TOC found")
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index 7f49d029d..560df3cf5 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -75,10 +75,6 @@ type OptionsConfig struct {
 	// Size
 	Size string `toml:"size,omitempty"`
 
-	// RemapUIDs is a list of default UID mappings to use for layers.
-	RemapUIDs string `toml:"remap-uids,omitempty"`
-	// RemapGIDs is a list of default GID mappings to use for layers.
-	RemapGIDs string `toml:"remap-gids,omitempty"`
 	// IgnoreChownErrors is a flag for whether chown errors should be
 	// ignored when building an image.
 	IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
@@ -90,13 +86,6 @@ type OptionsConfig struct {
 	// files and directories.
 	ForceMask os.FileMode `toml:"force_mask,omitempty"`
 
-	// RemapUser is the name of one or more entries in /etc/subuid which
-	// should be used to set up default UID mappings.
-	RemapUser string `toml:"remap-user,omitempty"`
-	// RemapGroup is the name of one or more entries in /etc/subgid which
-	// should be used to set up default GID mappings.
-	RemapGroup string `toml:"remap-group,omitempty"`
-
 	// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
 	// /etc/subgid which should be used to set up automatically a userns.
 	RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
index 36e1bdd5f..08060f2a2 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -1,11 +1,10 @@
-//go:build linux || darwin || freebsd || solaris
-// +build linux darwin freebsd solaris
+//go:build !windows
 
 package directory
 
 import (
+	"errors"
 	"io/fs"
-	"os"
 	"path/filepath"
 	"syscall"
 )
@@ -27,7 +26,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 		if err != nil {
 			// if dir does not exist, Usage() returns the error.
 			// if dir/x disappeared while walking, Usage() ignores dir/x.
-			if os.IsNotExist(err) && d != dir {
+			if errors.Is(err, fs.ErrNotExist) && d != dir {
 				return nil
 			}
 			return err
@@ -35,6 +34,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 
 		fileInfo, err := entry.Info()
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil
+			}
 			return err
 		}
 
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
index 482bc51a2..c2145c26f 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -1,11 +1,10 @@
 //go:build windows
-// +build windows
 
 package directory
 
 import (
+	"errors"
 	"io/fs"
-	"os"
 	"path/filepath"
 )
 
@@ -25,7 +24,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 		if err != nil {
 			// if dir does not exist, Usage() returns the error.
 			// if dir/x disappeared while walking, Size() ignores dir/x.
-			if os.IsNotExist(err) && path != dir {
+			if errors.Is(err, fs.ErrNotExist) && path != dir {
 				return nil
 			}
 			return err
@@ -40,6 +39,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 
 		fileInfo, err := d.Info()
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil
+			}
 			return err
 		}
 		usage.Size += fileInfo.Size()
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go
new file mode 100644
index 000000000..eeecc9f75
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go
@@ -0,0 +1,38 @@
+package fileutils
+
+import (
+	"errors"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// Exists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink is followed.
+func Exists(path string) error {
+	// It uses unix.Faccessat, which is faster than os.Stat for
+	// simply checking that a file exists.
+	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
+	if err != nil {
+		return &os.PathError{Op: "faccessat", Path: path, Err: err}
+	}
+	return nil
+}
+
+// Lexists checks whether a file or directory exists at the given path.
+// If the path is a symlink, the symlink itself is checked.
+func Lexists(path string) error {
+	// FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for
+	// faccessat. In this case, the call to faccessat will return EINVAL and
+	// we fall back to using Lstat.
+	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil {
+		if errors.Is(err, syscall.EINVAL) {
+			_, err = os.Lstat(path)
+			return err
+		}
+		return &os.PathError{Op: "faccessat", Path: path, Err: err}
+	}
+	return nil
+}
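+
+// Usage sketch (illustrative): Lexists treats a dangling symlink as existing,
+// while Exists follows the link and reports an error for it:
+//
+//	if err := Lexists("/path/to/maybe-symlink"); err != nil {
+//		// the path itself is absent
+//	}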
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
index f3087d7df..785b13317 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
@@ -1,5 +1,4 @@
-//go:build !windows
-// +build !windows
+//go:build !windows && !freebsd
 
 package fileutils
 
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
index 92e0263d8..3cb250c5a 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -1,5 +1,4 @@
 //go:build linux || freebsd
-// +build linux freebsd
 
 package fileutils
 
diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
index 9854cac1c..6b9a853b7 100644
--- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
+++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package fsutils
 
diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
index 46e68c578..80b9171db 100644
--- a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package fsverity
 
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
deleted file mode 100644
index 9057fe1b2..000000000
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build !linux && !darwin && !freebsd && !windows
-// +build !linux,!darwin,!freebsd,!windows
-
-package homedir
-
-// Copyright 2013-2018 Docker, Inc.
-// NOTE: this package has originally been copied from github.com/docker/docker.
-
-import (
-	"errors"
-	"os"
-	"path/filepath"
-)
-
-// GetRuntimeDir is unsupported on non-linux system.
-func GetRuntimeDir() (string, error) {
-	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
-}
-
-// StickRuntimeDirContents is unsupported on non-linux system.
-func StickRuntimeDirContents(files []string) ([]string, error) {
-	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
-}
-
-// GetConfigHome returns XDG_CONFIG_HOME.
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
-	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
-		return xdgConfigHome, nil
-	}
-	home := Get()
-	if home == "" {
-		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
-	}
-	return filepath.Join(home, ".config"), nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index 45be87659..f351b48bb 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package homedir
 
diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
index 87484d95b..4a75b70fd 100644
--- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
+++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go
@@ -1,15 +1,17 @@
 //go:build linux
-// +build linux
 
 package idmap
 
 import (
+	"errors"
 	"fmt"
+	"io/fs"
 	"os"
 	"runtime"
 	"syscall"
 
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -25,22 +27,26 @@ func CreateIDMappedMount(source, target string, pid int) error {
 
 	targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
 	if err != nil {
-		return err
+		return &os.PathError{Op: "open_tree", Path: source, Err: err}
 	}
 	defer unix.Close(targetDirFd)
 
 	if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
 		&unix.MountAttr{
-			Attr_set:  unix.MOUNT_ATTR_IDMAP,
-			Userns_fd: uint64(userNsFile.Fd()),
+			Attr_set:    unix.MOUNT_ATTR_IDMAP,
+			Userns_fd:   uint64(userNsFile.Fd()),
+			Propagation: unix.MS_PRIVATE,
 		}); err != nil {
-		return err
+		return &os.PathError{Op: "mount_setattr", Path: source, Err: err}
 	}
-	if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
+	if err := os.Mkdir(target, 0o700); err != nil && !errors.Is(err, fs.ErrExist) {
 		return err
 	}
 
-	return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
+	if err := unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil {
+		return &os.PathError{Op: "move_mount", Path: target, Err: err}
+	}
+	return nil
 }
 
 // CreateUsernsProcess forks the current process and creates a user namespace using the specified
@@ -61,12 +67,20 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
 		_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
 		// just wait for the SIGKILL
 		for {
-			syscall.Pause()
+			_ = syscall.Pause()
 		}
 	}
 	cleanupFunc := func() {
-		unix.Kill(int(pid), unix.SIGKILL)
-		_, _ = unix.Wait4(int(pid), nil, 0, nil)
+		err1 := unix.Kill(int(pid), unix.SIGKILL)
+		if err1 != nil && err1 != syscall.ESRCH {
+			logrus.Warnf("killing process %d with SIGKILL failed: %v", int(pid), err1)
+		}
+		if err1 != nil {
+			return
+		}
+		if _, err := unix.Wait4(int(pid), nil, 0, nil); err != nil {
+			logrus.Warnf("wait4 for pid %d failed: %v", int(pid), err)
+		}
 	}
 	writeMappings := func(fname string, idmap []idtools.IDMap) error {
 		mappings := ""
diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
index 81c6072aa..d58137b99 100644
--- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package idmap
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index ef5a95254..299bdbef7 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -4,6 +4,7 @@ import (
 	"bufio"
 	"errors"
 	"fmt"
+	"io/fs"
 	"os"
 	"os/user"
 	"runtime"
@@ -367,21 +368,174 @@ func checkChownErr(err error, name string, uid, gid int) error {
 	return err
 }
 
+// Stat contains file states that can be overridden with ContainersOverrideXattr.
+type Stat struct {
+	IDs   IDPair
+	Mode  os.FileMode
+	Major int
+	Minor int
+}
+
+// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
+// that can be used as the value for the ContainersOverrideXattr xattr.
+func FormatContainersOverrideXattr(uid, gid, mode int) string {
+	return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0)
+}
+
+// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string
+// that can be used as the value for the ContainersOverrideXattr xattr.  For devices, it also
+// needs the major and minor numbers.
+func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string {
+	typ := ""
+	switch mode & os.ModeType {
+	case os.ModeDir:
+		typ = "dir"
+	case os.ModeSymlink:
+		typ = "symlink"
+	case os.ModeNamedPipe:
+		typ = "pipe"
+	case os.ModeSocket:
+		typ = "socket"
+	case os.ModeDevice:
+		typ = fmt.Sprintf("block-%d-%d", major, minor)
+	case os.ModeDevice | os.ModeCharDevice:
+		typ = fmt.Sprintf("char-%d-%d", major, minor)
+	default:
+		typ = "file"
+	}
+	unixMode := mode & os.ModePerm
+	if mode&os.ModeSetuid != 0 {
+		unixMode |= 0o4000
+	}
+	if mode&os.ModeSetgid != 0 {
+		unixMode |= 0o2000
+	}
+	if mode&os.ModeSticky != 0 {
+		unixMode |= 0o1000
+	}
+	return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ)
+}
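+
+// For example (illustrative values): a block device 8:1 owned by 0:0, mode 0660 with
+// the setgid bit, encodes as
+//
+//	FormatContainersOverrideXattrDevice(0, 0, os.ModeDevice|os.ModeSetgid|0o660, 8, 1)
+//	// "0:0:2660:block-8-1"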
+
+// GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
+func GetContainersOverrideXattr(path string) (Stat, error) {
+	xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
+	if err != nil {
+		return Stat{}, err
+	}
+	return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist.
+}
+
+func parseOverrideXattr(xstat []byte) (Stat, error) {
+	var stat Stat
+	attrs := strings.Split(string(xstat), ":")
+	if len(attrs) < 3 {
+		return stat, fmt.Errorf("The number of parts in %s is less than 3",
+			ContainersOverrideXattr)
+	}
+
+	value, err := strconv.ParseUint(attrs[0], 10, 32)
+	if err != nil {
+		return stat, fmt.Errorf("Failed to parse UID: %w", err)
+	}
+	stat.IDs.UID = int(value)
+
+	value, err = strconv.ParseUint(attrs[1], 10, 32)
+	if err != nil {
+		return stat, fmt.Errorf("Failed to parse GID: %w", err)
+	}
+	stat.IDs.GID = int(value)
+
+	value, err = strconv.ParseUint(attrs[2], 8, 32)
+	if err != nil {
+		return stat, fmt.Errorf("Failed to parse mode: %w", err)
+	}
+	stat.Mode = os.FileMode(value) & os.ModePerm
+	if value&0o1000 != 0 {
+		stat.Mode |= os.ModeSticky
+	}
+	if value&0o2000 != 0 {
+		stat.Mode |= os.ModeSetgid
+	}
+	if value&0o4000 != 0 {
+		stat.Mode |= os.ModeSetuid
+	}
+
+	if len(attrs) > 3 {
+		typ := attrs[3]
+		if strings.HasPrefix(typ, "file") {
+		} else if strings.HasPrefix(typ, "dir") {
+			stat.Mode |= os.ModeDir
+		} else if strings.HasPrefix(typ, "symlink") {
+			stat.Mode |= os.ModeSymlink
+		} else if strings.HasPrefix(typ, "pipe") {
+			stat.Mode |= os.ModeNamedPipe
+		} else if strings.HasPrefix(typ, "socket") {
+			stat.Mode |= os.ModeSocket
+		} else if strings.HasPrefix(typ, "block") {
+			stat.Mode |= os.ModeDevice
+			stat.Major, stat.Minor, err = parseDevice(typ)
+			if err != nil {
+				return stat, err
+			}
+		} else if strings.HasPrefix(typ, "char") {
+			stat.Mode |= os.ModeDevice | os.ModeCharDevice
+			stat.Major, stat.Minor, err = parseDevice(typ)
+			if err != nil {
+				return stat, err
+			}
+		} else {
+			return stat, fmt.Errorf("Invalid file type %s", typ)
+		}
+	}
+	return stat, nil
+}
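+
+// Round-trip sketch (illustrative): parsing the example value above restores the
+// device identity:
+//
+//	st, _ := parseOverrideXattr([]byte("0:0:2660:block-8-1"))
+//	// st.Mode == os.ModeDevice|os.ModeSetgid|0o660, st.Major == 8, st.Minor == 1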
+
+func parseDevice(typ string) (int, int, error) {
+	parts := strings.Split(typ, "-")
+	// If there are more than 3 parts, just ignore them to be forward compatible
+	if len(parts) < 3 {
+		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+	}
+	if parts[0] != "block" && parts[0] != "char" {
+		return 0, 0, fmt.Errorf("Invalid device type %s", typ)
+	}
+	major, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
+	}
+	minor, err := strconv.Atoi(parts[2])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
+	}
+	return major, minor, nil
+}
+
+// SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
+func SetContainersOverrideXattr(path string, stat Stat) error {
+	value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor)
+	return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
+}
+
 func SafeChown(name string, uid, gid int) error {
 	if runtime.GOOS == "darwin" {
-		var mode uint64 = 0o0700
+		stat := Stat{
+			Mode: os.FileMode(0o0700),
+		}
 		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
-		if err == nil {
-			attrs := strings.Split(string(xstat), ":")
-			if len(attrs) == 3 {
-				val, err := strconv.ParseUint(attrs[2], 8, 32)
-				if err == nil {
-					mode = val
-				}
+		if err == nil && xstat != nil {
+			stat, err = parseOverrideXattr(xstat)
+			if err != nil {
+				return err
 			}
+		} else {
+			st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
+			if err != nil {
+				return err
+			}
+			stat.Mode = st.Mode()
 		}
-		value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
-		if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+		stat.IDs = IDPair{UID: uid, GID: gid}
+		if err = SetContainersOverrideXattr(name, stat); err != nil {
 			return err
 		}
 		uid = os.Getuid()
@@ -397,19 +551,24 @@ func SafeChown(name string, uid, gid int) error {
 
 func SafeLchown(name string, uid, gid int) error {
 	if runtime.GOOS == "darwin" {
-		var mode uint64 = 0o0700
+		stat := Stat{
+			Mode: os.FileMode(0o0700),
+		}
 		xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
-		if err == nil {
-			attrs := strings.Split(string(xstat), ":")
-			if len(attrs) == 3 {
-				val, err := strconv.ParseUint(attrs[2], 8, 32)
-				if err == nil {
-					mode = val
-				}
+		if err == nil && xstat != nil {
+			stat, err = parseOverrideXattr(xstat)
+			if err != nil {
+				return err
+			}
+		} else {
+			st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
+			if err != nil {
+				return err
 			}
+			stat.Mode = st.Mode()
 		}
-		value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
-		if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
+		stat.IDs = IDPair{UID: uid, GID: gid}
+		if err = SetContainersOverrideXattr(name, stat); err != nil {
 			return err
 		}
 		uid = os.Getuid()
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index 03e787376..2bd26d0e3 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo && libsubid
-// +build linux,cgo,libsubid
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 7900af38a..1da7dadbf 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
index 78141fb85..e6f5c1ba6 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux || !libsubid || !cgo
-// +build !linux !libsubid !cgo
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
index dc69c6076..ec6a3a046 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
index 15bd98ede..e37c4540c 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
index b3772bdb3..f34462a23 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package idtools
 
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index 2a8c85ad4..fd6addd73 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -150,10 +150,13 @@ func (w *atomicFileWriter) complete(commit bool) (retErr error) {
 	}
 
 	defer func() {
-		w.closeTempFile()
+		err := w.closeTempFile()
 		if retErr != nil || w.writeErr != nil {
 			os.Remove(w.f.Name())
 		}
+		if retErr == nil {
+			retErr = err
+		}
 	}()
 
 	if commit {
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
index aec161e0f..2ccdc3108 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package ioutils
 
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
index 9d5af610e..257b064c5 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package ioutils
 
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
index 2c2242d69..79837fb33 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package ioutils
 
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
index ccc7f9c23..0b6d0a7a6 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/writers.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
@@ -36,9 +36,9 @@ func (r *writeCloserWrapper) Close() error {
 }
 
 // NewWriteCloserWrapper returns a new io.WriteCloser.
-func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser {
 	return &writeCloserWrapper{
-		Writer: r,
+		Writer: w,
 		closer: closer,
 	}
 }
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
index 510147578..52f6c0a62 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go
@@ -128,9 +128,8 @@ func GetROLockfile(path string) (Locker, error) {
 func (l *LockFile) Lock() {
 	if l.ro {
 		panic("can't take write lock on read-only lock file")
-	} else {
-		l.lock(writeLock)
 	}
+	l.lock(writeLock)
 }
 
 // RLock locks the lockfile as a reader.
@@ -142,9 +141,8 @@ func (l *LockFile) RLock() {
 func (l *LockFile) TryLock() error {
 	if l.ro {
 		panic("can't take write lock on read-only lock file")
-	} else {
-		return l.tryLock(writeLock)
 	}
+	return l.tryLock(writeLock)
 }
 
 // TryRLock attempts to lock the lockfile as a reader.
@@ -415,7 +413,9 @@ func (l *LockFile) lock(lType lockType) {
 		// Optimization: only use the (expensive) syscall when
 		// the counter is 0.  In this case, we're either the first
 		// reader lock or a writer lock.
-		lockHandle(l.fd, lType, false)
+		if err := lockHandle(l.fd, lType, false); err != nil {
+			panic(err)
+		}
 	}
 	l.lockType = lType
 	l.locked = true
@@ -426,10 +426,13 @@ func (l *LockFile) lock(lType lockType) {
 // command.
 func (l *LockFile) tryLock(lType lockType) error {
 	var success bool
+	var rwMutexUnlocker func()
 	if lType == readLock {
 		success = l.rwMutex.TryRLock()
+		rwMutexUnlocker = l.rwMutex.RUnlock
 	} else {
 		success = l.rwMutex.TryLock()
+		rwMutexUnlocker = l.rwMutex.Unlock
 	}
 	if !success {
 		return fmt.Errorf("resource temporarily unavailable")
@@ -440,7 +443,7 @@ func (l *LockFile) tryLock(lType lockType) error {
 		// If we're the first reference on the lock, we need to open the file again.
 		fd, err := openLock(l.file, l.ro)
 		if err != nil {
-			l.rwMutex.Unlock()
+			rwMutexUnlocker()
 			return err
 		}
 		l.fd = fd
@@ -450,7 +453,7 @@ func (l *LockFile) tryLock(lType lockType) error {
 		// reader lock or a writer lock.
 		if err = lockHandle(l.fd, lType, true); err != nil {
 			closeHandle(fd)
-			l.rwMutex.Unlock()
+			rwMutexUnlocker()
 			return err
 		}
 	}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index 0eff003bc..885f2f88a 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -1,5 +1,4 @@
-//go:build linux || solaris || darwin || freebsd
-// +build linux solaris darwin freebsd
+//go:build !windows
 
 package lockfile
 
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
index 6482529b3..0cc1c50cc 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package lockfile
 
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index 40d8fd2b8..40cb22b0f 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -1,15 +1,16 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux
 
 package loopback
 
 import (
 	"errors"
 	"fmt"
+	"io/fs"
 	"os"
 	"syscall"
 
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 // Loopback related errors
@@ -39,52 +40,70 @@ func getNextFreeLoopbackIndex() (int, error) {
 	return index, err
 }
 
-func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
+func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (*os.File, error) {
 	// Read information about the loopback file.
 	var st syscall.Stat_t
-	err = syscall.Fstat(int(sparseFile.Fd()), &st)
-	if err != nil {
+	if err := syscall.Fstat(int(sparseFile.Fd()), &st); err != nil {
 		logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err)
 		return nil, ErrAttachLoopbackDevice
 	}
 
+	// upper bound on attempts, to avoid an infinite loop
+	remaining := 1000
+
 	// Start looking for a free /dev/loop
 	for {
+		if remaining == 0 {
+			logrus.Errorf("No free loopback devices available")
+			return nil, ErrAttachLoopbackDevice
+		}
+		remaining--
+
+		index, err := getNextFreeLoopbackIndex()
+		if err != nil {
+			logrus.Debugf("Error retrieving the next available loopback: %s", err)
+			return nil, err
+		}
+
 		target := fmt.Sprintf("/dev/loop%d", index)
-		index++
 
-		fi, err := os.Stat(target)
+		// OpenFile adds O_CLOEXEC
+		loopFile, err := os.OpenFile(target, os.O_RDWR, 0o644)
 		if err != nil {
-			if os.IsNotExist(err) {
-				logrus.Error("There are no more loopback devices available.")
+			// The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so
+			// just treat ENXIO as if the device does not exist.
+			if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) {
+				// Another process could have taken the loopback device in the meantime.  So repeat
+				// the process with the next loopback device.
+				continue
 			}
+			logrus.Errorf("Opening loopback device: %s", err)
 			return nil, ErrAttachLoopbackDevice
 		}
 
+		fi, err := loopFile.Stat()
+		if err != nil {
+			loopFile.Close()
+			logrus.Errorf("Stat loopback device: %s", err)
+			return nil, ErrAttachLoopbackDevice
+		}
 		if fi.Mode()&os.ModeDevice != os.ModeDevice {
+			loopFile.Close()
 			logrus.Errorf("Loopback device %s is not a block device.", target)
 			continue
 		}
 
-		// OpenFile adds O_CLOEXEC
-		loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644)
-		if err != nil {
-			logrus.Errorf("Opening loopback device: %s", err)
-			return nil, ErrAttachLoopbackDevice
-		}
-
 		// Try to attach to the loop file
 		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
 			loopFile.Close()
 
 			// If the error is EBUSY, then try the next loopback
-			if err != syscall.EBUSY {
-				logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
-				return nil, ErrAttachLoopbackDevice
+			if err == syscall.EBUSY {
+				continue
 			}
 
-			// Otherwise, we keep going with the loop
-			continue
+			logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
+			return nil, ErrAttachLoopbackDevice
 		}
 
 		// Check if the loopback driver and underlying filesystem agree on the loopback file's
@@ -97,18 +116,8 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
 		if dev != uint64(st.Dev) || ino != st.Ino {
 			logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino)
 		}
-
-		// In case of success, we finished. Break the loop.
-		break
+		return loopFile, nil
 	}
-
-	// This can't happen, but let's be sure
-	if loopFile == nil {
-		logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
-		return nil, ErrAttachLoopbackDevice
-	}
-
-	return loopFile, nil
 }
 
 // AttachLoopDevice attaches the given sparse file to the next
@@ -124,14 +133,6 @@ func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
 }
 
 func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
-	// Try to retrieve the next available loopback device via syscall.
-	// If it fails, we discard error and start looping for a
-	// loopback from index 0.
-	startIndex, err := getNextFreeLoopbackIndex()
-	if err != nil {
-		logrus.Debugf("Error retrieving the next available loopback: %s", err)
-	}
-
 	var sparseFile *os.File
 
 	// OpenFile adds O_CLOEXEC
@@ -146,7 +147,7 @@ func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err erro
 	}
 	defer sparseFile.Close()
 
-	loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
+	loopFile, err := openNextAvailableLoopback(sparseName, sparseFile)
 	if err != nil {
 		return nil, err
 	}
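
The rewritten attach loop above no longer scans /dev/loopN sequentially from a pre-fetched start index; it re-queries the kernel for a free index on every iteration (bounded at 1000 attempts) and treats ENXIO like a missing device, since the kernel returns ENXIO for loop devices in the "deleting" or "rundown" state. A minimal standalone sketch of the same open-with-retry pattern (illustrative only; assumes Linux, root privileges, and a /dev/loop-control node):

//go:build linux

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"golang.org/x/sys/unix"
)

// nextFreeLoop asks the loop control device for a free index, the same
// LOOP_CTL_GET_FREE ioctl that getNextFreeLoopbackIndex issues.
func nextFreeLoop() (int, error) {
	ctl, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0o644)
	if err != nil {
		return -1, err
	}
	defer ctl.Close()
	return unix.IoctlRetInt(int(ctl.Fd()), unix.LOOP_CTL_GET_FREE)
}

// openFreeLoop opens the next free /dev/loopN, retrying when another
// process grabs the device between the ioctl and the open.
func openFreeLoop() (*os.File, error) {
	for remaining := 1000; remaining > 0; remaining-- {
		index, err := nextFreeLoop()
		if err != nil {
			return nil, err
		}
		f, err := os.OpenFile(fmt.Sprintf("/dev/loop%d", index), os.O_RDWR, 0o644)
		if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) {
			continue // lost the race; ask the kernel for a new index
		}
		if err != nil {
			return nil, err
		}
		return f, nil
	}
	return nil, errors.New("no free loopback devices available")
}

func main() {
	f, err := openFreeLoop()
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println("got", f.Name())
}
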
diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
index da2ba46fe..9acc519cc 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -1,5 +1,4 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux
 
 package loopback
 
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
index 21a981007..2cd8fc8b0 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -1,22 +1,7 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux
 
 package loopback
 
-/*
-#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
-
-#ifndef LOOP_CTL_GET_FREE
-  #define LOOP_CTL_GET_FREE 0x4C82
-#endif
-
-#ifndef LO_FLAGS_PARTSCAN
-  #define LO_FLAGS_PARTSCAN 8
-#endif
-
-*/
-import "C"
-
 type loopInfo64 struct {
 	loDevice         uint64 /* ioctl r/o */
 	loInode          uint64 /* ioctl r/o */
@@ -35,19 +20,19 @@ type loopInfo64 struct {
 
 // IOCTL consts
 const (
-	LoopSetFd       = C.LOOP_SET_FD
-	LoopCtlGetFree  = C.LOOP_CTL_GET_FREE
-	LoopGetStatus64 = C.LOOP_GET_STATUS64
-	LoopSetStatus64 = C.LOOP_SET_STATUS64
-	LoopClrFd       = C.LOOP_CLR_FD
-	LoopSetCapacity = C.LOOP_SET_CAPACITY
+	LoopSetFd       = 0x4C00
+	LoopCtlGetFree  = 0x4C82
+	LoopGetStatus64 = 0x4C05
+	LoopSetStatus64 = 0x4C04
+	LoopClrFd       = 0x4C01
+	LoopSetCapacity = 0x4C07
 )
 
 // LOOP consts.
 const (
-	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
-	LoFlagsReadOnly  = C.LO_FLAGS_READ_ONLY
-	LoFlagsPartScan  = C.LO_FLAGS_PARTSCAN
-	LoKeySize        = C.LO_KEY_SIZE
-	LoNameSize       = C.LO_NAME_SIZE
+	LoFlagsAutoClear = 4
+	LoFlagsReadOnly  = 1
+	LoFlagsPartScan  = 8
+	LoKeySize        = 32
+	LoNameSize       = 64
 )
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index ec4287247..b3527e3a0 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -1,5 +1,4 @@
-//go:build linux && cgo
-// +build linux,cgo
+//go:build linux
 
 package loopback
 
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go
index 5de3a671d..40a229932 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags.go
@@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
 			}
 			continue
 		}
-		opt := strings.SplitN(option, "=", 2)
-		if len(opt) != 2 || !validFlags[opt[0]] {
+		opt, _, ok := strings.Cut(option, "=")
+		if !ok || !validFlags[opt] {
 			return nil, fmt.Errorf("invalid tmpfs option %q", opt)
 		}
-		if !dataCollisions[opt[0]] {
+		if !dataCollisions[opt] {
 			// We prepend the option and add to collision map
 			newOptions = append([]string{option}, newOptions...)
-			dataCollisions[opt[0]] = true
+			dataCollisions[opt] = true
 		}
 	}
 
@@ -140,8 +140,8 @@ func ParseOptions(options string) (int, string) {
 func ParseTmpfsOptions(options string) (int, string, error) {
 	flags, data := ParseOptions(options)
 	for _, o := range strings.Split(data, ",") {
-		opt := strings.SplitN(o, "=", 2)
-		if !validFlags[opt[0]] {
+		opt, _, _ := strings.Cut(o, "=")
+		if !validFlags[opt] {
 			return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
 		}
 	}
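
The SplitN(s, "=", 2) call sites here (and in the files below) migrate to strings.Cut, which returns the text before and after the first separator plus an explicit ok flag, so the code indexes no slices and the missing-separator case is unmistakable. A short illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Cut reports via ok whether the separator was present at all.
	key, val, ok := strings.Cut("size=64m", "=")
	fmt.Println(key, val, ok) // size 64m true

	key, val, ok = strings.Cut("noexec", "=")
	fmt.Println(key, val, ok) // noexec  false

	// The older SplitN form forced a length check instead.
	parts := strings.SplitN("noexec", "=", 2)
	fmt.Println(len(parts)) // 1
}
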
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
index ee0f593a5..e581d64eb 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !freebsd
-// +build !linux,!freebsd
 
 package mount
 
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index c70b0bf99..61d6d1c59 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd && cgo
-// +build freebsd,cgo
 
 package mount
 
@@ -40,13 +39,9 @@ func mount(device, target, mType string, flag uintptr, data string) error {
 				isNullFS = true
 				continue
 			}
-			opt := strings.SplitN(x, "=", 2)
-			options = append(options, opt[0])
-			if len(opt) == 2 {
-				options = append(options, opt[1])
-			} else {
-				options = append(options, "")
-			}
+			name, val, _ := strings.Cut(x, "=")
+			options = append(options, name)
+			options = append(options, val)
 		}
 	}
 
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
index 74fe66609..b9dc82d3f 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
@@ -1,6 +1,4 @@
 //go:build !linux && !(freebsd && cgo)
-// +build !linux
-// +build !freebsd !cgo
 
 package mount
 
diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
index cbc0299fb..2d9e75ea1 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
@@ -1,5 +1,18 @@
 package mount
 
-import "github.com/moby/sys/mountinfo"
+import (
+	"fmt"
+	"os"
 
-var PidMountInfo = mountinfo.PidMountInfo
+	"github.com/moby/sys/mountinfo"
+)
+
+func PidMountInfo(pid int) ([]*Info, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return mountinfo.GetMountsFromReader(f, nil)
+}
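
PidMountInfo stops being a re-export of mountinfo.PidMountInfo (deprecated upstream) and instead opens /proc/<pid>/mountinfo itself, delegating only the parsing to mountinfo.GetMountsFromReader. The signature is unchanged for callers; a usage sketch (assumes Linux with /proc mounted, and the package's Info alias to moby/sys/mountinfo's Info type):

//go:build linux

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// List the mount entries visible to the current process.
	infos, err := mount.PidMountInfo(os.Getpid())
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Printf("%s on %s type %s\n", info.Source, info.Mountpoint, info.FSType)
	}
}
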
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
index a2a1d4072..331272e0c 100644
--- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package mount
 
@@ -11,7 +10,7 @@ import (
 
 func unmount(target string, flags int) error {
 	var err error
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		err = unix.Unmount(target, flags)
 		switch err {
 		case unix.EBUSY:
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
index d3a0cf51c..3c942bfb2 100644
--- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package mount
 
diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
index 3fb0c36b8..7b20b0628 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go
@@ -11,11 +11,11 @@ import (
 
 // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
 func ParseKeyValueOpt(opt string) (string, string, error) {
-	parts := strings.SplitN(opt, "=", 2)
-	if len(parts) != 2 {
+	k, v, ok := strings.Cut(opt, "=")
+	if !ok {
 		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
 	}
-	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+	return strings.TrimSpace(k), strings.TrimSpace(v), nil
 }
 
 // ParseUintList parses and validates the specified string as the value
@@ -42,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) {
 	errInvalidFormat := fmt.Errorf("invalid format: %s", val)
 
 	for _, r := range split {
-		if !strings.Contains(r, "-") {
+		minS, maxS, ok := strings.Cut(r, "-")
+		if !ok {
 			v, err := strconv.Atoi(r)
 			if err != nil {
 				return nil, errInvalidFormat
 			}
 			availableInts[v] = true
 		} else {
-			split := strings.SplitN(r, "-", 2)
-			min, err := strconv.Atoi(split[0])
+			min, err := strconv.Atoi(minS)
 			if err != nil {
 				return nil, errInvalidFormat
 			}
-			max, err := strconv.Atoi(split[1])
+			max, err := strconv.Atoi(maxS)
 			if err != nil {
 				return nil, errInvalidFormat
 			}
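
ParseUintList's behavior is unchanged; strings.Cut now doubles as both the range detector and the splitter, replacing the Contains-then-SplitN pair. For reference, the accepted input forms look like this:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
)

func main() {
	// Comma-separated single values and inclusive ranges.
	set, err := parsers.ParseUintList("0-2,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set[0], set[1], set[2], set[7]) // true true true true
	fmt.Println(set[3])                         // false (3 is not in the list)

	// Malformed input is rejected as a whole.
	_, err = parsers.ParseUintList("1-")
	fmt.Println(err) // invalid format: 1-
}
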
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
index 32d6a9f49..171cd81e7 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package reexec
 
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 87b43ed95..025aef60a 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package reexec
 
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index a56ada216..eefddea41 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -1,5 +1,4 @@
 //go:build solaris || darwin
-// +build solaris darwin
 
 package reexec
 
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
index 77c93b4ab..a78b548a5 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !windows && !freebsd && !solaris && !darwin
-// +build !linux,!windows,!freebsd,!solaris,!darwin
 
 package reexec
 
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
index c46125ebf..ba2f0f847 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package reexec
 
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
index 834dd9433..ccd9d0fb1 100644
--- a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go
@@ -1,5 +1,4 @@
 //go:build !regexp_precompile
-// +build !regexp_precompile
 
 package regexp
 
diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
index a5fe0dbc4..fe4421b01 100644
--- a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
+++ b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go
@@ -1,5 +1,4 @@
 //go:build regexp_precompile
-// +build regexp_precompile
 
 package regexp
 
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
index 66a59c85d..f63c3e444 100644
--- a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
+++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
@@ -3,7 +3,7 @@ package stringutils
 
 import (
 	"bytes"
-	"math/rand"
+	"math/rand/v2"
 	"strings"
 )
 
@@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string {
 	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
 	b := make([]byte, n)
 	for i := range b {
-		b[i] = letters[rand.Intn(len(letters))]
+		b[i] = letters[rand.IntN(len(letters))]
 	}
 	return string(b)
 }
@@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string {
 		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
 	res := make([]byte, n)
 	for i := 0; i < n; i++ {
-		res[i] = chars[rand.Intn(len(chars))]
+		res[i] = chars[rand.IntN(len(chars))]
 	}
 	return string(res)
 }
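
The move from math/rand to math/rand/v2 (Go 1.22+) renames Intn to IntN; the v2 global generator is also seeded automatically, so there is no Seed call to carry over. A small sketch of the renamed call:

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	letters := []byte("abcdefghijklmnopqrstuvwxyz")
	b := make([]byte, 8)
	for i := range b {
		// rand/v2: Intn -> IntN, no explicit seeding required.
		b[i] = letters[rand.IntN(len(letters))]
	}
	fmt.Println(string(b))
}
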
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
index 1ce4c0d6e..892d56138 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
index 7a3d7937d..f0d744eb8 100644
--- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
new file mode 100644
index 000000000..1314058f1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go
@@ -0,0 +1,93 @@
+//go:build freebsd
+
+package system
+
+import (
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	EXTATTR_NAMESPACE_EMPTY  = unix.EXTATTR_NAMESPACE_EMPTY
+	EXTATTR_NAMESPACE_USER   = unix.EXTATTR_NAMESPACE_USER
+	EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM
+)
+
+// ExtattrGetLink retrieves the value of the extended attribute identified by attrname
+// in the given namespace and associated with the given path in the file system.
+// If the path is a symbolic link, the extended attribute is retrieved from the link itself.
+// Returns a []byte slice if the extattr is set and nil otherwise.
+func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
+	size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(nil)), 0)
+	if errno != nil {
+		if errno == unix.ENOATTR {
+			return nil, nil
+		}
+		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
+	}
+	if size == 0 {
+		return []byte{}, nil
+	}
+
+	dest := make([]byte, size)
+	size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(&dest[0])), size)
+	if errno != nil {
+		return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
+	}
+
+	return dest[:size], nil
+}
+
+// ExtattrSetLink sets the value of extended attribute identified by attrname
+// in the given namespace and associated with the given path in the file system.
+// If the path is a symbolic link, the extended attribute is set on the link itself.
+func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
+	if len(data) == 0 {
+		data = []byte{} // ensure non-nil for empty data
+	}
+	if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname,
+		uintptr(unsafe.Pointer(unsafe.SliceData(data))), len(data)); errno != nil {
+		return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno}
+	}
+
+	return nil
+}
+
+// ExtattrListLink lists extended attributes associated with the given path
+// in the specified namespace. If the path is a symbolic link, the attributes
+// are listed from the link itself.
+func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
+	size, errno := unix.ExtattrListLink(path, attrnamespace,
+		uintptr(unsafe.Pointer(nil)), 0)
+	if errno != nil {
+		return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
+	}
+	if size == 0 {
+		return []string{}, nil
+	}
+
+	dest := make([]byte, size)
+	size, errno = unix.ExtattrListLink(path, attrnamespace,
+		uintptr(unsafe.Pointer(&dest[0])), size)
+	if errno != nil {
+		return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
+	}
+
+	var attrs []string
+	for i := 0; i < size; {
+		// Each attribute is preceded by a single byte length
+		length := int(dest[i])
+		i++
+		if i+length > size {
+			break
+		}
+		attrs = append(attrs, string(dest[i:i+length]))
+		i += length
+	}
+
+	return attrs, nil
+}
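
Unlike Linux's listxattr(2), which returns NUL-separated names, FreeBSD's extattr_list_*(2) calls fill the buffer with length-prefixed entries: one length byte followed by that many name bytes. The loop at the end of ExtattrListLink decodes exactly that layout; here it is in isolation with a synthetic buffer:

package main

import "fmt"

// decodeExtattrList decodes FreeBSD's length-prefixed attribute list:
// each entry is a single length byte followed by that many name bytes.
func decodeExtattrList(buf []byte) []string {
	var attrs []string
	for i := 0; i < len(buf); {
		length := int(buf[i])
		i++
		if i+length > len(buf) {
			break // truncated entry; stop rather than read past the buffer
		}
		attrs = append(attrs, string(buf[i:i+length]))
		i += length
	}
	return attrs
}

func main() {
	// Two entries: "foo" (length 3) and "bar2" (length 4).
	buf := []byte{3, 'f', 'o', 'o', 4, 'b', 'a', 'r', '2'}
	fmt.Println(decodeExtattrList(buf)) // [foo bar2]
}
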
diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go
new file mode 100644
index 000000000..07b67357f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go
@@ -0,0 +1,24 @@
+//go:build !freebsd
+
+package system
+
+const (
+	EXTATTR_NAMESPACE_EMPTY  = 0
+	EXTATTR_NAMESPACE_USER   = 0
+	EXTATTR_NAMESPACE_SYSTEM = 0
+)
+
+// ExtattrGetLink is not supported on platforms other than FreeBSD.
+func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// ExtattrSetLink is not supported on platforms other than FreeBSD.
+func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
+	return ErrNotSupportedPlatform
+}
+
+// ExtattrListLink is not supported on platforms other than FreeBSD.
+func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
+	return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
index 4eaeb5d69..f9de938dd 100644
--- a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
index 42658c8b9..037ccf59d 100644
--- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
index 9b13e6146..826c1f9c3 100644
--- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
index 37da93aa0..589cbeba7 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd && cgo
-// +build freebsd,cgo
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
index a90b23e03..17474e114 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -1,5 +1,4 @@
 //go:build solaris && cgo
-// +build solaris,cgo
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
index 0f9feb1d2..db0864275 100644
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -1,8 +1,4 @@
 //go:build !linux && !windows && !solaris && !(freebsd && cgo)
-// +build !linux
-// +build !windows
-// +build !solaris
-// +build !freebsd !cgo
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go
index d3d0ed8a1..ff679c5b1 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod.go
@@ -1,5 +1,4 @@
 //go:build !windows && !freebsd
-// +build !windows,!freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
index 53c3f2837..d94353600 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
index c35b1b346..752f90b14 100644
--- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go
index ff01143ee..fc8de3e4d 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go
index 9f2509738..8838d9fd2 100644
--- a/vendor/github.com/containers/storage/pkg/system/path_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go
index 7ee59d926..5090f3042 100644
--- a/vendor/github.com/containers/storage/pkg/system/process_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go
@@ -1,5 +1,4 @@
 //go:build linux || freebsd || solaris || darwin
-// +build linux freebsd solaris darwin
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go
index 117eb1d6d..db214c4cd 100644
--- a/vendor/github.com/containers/storage/pkg/system/rm_common.go
+++ b/vendor/github.com/containers/storage/pkg/system/rm_common.go
@@ -1,5 +1,4 @@
 //go:build !freebsd
-// +build !freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go
index 2f44d18b6..1d57b7f40 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_common.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_common.go
@@ -1,5 +1,4 @@
 //go:build !freebsd
-// +build !freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go
new file mode 100644
index 000000000..715f05b93
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
index e552e91d7..ffe45f32d 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
index c4816c133..d1b41f34d 100644
--- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
+++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go
@@ -1,5 +1,4 @@
-//go:build linux || freebsd || darwin
-// +build linux freebsd darwin
+//go:build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go
index ad0337db7..9b02a1887 100644
--- a/vendor/github.com/containers/storage/pkg/system/umask.go
+++ b/vendor/github.com/containers/storage/pkg/system/umask.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
index 9497596a0..c0b69ab1b 100644
--- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
index 843ecdc53..b6c36339d 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !freebsd
-// +build !linux,!freebsd
 
 package system
 
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
index 75275b964..27ada2083 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go
@@ -12,7 +12,7 @@ const (
 	E2BIG unix.Errno = unix.E2BIG
 
 	// Operation not supported
-	EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+	ENOTSUP unix.Errno = unix.ENOTSUP
 )
 
 // Lgetxattr retrieves the value of the extended attribute identified by attr
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go
new file mode 100644
index 000000000..5d653976e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go
@@ -0,0 +1,85 @@
+package system
+
+import (
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// Value is larger than the maximum size allowed
+	E2BIG unix.Errno = unix.E2BIG
+
+	// Operation not supported
+	ENOTSUP unix.Errno = unix.ENOTSUP
+
+	// Value is too small or too large for maximum size allowed
+	EOVERFLOW unix.Errno = unix.EOVERFLOW
+)
+
+var (
+	namespaceMap = map[string]int{
+		"user":   EXTATTR_NAMESPACE_USER,
+		"system": EXTATTR_NAMESPACE_SYSTEM,
+	}
+)
+
+func xattrToExtattr(xattr string) (namespace int, extattr string, err error) {
+	namespaceName, extattr, found := strings.Cut(xattr, ".")
+	if !found {
+		return -1, "", ENOTSUP
+	}
+
+	namespace, ok := namespaceMap[namespaceName]
+	if !ok {
+		return -1, "", ENOTSUP
+	}
+	return namespace, extattr, nil
+}
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// Returns a []byte slice if the xattr is set and nil otherwise.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	namespace, extattr, err := xattrToExtattr(attr)
+	if err != nil {
+		return nil, err
+	}
+	return ExtattrGetLink(path, namespace, extattr)
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, value []byte, flags int) error {
+	if flags != 0 {
+		// FIXME: Flags are not supported on FreeBSD, but we can implement
+		// them mimicking the behavior of the Linux implementation.
+		// See lsetxattr(2) on Linux for more information.
+		return ENOTSUP
+	}
+
+	namespace, extattr, err := xattrToExtattr(attr)
+	if err != nil {
+		return err
+	}
+	return ExtattrSetLink(path, namespace, extattr, value)
+}
+
+// Llistxattr lists extended attributes associated with the given path
+// in the file system.
+func Llistxattr(path string) ([]string, error) {
+	attrs := []string{}
+
+	for namespaceName, namespace := range namespaceMap {
+		namespaceAttrs, err := ExtattrListLink(path, namespace)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, attr := range namespaceAttrs {
+			attrs = append(attrs, namespaceName+"."+attr)
+		}
+	}
+
+	return attrs, nil
+}
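
The FreeBSD implementation maps Linux-style xattr names onto extattr namespaces by cutting at the first dot: "user." and "system." prefixes select a namespace, and everything else is rejected with ENOTSUP. A self-contained sketch of that mapping (namespace values per FreeBSD's sys/extattr.h; the error value here is a stand-in for unix.ENOTSUP):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// Namespace identifiers per FreeBSD's sys/extattr.h.
const (
	extattrNamespaceUser   = 1
	extattrNamespaceSystem = 2
)

var errNotSup = errors.New("operation not supported")

// xattrToExtattr splits "namespace.name" at the first dot and
// resolves the namespace, mirroring the helper above.
func xattrToExtattr(xattr string) (namespace int, name string, err error) {
	prefix, name, found := strings.Cut(xattr, ".")
	if !found {
		return -1, "", errNotSup
	}
	switch prefix {
	case "user":
		return extattrNamespaceUser, name, nil
	case "system":
		return extattrNamespaceSystem, name, nil
	default:
		return -1, "", errNotSup
	}
}

func main() {
	ns, name, err := xattrToExtattr("user.containers.override_stat")
	fmt.Println(ns, name, err) // 1 containers.override_stat <nil>

	_, _, err = xattrToExtattr("trusted.overlay.opaque")
	fmt.Println(err) // operation not supported
}
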
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index 6b47c4e71..12462cca3 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -12,7 +12,7 @@ const (
 	E2BIG unix.Errno = unix.E2BIG
 
 	// Operation not supported
-	EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
+	ENOTSUP unix.Errno = unix.ENOTSUP
 
 	// Value is too small or too large for maximum size allowed
 	EOVERFLOW unix.Errno = unix.EOVERFLOW
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
index 8bd7acf1f..66bf5858f 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
@@ -1,5 +1,4 @@
-//go:build !linux && !darwin
-// +build !linux,!darwin
+//go:build !linux && !darwin && !freebsd
 
 package system
 
@@ -10,7 +9,7 @@ const (
 	E2BIG syscall.Errno = syscall.Errno(0)
 
 	// Operation not supported
-	EOPNOTSUPP syscall.Errno = syscall.Errno(0)
+	ENOTSUP syscall.Errno = syscall.Errno(0)
 
 	// Value is too small or too large for maximum size allowed
 	EOVERFLOW syscall.Errno = syscall.Errno(0)
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
index 08dbc661d..14aaeddcf 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo
-// +build linux,cgo
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
index 25054810a..f970935b5 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
@@ -1,5 +1,4 @@
 //go:build linux && !cgo
-// +build linux,!cgo
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
index fbfb90d59..f575fba2e 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
@@ -1,5 +1,4 @@
 //go:build (linux && cgo && !gccgo) || (freebsd && cgo)
-// +build linux,cgo,!gccgo freebsd,cgo
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
index 480e2fcb0..5d0a7a683 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go
@@ -1,5 +1,4 @@
 //go:build darwin
-// +build darwin
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
index 7a44ca301..37a87fa5b 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go
@@ -1,5 +1,4 @@
 //go:build freebsd
-// +build freebsd
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
index 21a43d38c..818983474 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
@@ -1,5 +1,4 @@
 //go:build linux && cgo && gccgo
-// +build linux,cgo,gccgo
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index a8dc1ba03..b45a6819a 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package unshare
 
@@ -21,9 +20,9 @@ import (
 
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/reexec"
+	"github.com/moby/sys/capability"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
-	"github.com/syndtr/gocapability/capability"
 )
 
 // Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
@@ -526,8 +525,11 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	} else {
 		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
 		// to use unshare(), so don't bother creating a new user namespace at this point.
-		capabilities, err := capability.NewPid(0)
+		capabilities, err := capability.NewPid2(0)
+		bailOnError(err, "Initializing a new Capabilities object of pid 0")
+		err = capabilities.Load()
 		bailOnError(err, "Reading the current capabilities sets")
+
 		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
 			return
 		}
@@ -587,7 +589,12 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	cmd.Hook = func(int) error {
 		go func() {
 			for receivedSignal := range interrupted {
-				cmd.Cmd.Process.Signal(receivedSignal)
+				if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil {
+					logrus.Warnf(
+						"Failed to send a signal '%d' to the Process (PID: %d): %v",
+						receivedSignal, cmd.Cmd.Process.Pid, err,
+					)
+				}
 			}
 		}()
 		return nil
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index e3160d0da..05706b8fe 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux && !darwin
-// +build !linux,!darwin
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
index a6b38eda8..ae2869d74 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -1,5 +1,4 @@
 //go:build cgo && !(linux || freebsd)
-// +build cgo,!linux,!freebsd
 
 package unshare
 
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 0f8d1f024..2fff0cecf 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -8,20 +8,24 @@
 #      /usr/containers/storage.conf
 #      /etc/containers/storage.conf
 #      $HOME/.config/containers/storage.conf
-#      $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
+#      $XDG_CONFIG_HOME/containers/storage.conf (if XDG_CONFIG_HOME is set)
 # See man 5 containers-storage.conf for more information
-# The "container storage" table contains all of the server options.
+# The "storage" table contains all of the server options.
 [storage]
 
-# Default Storage Driver, Must be set for proper operation.
+# Default storage driver, must be set for proper operation.
 driver = "overlay"
 
 # Temporary storage location
 runroot = "/run/containers/storage"
 
+# Priority list of storage drivers; when "driver" is not set, each entry is
+# tried in order and the first usable driver is picked.
+# driver_priority = ["overlay", "btrfs"]
+
 # Primary Read/Write location of container storage
-# When changing the graphroot location on an SELINUX system, you must
-# ensure  the labeling matches the default locations labels with the
+# When changing the graphroot location on an SELinux system, you must
+# ensure the labeling matches the default location's labels with the
 # following commands:
 # semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH
 # restorecon -R -v /NEWSTORAGEPATH
@@ -50,54 +54,50 @@ graphroot = "/var/lib/containers/storage"
 additionalimagestores = [
 ]
 
-# Allows specification of how storage is populated when pulling images. This
-# option can speed the pulling process of images compressed with format
-# zstd:chunked. Containers/storage looks for files within images that are being
-# pulled from a container registry that were previously pulled to the host.  It
-# can copy or create a hard link to the existing file when it finds them,
-# eliminating the need to pull them from the container registry. These options
-# can deduplicate pulling of content, disk storage of content and can allow the
-# kernel to use less memory when running containers.
-
-# containers/storage supports four keys
-#   * enable_partial_images="true" | "false"
-#     Tells containers/storage to look for files previously pulled in storage
-#     rather then always pulling them from the container registry.
-#   * use_hard_links = "false" | "true"
-#     Tells containers/storage to use hard links rather then create new files in
-#     the image, if an identical file already existed in storage.
-#   * ostree_repos = ""
-#     Tells containers/storage where an ostree repository exists that might have
-#     previously pulled content which can be used when attempting to avoid
-#     pulling content from the container registry
-#   * convert_images = "false" | "true"
-#     If set to true, containers/storage will convert images to a
-#     format compatible with partial pulls in order to take advantage
-#     of local deduplication and hard linking.  It is an expensive
-#     operation so it is not enabled by default.
-pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
-
-# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
-# a container, to the UIDs/GIDs as they should appear outside of the container,
-# and the length of the range of UIDs/GIDs.  Additional mapped sets can be
-# listed and will be heeded by libraries, but there are limits to the number of
-# mappings which the kernel will allow when you later attempt to run a
-# container.
+# Options controlling how storage is populated when pulling images.
+[storage.options.pull_options]
+# Enable the "zstd:chunked" feature, which allows partial pulls, reusing
+# content that already exists on the system. This is disabled by default,
+# and must be explicitly enabled to be used. For more on zstd:chunked, see
+# https://github.com/containers/storage/blob/main/docs/containers-storage-zstd-chunked.md
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# enable_partial_images = "false"
+
+# Tells containers/storage to use hard links rather than create new files in
+# the image, if an identical file already exists in storage.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# use_hard_links = "false"
+
+# Path to an ostree repository that might have
+# previously pulled content which can be used when attempting to avoid
+# pulling content from the container registry.
+# ostree_repos=""
+
+# If set to "true", containers/storage will convert images that are
+# not already in zstd:chunked format to that format before processing
+# in order to take advantage of local deduplication and hard linking.
+# It is an expensive operation so it is not enabled by default.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
+# convert_images = "false"
+
+# This should ALMOST NEVER be set.
+# It allows partial pulls of images without guaranteeing that "partial
+# pulls" and non-partial pulls both result in consistent image contents.
+# This allows pulling estargz images and early versions of zstd:chunked images;
+# otherwise, these layers always use the traditional non-partial pull path.
+#
+# This option should be enabled EXTREMELY rarely, only if ALL images that could
+# EVER be conceivably pulled on this system are GUARANTEED (e.g. using a signature policy)
+# to come from a build system trusted to never attack image integrity.
 #
-# remap-uids = "0:1668442479:65536"
-# remap-gids = "0:1668442479:65536"
-
-# Remap-User/Group is a user name which can be used to look up one or more UID/GID
-# ranges in the /etc/subuid or /etc/subgid file.  Mappings are set up starting
-# with an in-container ID of 0 and then a host-level ID taken from the lowest
-# range that matches the specified name, and using the length of that range.
-# Additional ranges are then assigned, using the ranges which specify the
-# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
-# until all of the entries have been used for maps. This setting overrides the
-# Remap-UIDs/GIDs setting.
+# If this consistency enforcement were disabled, malicious images could be built
+# in a way designed to evade other audit mechanisms, so the presence of most other audit
+# mechanisms is not a replacement for the above-mentioned need for all images to come
+# from a trusted build system.
 #
-# remap-user = "containers"
-# remap-group = "containers"
+# As a side effect, enabling this option will also make image IDs unpredictable
+# (usually not equal to the traditional value matching the config digest).
+# insecure_allow_unpredictable_image_contents = "false"
 
 # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
 # ranges in the /etc/subuid and /etc/subgid file.  These ranges will be partitioned
@@ -120,6 +120,7 @@ pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree
 # squashed down to the default uid in the container.  These images will have no
 # separation between the users in the container. Only supported for the overlay
 # and vfs drivers.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
 #ignore_chown_errors = "false"
 
 # Inodes is used to set a maximum inodes of the container image.
@@ -133,9 +134,11 @@ pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree
 mountopt = "nodev"
 
 # Set to skip a PRIVATE bind mount on the storage home directory.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
 # skip_mount_home = "false"
 
 # Set to use composefs to mount data layers with overlay.
+# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
 # use_composefs = "false"
 
 # Size is used to set a maximum size of the container image.
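
The repeated "string bool" notes exist because these options travel through a map[string]string (the pull_options table and similar), so native TOML booleans would fail to parse; consumers convert the strings themselves. A hedged sketch of how such a value is typically read (the helper is illustrative, not part of the containers/storage API):

package main

import (
	"fmt"
	"strconv"
)

// stringBool interprets a "string bool" option value; unset or
// unparsable values fall back to the given default.
// (Illustrative helper, not part of containers/storage.)
func stringBool(opts map[string]string, key string, def bool) bool {
	v, ok := opts[key]
	if !ok {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return def
	}
	return b
}

func main() {
	opts := map[string]string{
		"enable_partial_images": "true",
		"use_hard_links":        "false",
	}
	fmt.Println(stringBool(opts, "enable_partial_images", false)) // true
	fmt.Println(stringBool(opts, "use_hard_links", true))         // false
	fmt.Println(stringBool(opts, "convert_images", false))        // false (unset)
}
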
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
index 43278a1fc..5f421e0c1 100644
--- a/vendor/github.com/containers/storage/storage.conf-freebsd
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -39,27 +39,6 @@ graphroot = "/var/db/containers/storage"
 additionalimagestores = [
 ]
 
-# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
-# a container, to the UIDs/GIDs as they should appear outside of the container,
-# and the length of the range of UIDs/GIDs.  Additional mapped sets can be
-# listed and will be heeded by libraries, but there are limits to the number of
-# mappings which the kernel will allow when you later attempt to run a
-# container.
-#
-# remap-uids = 0:1668442479:65536
-# remap-gids = 0:1668442479:65536
-
-# Remap-User/Group is a user name which can be used to look up one or more UID/GID
-# ranges in the /etc/subuid or /etc/subgid file.  Mappings are set up starting
-# with an in-container ID of 0 and then a host-level ID taken from the lowest
-# range that matches the specified name, and using the length of that range.
-# Additional ranges are then assigned, using the ranges which specify the
-# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
-# until all of the entries have been used for maps.
-#
-# remap-user = "containers"
-# remap-group = "containers"
-
 # Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
 # ranges in the /etc/subuid and /etc/subgid file.  These ranges will be partitioned
 # to containers configured to create automatically a user namespace.  Containers
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 957675ba4..cd1cf861f 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -6,9 +6,11 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"strings"
 	"sync"
 	"syscall"
@@ -18,6 +20,7 @@ import (
 	_ "github.com/containers/storage/drivers/register"
 
 	drivers "github.com/containers/storage/drivers"
+	"github.com/containers/storage/internal/dedup"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
@@ -84,6 +87,20 @@ type ApplyStagedLayerOptions struct {
 	DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
 }
 
+// MultiListOptions contains options to pass to MultiList
+type MultiListOptions struct {
+	Images     bool // if true, Images will be listed in the result
+	Layers     bool // if true, layers will be listed in the result
+	Containers bool // if true, containers will be listed in the result
+}
+
+// MultiListResult contains slices of Images, Layers or Containers listed by the MultiList method
+type MultiListResult struct {
+	Images     []Image
+	Layers     []Layer
+	Containers []Container
+}
+
 // An roBigDataStore wraps up the read-only big-data related methods of the
 // various types of file-based lookaside stores that we implement.
 type roBigDataStore interface {
@@ -150,6 +167,26 @@ type flaggableStore interface {
 
 type StoreOptions = types.StoreOptions
 
+type DedupHashMethod = dedup.DedupHashMethod
+
+const (
+	DedupHashInvalid  = dedup.DedupHashInvalid
+	DedupHashCRC      = dedup.DedupHashCRC
+	DedupHashFileSize = dedup.DedupHashFileSize
+	DedupHashSHA256   = dedup.DedupHashSHA256
+)
+
+type (
+	DedupOptions = dedup.DedupOptions
+	DedupResult  = dedup.DedupResult
+)
+
+// DedupArgs is used to pass arguments to the Dedup command.
+type DedupArgs struct {
+	// Options that are passed directly to the internal/dedup.DedupDirs function.
+	Options DedupOptions
+}
+
 // Store wraps up the various types of file-based stores that we use into a
 // singleton object that initializes and manages them all together.
 type Store interface {
@@ -325,11 +362,17 @@ type Store interface {
 	//   }
 	ApplyDiff(to string, diff io.Reader) (int64, error)
 
-	// ApplyDiffer applies a diff to a layer.
+	// ApplyDiffWithDiffer applies a diff to a layer.
 	// It is the caller responsibility to clean the staging directory if it is not
-	// successfully applied with ApplyDiffFromStagingDirectory.
+	// successfully applied with ApplyStagedLayer.
+	// Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release.
 	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
 
+	// PrepareStagedLayer applies a diff to a layer.
+	// It is the caller's responsibility to clean the staging directory if it is not
+	// successfully applied with ApplyStagedLayer.
+	PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
 	// ApplyStagedLayer combines the functions of creating a layer and using the staging
 	// directory to populate it.
 	// It marks the layer for automatic removal if applying the diff fails for any reason.
@@ -561,6 +604,15 @@ type Store interface {
 	// usually by deleting layers and images which are damaged.  If the
 	// right options are set, it will remove containers as well.
 	Repair(report CheckReport, options *RepairOptions) []error
+
+	// MultiList returns a MultiListResult structure that contains layer, image, or container
+	// extracts according to the values in MultiListOptions.
+	// MultiList returns consistent values as of a single point in time.
+	// WARNING: The values may already be out of date by the time they are returned to the caller.
+	MultiList(MultiListOptions) (MultiListResult, error)
+
+	// Dedup deduplicates layers in the store.
+	Dedup(DedupArgs) (drivers.DedupResult, error)
 }
 
 // AdditionalLayer represents a layer that is contained in the additional layer store
@@ -849,8 +901,8 @@ func GetStore(options types.StoreOptions) (Store, error) {
 		graphOptions:        options.GraphDriverOptions,
 		imageStoreDir:       options.ImageStore,
 		pullOptions:         options.PullOptions,
-		uidMap:              copyIDMap(options.UIDMap),
-		gidMap:              copyIDMap(options.GIDMap),
+		uidMap:              copySlicePreferringNil(options.UIDMap),
+		gidMap:              copySlicePreferringNil(options.GIDMap),
 		autoUsernsUser:      options.RootAutoNsUser,
 		autoNsMinSize:       autoNsMinSize,
 		autoNsMaxSize:       autoNsMaxSize,
@@ -869,30 +921,6 @@ func GetStore(options types.StoreOptions) (Store, error) {
 	return s, nil
 }
 
-func copyUint32Slice(slice []uint32) []uint32 {
-	m := []uint32{}
-	if slice != nil {
-		m = make([]uint32, len(slice))
-		copy(m, slice)
-	}
-	if len(m) > 0 {
-		return m[:]
-	}
-	return nil
-}
-
-func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
-	m := []idtools.IDMap{}
-	if idmap != nil {
-		m = make([]idtools.IDMap, len(idmap))
-		copy(m, idmap)
-	}
-	if len(m) > 0 {
-		return m[:]
-	}
-	return nil
-}
-
 func (s *store) RunRoot() string {
 	return s.runRoot
 }
@@ -919,18 +947,16 @@ func (s *store) GraphOptions() []string {
 
 func (s *store) PullOptions() map[string]string {
 	cp := make(map[string]string, len(s.pullOptions))
-	for k, v := range s.pullOptions {
-		cp[k] = v
-	}
+	maps.Copy(cp, s.pullOptions)
 	return cp
 }
 
 func (s *store) UIDMap() []idtools.IDMap {
-	return copyIDMap(s.uidMap)
+	return copySlicePreferringNil(s.uidMap)
 }
 
 func (s *store) GIDMap() []idtools.IDMap {
-	return copyIDMap(s.gidMap)
+	return copySlicePreferringNil(s.gidMap)
 }
 
 // This must only be called when constructing store; it writes to fields that are assumed to be constant after construction.
@@ -1088,8 +1114,6 @@ func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
 		RunRoot:        s.runRoot,
 		DriverPriority: s.graphDriverPriority,
 		DriverOptions:  s.graphOptions,
-		UIDMaps:        s.uidMap,
-		GIDMaps:        s.gidMap,
 	}
 	return drivers.New(s.graphDriverName, config)
 }
@@ -1437,14 +1461,16 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
 	return true
 }
 
-// putLayer requires the rlstore, rlstores, as well as s.containerStore (even if not an argument to this function) to be locked for write.
+// On entry:
+// - rlstore must be locked for writing
+// - rlstores MUST NOT be locked
 func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
 	var parentLayer *Layer
 	var options LayerOptions
 	if lOptions != nil {
 		options = *lOptions
-		options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
-		options.Flags = copyStringInterfaceMap(lOptions.Flags)
+		options.BigData = slices.Clone(lOptions.BigData)
+		options.Flags = copyMapPreferringNil(lOptions.Flags)
 	}
 	if options.HostUIDMapping {
 		options.UIDMap = nil
@@ -1474,6 +1500,11 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 			return nil, -1, ErrLayerUnknown
 		}
 		parentLayer = ilayer
+
+		if err := s.containerStore.startWriting(); err != nil {
+			return nil, -1, err
+		}
+		defer s.containerStore.stopWriting()
 		containers, err := s.containerStore.Containers()
 		if err != nil {
 			return nil, -1, err
@@ -1490,6 +1521,13 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 			gidMap = ilayer.GIDMap
 		}
 	} else {
+		// FIXME? It’s unclear why we are holding containerStore locked here at all
+		// (and because we are not modifying it, why it is a write lock, not a read lock).
+		if err := s.containerStore.startWriting(); err != nil {
+			return nil, -1, err
+		}
+		defer s.containerStore.stopWriting()
+
 		if !options.HostUIDMapping && len(options.UIDMap) == 0 {
 			uidMap = s.uidMap
 		}
@@ -1497,23 +1535,17 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 			gidMap = s.gidMap
 		}
 	}
-	layerOptions := LayerOptions{
-		OriginalDigest:     options.OriginalDigest,
-		OriginalSize:       options.OriginalSize,
-		UncompressedDigest: options.UncompressedDigest,
-		Flags:              options.Flags,
-	}
 	if s.canUseShifting(uidMap, gidMap) {
-		layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
+		options.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
 	} else {
-		layerOptions.IDMappingOptions = types.IDMappingOptions{
+		options.IDMappingOptions = types.IDMappingOptions{
 			HostUIDMapping: options.HostUIDMapping,
 			HostGIDMapping: options.HostGIDMapping,
-			UIDMap:         copyIDMap(uidMap),
-			GIDMap:         copyIDMap(gidMap),
+			UIDMap:         copySlicePreferringNil(uidMap),
+			GIDMap:         copySlicePreferringNil(gidMap),
 		}
 	}
-	return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
+	return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo)
 }
 
 func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
@@ -1525,10 +1557,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
 		return nil, -1, err
 	}
 	defer rlstore.stopWriting()
-	if err := s.containerStore.startWriting(); err != nil {
-		return nil, -1, err
-	}
-	defer s.containerStore.stopWriting()
 	return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil)
 }
 
@@ -1582,8 +1610,8 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 						Metadata:     i.Metadata,
 						CreationDate: i.Created,
 						Digest:       i.Digest,
-						Digests:      copyDigestSlice(i.Digests),
-						NamesHistory: copyStringSlice(i.NamesHistory),
+						Digests:      copySlicePreferringNil(i.Digests),
+						NamesHistory: copySlicePreferringNil(i.NamesHistory),
 					}
 					for _, key := range i.BigDataNames {
 						data, err := store.BigData(id, key)
@@ -1600,7 +1628,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 							Digest: dataDigest,
 						})
 					}
-					namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
+					namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names))
 					break
 				}
 			}
@@ -1614,18 +1642,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 			if iOptions.Digest != "" {
 				options.Digest = iOptions.Digest
 			}
-			options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+			options.Digests = append(options.Digests, iOptions.Digests...)
 			if iOptions.Metadata != "" {
 				options.Metadata = iOptions.Metadata
 			}
 			options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
-			options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+			options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
 			if options.Flags == nil {
 				options.Flags = make(map[string]interface{})
 			}
-			for k, v := range iOptions.Flags {
-				options.Flags[k] = v
-			}
+			maps.Copy(options.Flags, iOptions.Flags)
 		}
 
 		if options.CreationDate.IsZero() {
@@ -1734,8 +1760,8 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
 		layerOptions.IDMappingOptions = types.IDMappingOptions{
 			HostUIDMapping: options.HostUIDMapping,
 			HostGIDMapping: options.HostGIDMapping,
-			UIDMap:         copyIDMap(options.UIDMap),
-			GIDMap:         copyIDMap(options.GIDMap),
+			UIDMap:         copySlicePreferringNil(options.UIDMap),
+			GIDMap:         copySlicePreferringNil(options.GIDMap),
 		}
 	}
 	layerOptions.TemplateLayer = layer.ID
@@ -1757,12 +1783,12 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
 	var options ContainerOptions
 	if cOptions != nil {
 		options = *cOptions
-		options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
-		options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
-		options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
-		options.Flags = copyStringInterfaceMap(cOptions.Flags)
-		options.MountOpts = copyStringSlice(cOptions.MountOpts)
-		options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
+		options.IDMappingOptions.UIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.UIDMap)
+		options.IDMappingOptions.GIDMap = copySlicePreferringNil(cOptions.IDMappingOptions.GIDMap)
+		options.LabelOpts = copySlicePreferringNil(cOptions.LabelOpts)
+		options.Flags = copyMapPreferringNil(cOptions.Flags)
+		options.MountOpts = copySlicePreferringNil(cOptions.MountOpts)
+		options.StorageOpt = copyMapPreferringNil(cOptions.StorageOpt)
 		options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
 	}
 	if options.HostUIDMapping {
@@ -1887,8 +1913,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
 		layerOptions.IDMappingOptions = types.IDMappingOptions{
 			HostUIDMapping: idMappingsOptions.HostUIDMapping,
 			HostGIDMapping: idMappingsOptions.HostGIDMapping,
-			UIDMap:         copyIDMap(uidMap),
-			GIDMap:         copyIDMap(gidMap),
+			UIDMap:         copySlicePreferringNil(uidMap),
+			GIDMap:         copySlicePreferringNil(gidMap),
 		}
 	}
 	if options.Flags == nil {
@@ -1925,8 +1951,8 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
 		options.IDMappingOptions = types.IDMappingOptions{
 			HostUIDMapping: len(options.UIDMap) == 0,
 			HostGIDMapping: len(options.GIDMap) == 0,
-			UIDMap:         copyIDMap(options.UIDMap),
-			GIDMap:         copyIDMap(options.GIDMap),
+			UIDMap:         copySlicePreferringNil(options.UIDMap),
+			GIDMap:         copySlicePreferringNil(options.GIDMap),
 		}
 		container, err := s.containerStore.create(id, names, imageID, layer, &options)
 		if err != nil || container == nil {
@@ -2175,7 +2201,7 @@ func (s *store) ImageSize(id string) (int64, error) {
 			}
 			// The UncompressedSize is only valid if there's a digest to go with it,
 			// and only if it is not -1 (which means the size is unknown).
 			n := layer.UncompressedSize
-			if layer.UncompressedDigest == "" {
+			if layer.UncompressedDigest == "" || n == -1 {
 				// Compute the size.
 				n, err = layerStore.DiffSize("", layer.ID)
 				if err != nil {
@@ -2424,10 +2450,10 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
 			options := ImageOptions{
 				CreationDate: i.Created,
 				Digest:       i.Digest,
-				Digests:      copyDigestSlice(i.Digests),
+				Digests:      copySlicePreferringNil(i.Digests),
 				Metadata:     i.Metadata,
-				NamesHistory: copyStringSlice(i.NamesHistory),
-				Flags:        copyStringInterfaceMap(i.Flags),
+				NamesHistory: copySlicePreferringNil(i.NamesHistory),
+				Flags:        copyMapPreferringNil(i.Flags),
 			}
 			for _, key := range i.BigDataNames {
 				data, err := store.BigData(id, key)
@@ -2844,7 +2870,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
 		exists := store.Exists(id)
 		store.stopReading()
 		if exists {
-			return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown)
+			return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
 		}
 	}
 
@@ -2928,14 +2954,44 @@ func (s *store) Unmount(id string, force bool) (bool, error) {
 }
 
 func (s *store) Changes(from, to string) ([]archive.Change, error) {
-	if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) {
+	// NaiveDiff could cause mounts to happen without a lock, so be safe
+	// and treat the .Diff operation as a Mount.
+	// We need to make sure the home mount is present when the Mount is done,
+	// which happens by possibly reinitializing the graph driver in
+	// startUsingGraphDriver().
+	if err := s.startUsingGraphDriver(); err != nil {
+		return nil, err
+	}
+	defer s.stopUsingGraphDriver()
+
+	rlstore, lstores, err := s.bothLayerStoreKindsLocked()
+	if err != nil {
+		return nil, err
+	}
+
+	// While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs)
+	// the overlay driver requires the primary layer store to be locked RW; see
+	// drivers/overlay.Driver.getMergedDir.
+	if err := rlstore.startWriting(); err != nil {
+		return nil, err
+	}
+	if rlstore.Exists(to) {
+		res, err := rlstore.Changes(from, to)
+		rlstore.stopWriting()
+		return res, err
+	}
+	rlstore.stopWriting()
+
+	for _, s := range lstores {
+		store := s
+		if err := store.startReading(); err != nil {
+			return nil, err
+		}
 		if store.Exists(to) {
 			res, err := store.Changes(from, to)
-			return res, true, err
+			store.stopReading()
+			return res, err
 		}
-		return nil, false, nil
-	}); done {
-		return res, err
+		store.stopReading()
 	}
 	return nil, ErrLayerUnknown
 }
@@ -2966,12 +3022,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
 	}
 	defer s.stopUsingGraphDriver()
 
-	layerStores, err := s.allLayerStoresLocked()
+	rlstore, lstores, err := s.bothLayerStoreKindsLocked()
 	if err != nil {
 		return nil, err
 	}
 
-	for _, s := range layerStores {
+	// While the general rules require the layer store to only be locked RO (apart from known LOCKING BUGs)
+	// the overlay driver requires the primary layer store to be locked RW; see
+	// drivers/overlay.Driver.getMergedDir.
+	if err := rlstore.startWriting(); err != nil {
+		return nil, err
+	}
+	if rlstore.Exists(to) {
+		rc, err := rlstore.Diff(from, to, options)
+		if rc != nil && err == nil {
+			wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
+				err := rc.Close()
+				rlstore.stopWriting()
+				return err
+			})
+			return wrapped, nil
+		}
+		rlstore.stopWriting()
+		return rc, err
+	}
+	rlstore.stopWriting()
+
+	for _, s := range lstores {
 		store := s
 		if err := store.startReading(); err != nil {
 			return nil, err
@@ -3009,16 +3086,14 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
 		return layer, err
 	}
 	if err == nil {
+		// This code path exists only for cmd/containers/storage.applyDiffUsingStagingDirectory; we have tests that
+		// assume layer creation and applying a staged layer are separate steps. Production pull code always uses the
+		// other path, where layer creation is atomic.
 		return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions)
 	}
 
 	// if the layer doesn't exist yet, try to create it.
 
-	if err := s.containerStore.startWriting(); err != nil {
-		return nil, err
-	}
-	defer s.containerStore.stopWriting()
-
 	slo := stagedLayerOptions{
 		DiffOutput:  args.DiffOutput,
 		DiffOptions: args.DiffOptions,
@@ -3034,13 +3109,19 @@ func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) e
 	return err
 }
 
+func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+	rlstore, err := s.getLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	return rlstore.applyDiffWithDifferNoLock(options, differ)
+}
+
 func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
-	return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
-		if to != "" && !rlstore.Exists(to) {
-			return nil, ErrLayerUnknown
-		}
-		return rlstore.ApplyDiffWithDiffer(to, options, differ)
-	})
+	if to != "" {
+		return nil, fmt.Errorf("ApplyDiffWithDiffer does not support a non-empty 'to' parameter")
+	}
+	return s.PrepareStagedLayer(options, differ)
 }
 
 func (s *store) DifferTarget(id string) (string, error) {
@@ -3593,79 +3674,47 @@ func makeBigDataBaseName(key string) string {
 }
 
 func stringSliceWithoutValue(slice []string, value string) []string {
-	modified := make([]string, 0, len(slice))
-	for _, v := range slice {
-		if v == value {
-			continue
-		}
-		modified = append(modified, v)
-	}
-	return modified
+	return slices.DeleteFunc(slices.Clone(slice), func(v string) bool {
+		return v == value
+	})
 }
 
-func copyStringSlice(slice []string) []string {
-	if len(slice) == 0 {
+// copySlicePreferringNil returns a copy of the slice.
+// If s is empty, nil is returned.
+func copySlicePreferringNil[S ~[]E, E any](s S) S {
+	if len(s) == 0 {
 		return nil
 	}
-	ret := make([]string, len(slice))
-	copy(ret, slice)
-	return ret
+	return slices.Clone(s)
 }
 
-func copyStringInt64Map(m map[string]int64) map[string]int64 {
-	ret := make(map[string]int64, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
-func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
-	ret := make(map[string]digest.Digest, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
-func copyStringStringMap(m map[string]string) map[string]string {
-	ret := make(map[string]string, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
-func copyDigestSlice(slice []digest.Digest) []digest.Digest {
-	if len(slice) == 0 {
+// copyMapPreferringNil returns a shallow clone of map m.
+// If m is empty, nil is returned.
+//
+// (As of, e.g., Go 1.23, maps.Clone preserves nil, but that’s not a documented promise;
+// and this function turns even non-nil empty maps into nil.)
+func copyMapPreferringNil[K comparable, V any](m map[K]V) map[K]V {
+	if len(m) == 0 {
 		return nil
 	}
-	ret := make([]digest.Digest, len(slice))
-	copy(ret, slice)
-	return ret
+	return maps.Clone(m)
 }
 
-// copyStringInterfaceMap still forces us to assume that the interface{} is
-// a non-pointer scalar value
-func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
-	ret := make(map[string]interface{}, len(m))
+// newMapFrom returns a shallow clone of map m.
+// If m is empty, an empty map is allocated and returned.
+func newMapFrom[K comparable, V any](m map[K]V) map[K]V {
+	ret := make(map[K]V, len(m))
 	for k, v := range m {
 		ret[k] = v
 	}
 	return ret
 }
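+
+// A quick illustration of the nil-preferring semantics above (sketch only):
+//
+//	copyMapPreferringNil(map[string]string{}) // nil: empty maps normalize to nil
+//	copySlicePreferringNil([]string{}) == nil // true
+//	newMapFrom[string, int](nil)              // non-nil empty map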
 
-func copyLayerBigDataOptionSlice(slice []LayerBigDataOption) []LayerBigDataOption {
-	ret := make([]LayerBigDataOption, len(slice))
-	copy(ret, slice)
-	return ret
-}
-
 func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {
 	ret := make([]ImageBigDataOption, len(slice))
 	for i := range slice {
 		ret[i].Key = slice[i].Key
-		ret[i].Data = append([]byte{}, slice[i].Data...)
+		ret[i].Data = slices.Clone(slice[i].Data)
 		ret[i].Digest = slice[i].Digest
 	}
 	return ret
@@ -3675,7 +3724,7 @@ func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []Container
 	ret := make([]ContainerBigDataOption, len(slice))
 	for i := range slice {
 		ret[i].Key = slice[i].Key
-		ret[i].Data = append([]byte{}, slice[i].Data...)
+		ret[i].Data = slices.Clone(slice[i].Data)
 	}
 	return ret
 }
@@ -3729,10 +3778,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
 			return nil, err
 		}
 		key = strings.ToLower(key)
-		for _, m := range mountOpts {
-			if m == key {
-				return strings.Split(val, ","), nil
-			}
+		if slices.Contains(mountOpts, key) {
+			return strings.Split(val, ","), nil
 		}
 	}
 	return nil, nil
@@ -3740,11 +3787,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
 
 // Free removes the store from the list of stores
 func (s *store) Free() {
-	for i := 0; i < len(stores); i++ {
-		if stores[i] == s {
-			stores = append(stores[:i], stores[i+1:]...)
-			return
-		}
+	if i := slices.Index(stores, s); i != -1 {
+		stores = slices.Delete(stores, i, i+1)
 	}
 }
 
@@ -3771,3 +3815,95 @@ func (s *store) GarbageCollect() error {
 
 	return firstErr
 }
+
+// MultiList returns a MultiListResult structure that contains layer, image, or container
+// listings according to the values in MultiListOptions.
+func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) {
+	// TODO: Possible optimization: Deduplicate content from multiple stores.
+	out := MultiListResult{}
+
+	if options.Layers {
+		layerStores, err := s.allLayerStores()
+		if err != nil {
+			return MultiListResult{}, err
+		}
+		for _, roStore := range layerStores {
+			if err := roStore.startReading(); err != nil {
+				return MultiListResult{}, err
+			}
+			defer roStore.stopReading()
+			layers, err := roStore.Layers()
+			if err != nil {
+				return MultiListResult{}, err
+			}
+			out.Layers = append(out.Layers, layers...)
+		}
+	}
+
+	if options.Images {
+		for _, roStore := range s.allImageStores() {
+			if err := roStore.startReading(); err != nil {
+				return MultiListResult{}, err
+			}
+			defer roStore.stopReading()
+
+			images, err := roStore.Images()
+			if err != nil {
+				return MultiListResult{}, err
+			}
+			out.Images = append(out.Images, images...)
+		}
+	}
+
+	if options.Containers {
+		containers, _, err := readContainerStore(s, func() ([]Container, bool, error) {
+			res, err := s.containerStore.Containers()
+			return res, true, err
+		})
+		if err != nil {
+			return MultiListResult{}, err
+		}
+		out.Containers = append(out.Containers, containers...)
+	}
+	return out, nil
+}
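+
+// Illustrative call (a sketch; assumes a store s as defined above):
+//
+//	res, err := s.MultiList(MultiListOptions{Layers: true, Images: true, Containers: true})
+//	// res.Layers, res.Images, and res.Containers aggregate entries from all stores.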
+
+// Dedup deduplicates layers in the store.
+func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) {
+	imgs, err := s.Images()
+	if err != nil {
+		return drivers.DedupResult{}, err
+	}
+	var topLayers []string
+	for _, i := range imgs {
+		topLayers = append(topLayers, i.TopLayer)
+		topLayers = append(topLayers, i.MappedTopLayers...)
+	}
+	return writeToLayerStore(s, func(rlstore rwLayerStore) (drivers.DedupResult, error) {
+		layers := make(map[string]struct{})
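+		// Walk each top layer up its parent chain, collecting every
+		// reachable layer exactly once; layers unknown to the writable
+		// store end the walk.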
+		for _, i := range topLayers {
+			cur := i
+			for cur != "" {
+				if _, visited := layers[cur]; visited {
+					break
+				}
+				l, err := rlstore.Get(cur)
+				if err != nil {
+					if err == ErrLayerUnknown {
+						break
+					}
+					return drivers.DedupResult{}, err
+				}
+				layers[cur] = struct{}{}
+				cur = l.Parent
+			}
+		}
+		r := drivers.DedupArgs{
+			Options: req.Options,
+		}
+		for l := range layers {
+			r.Layers = append(r.Layers, l)
+		}
+		return rlstore.dedup(r)
+	})
+}
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 03e5f7ab6..efc08c476 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -344,15 +344,15 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
 			dirEntries, err := os.ReadDir(opts.GraphRoot)
 			if err == nil {
 				for _, entry := range dirEntries {
-					if strings.HasSuffix(entry.Name(), "-images") {
-						opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images")
+					if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok {
+						opts.GraphDriverName = name
 						break
 					}
 				}
 			}
 
 			if opts.GraphDriverName == "" {
-				if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
+				if canUseRootlessOverlay() {
 					opts.GraphDriverName = overlayDriver
 				} else {
 					opts.GraphDriverName = "vfs"
@@ -481,33 +481,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
 	if config.Storage.Options.MountOpt != "" {
 		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
 	}
-
-	uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
-	if err != nil {
-		return err
-	}
-	gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
-	if err != nil {
-		return err
-	}
-
-	if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
-		config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
-	}
-	if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
-		config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
-	}
-	if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
-		mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
-		if err != nil {
-			logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
-			return err
-		}
-		uidmap = mappings.UIDs()
-		gidmap = mappings.GIDs()
-	}
-	storeOptions.UIDMap = uidmap
-	storeOptions.GIDMap = gidmap
 	storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
 	if config.Storage.Options.AutoUsernsMinSize > 0 {
 		storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_bsd.go
similarity index 90%
rename from vendor/github.com/containers/storage/types/options_freebsd.go
rename to vendor/github.com/containers/storage/types/options_bsd.go
index be2bc2f27..040fdc797 100644
--- a/vendor/github.com/containers/storage/types/options_freebsd.go
+++ b/vendor/github.com/containers/storage/types/options_bsd.go
@@ -1,3 +1,5 @@
+//go:build freebsd || netbsd
+
 package types
 
 const (
@@ -14,6 +16,6 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go
index 3eecc2b82..27ba6a061 100644
--- a/vendor/github.com/containers/storage/types/options_darwin.go
+++ b/vendor/github.com/containers/storage/types/options_darwin.go
@@ -11,6 +11,6 @@ const (
 var defaultOverrideConfigFile = "/etc/containers/storage.conf"
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go
index a28e82883..09cbae54b 100644
--- a/vendor/github.com/containers/storage/types/options_linux.go
+++ b/vendor/github.com/containers/storage/types/options_linux.go
@@ -22,7 +22,7 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	// we check first for fuse-overlayfs since it is cheaper.
 	if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
 		return true
diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go
index c1bea9fac..99a67ff21 100644
--- a/vendor/github.com/containers/storage/types/options_windows.go
+++ b/vendor/github.com/containers/storage/types/options_windows.go
@@ -14,6 +14,6 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf
index c42d33fb9..761b3a795 100644
--- a/vendor/github.com/containers/storage/types/storage_test.conf
+++ b/vendor/github.com/containers/storage/types/storage_test.conf
@@ -25,16 +25,6 @@ rootless_storage_path = "$HOME/$UID/containers/storage"
 additionalimagestores = [
 ]
 
-# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
-# a container, to the UIDs/GIDs as they should appear outside of the container,
-# and the length of the range of UIDs/GIDs.  Additional mapped sets can be
-# listed and will be heeded by libraries, but there are limits to the number of
-# mappings which the kernel will allow when you later attempt to run a
-# container.
-#
-remap-uids = "0:1000000000:30000"
-remap-gids = "0:1500000000:60000"
-
 [storage.options.overlay]
 
 # mountopt specifies comma separated list of extra mount options
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index b313a4728..73fcd2405 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -66,7 +66,10 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
 		return
 	}
 
-	ReloadConfigurationFile(configFile, storeOptions)
+	if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+		logrus.Warningf("Failed to reload %q: %v", configFile, err)
+		return
+	}
 
 	prevReloadConfig.storeOptions = storeOptions
 	prevReloadConfig.mod = mtime
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 57120731b..09919394c 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -1,18 +1,21 @@
+//go:build linux
+
 package storage
 
 import (
 	"fmt"
 	"os"
 	"os/user"
-	"path/filepath"
 	"strconv"
 
 	drivers "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/containers/storage/types"
+	securejoin "github.com/cyphar/filepath-securejoin"
 	libcontainerUser "github.com/moby/sys/user"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 // getAdditionalSubIDs looks up the additional IDs configured for
@@ -85,40 +88,59 @@ const nobodyUser = 65534
 // parseMountedFiles returns the size of the ID range needed to cover the UIDs
 // and GIDs found in a container's /etc/passwd and /etc/group files (the largest
 // ID found plus one; e.g. a maximum UID of 1000 yields 1001).
 func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+	var (
+		passwd *os.File
+		group  *os.File
+		size   int
+		err    error
+	)
 	if passwdFile == "" {
-		passwdFile = filepath.Join(containerMount, "etc/passwd")
-	}
-	if groupFile == "" {
-		groupFile = filepath.Join(groupFile, "etc/group")
+		passwd, err = secureOpen(containerMount, "/etc/passwd")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		passwd, err = os.Open(passwdFile)
 	}
-
-	size := 0
-
-	users, err := libcontainerUser.ParsePasswdFile(passwdFile)
 	if err == nil {
-		for _, u := range users {
-			// Skip the "nobody" user otherwise we end up with 65536
-			// ids with most images
-			if u.Name == "nobody" {
-				continue
-			}
-			if u.Uid > size && u.Uid != nobodyUser {
-				size = u.Uid
-			}
-			if u.Gid > size && u.Gid != nobodyUser {
-				size = u.Gid
+		defer passwd.Close()
+
+		users, err := libcontainerUser.ParsePasswd(passwd)
+		if err == nil {
+			for _, u := range users {
+				// Skip the "nobody" user, otherwise we end up with 65536
+				// IDs with most images.
+				if u.Name == "nobody" || u.Name == "nogroup" {
+					continue
+				}
+				if u.Uid > size && u.Uid != nobodyUser {
+					size = u.Uid + 1
+				}
+				if u.Gid > size && u.Gid != nobodyUser {
+					size = u.Gid + 1
+				}
 			}
 		}
 	}
 
-	groups, err := libcontainerUser.ParseGroupFile(groupFile)
+	if groupFile == "" {
+		group, err = secureOpen(containerMount, "/etc/group")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		group, err = os.Open(groupFile)
+	}
 	if err == nil {
-		for _, g := range groups {
-			if g.Name == "nobody" {
-				continue
-			}
-			if g.Gid > size && g.Gid != nobodyUser {
-				size = g.Gid
+		defer group.Close()
+
+		groups, err := libcontainerUser.ParseGroup(group)
+		if err == nil {
+			for _, g := range groups {
+				if g.Name == "nobody" || g.Name == "nogroup" {
+					continue
+				}
+				if g.Gid > size && g.Gid != nobodyUser {
+					size = g.Gid + 1
+				}
 			}
 		}
 	}
@@ -309,3 +331,14 @@ func getAutoUserNSIDMappings(
 	gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...)
 	return uidMap, gidMap, nil
 }
+
+// secureOpen opens a file inside a container mount read-only, without
+// following symlinks out of the mount.
+func secureOpen(containerMount, file string) (*os.File, error) {
+	tmpFile, err := securejoin.OpenInRoot(containerMount, file)
+	if err != nil {
+		return nil, err
+	}
+	defer tmpFile.Close()
+
+	return securejoin.Reopen(tmpFile, unix.O_RDONLY)
+}
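+
+// Illustrative use of secureOpen (a sketch; the mount path is hypothetical):
+//
+//	f, err := secureOpen("/var/lib/containers/storage/overlay/<id>/merged", "/etc/passwd")
+//	if err == nil {
+//		defer f.Close()
+//		users, _ := libcontainerUser.ParsePasswd(f) // as parseMountedFiles does above
+//	}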
diff --git a/vendor/github.com/containers/storage/userns_unsupported.go b/vendor/github.com/containers/storage/userns_unsupported.go
new file mode 100644
index 000000000..e37c18fe4
--- /dev/null
+++ b/vendor/github.com/containers/storage/userns_unsupported.go
@@ -0,0 +1,14 @@
+//go:build !linux
+
+package storage
+
+import (
+	"errors"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/types"
+)
+
+func (s *store) getAutoUserNS(_ *types.AutoUserNsOptions, _ *Image, _ rwLayerStore, _ []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, errors.New("user namespaces are not supported on this platform")
+}
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 5bade6ffe..c61d79837 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -2,6 +2,7 @@ package storage
 
 import (
 	"fmt"
+	"slices"
 
 	"github.com/containers/storage/types"
 )
@@ -41,22 +42,12 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
 		// remove given names from old names
 		result = make([]string, 0, len(oldNames))
 		for _, name := range oldNames {
-			// only keep names in final result which do not intersect with input names
-			// basically `result = oldNames - opParameters`
-			nameShouldBeRemoved := false
-			for _, opName := range opParameters {
-				if name == opName {
-					nameShouldBeRemoved = true
-				}
-			}
-			if !nameShouldBeRemoved {
+			if !slices.Contains(opParameters, name) {
 				result = append(result, name)
 			}
 		}
 	case addNames:
-		result = make([]string, 0, len(opParameters)+len(oldNames))
-		result = append(result, opParameters...)
-		result = append(result, oldNames...)
+		result = slices.Concat(opParameters, oldNames)
 	default:
 		return result, errInvalidUpdateNameOperation
 	}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
index 7436896e1..cb1252b53 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
+++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md
@@ -6,6 +6,72 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 ## [Unreleased] ##
 
+## [0.3.6] - 2024-12-17 ##
+
+### Compatibility ###
+- The minimum Go version requirement for `filepath-securejoin` is now Go 1.18
+  (we use generics internally).
+
+  For reference, `filepath-securejoin@v0.3.0` somewhat-arbitrarily bumped the
+  Go version requirement to 1.21.
+
+  While we did make some use of Go 1.21 stdlib features (and in principle Go
+  versions <= 1.21 are no longer supported by upstream), some
+  downstreams have complained that the version bump has meant that they have to
+  do workarounds when backporting fixes that use the new `filepath-securejoin`
+  API onto old branches. This is not an ideal situation, but since using this
+  library is probably better for most downstreams than a hand-rolled
+  workaround, we now have compatibility shims that allow us to build on older
+  Go versions.
+- Lower minimum version requirement for `golang.org/x/sys` to `v0.18.0` (we
+  need the wrappers for `fsconfig(2)`), which should also make backporting
+  patches to older branches easier.
+
+## [0.3.5] - 2024-12-06 ##
+
+### Fixed ###
+- `MkdirAll` will now no longer return an `EEXIST` error if two racing
+  processes are creating the same directory. We will still verify that the path
+  is a directory, but this will avoid spurious errors when multiple threads or
+  programs are trying to `MkdirAll` the same path. opencontainers/runc#4543
+
+## [0.3.4] - 2024-10-09 ##
+
+### Fixed ###
+- Previously, some testing mocks we had resulted in us doing `import "testing"`
+  in non-`_test.go` code, which made some downstreams like Kubernetes unhappy.
+  This has been fixed. (#32)
+
+## [0.3.3] - 2024-09-30 ##
+
+### Fixed ###
+- The mode and owner verification logic in `MkdirAll` has been removed. This
+  was originally intended to protect against some theoretical attacks but upon
+  further consideration these protections don't actually buy us anything and
+  they were causing spurious errors with more complicated filesystem setups.
+- The "is the created directory empty" logic in `MkdirAll` has also been
+  removed. This was not causing us issues yet, but some pseudofilesystems (such
+  as `cgroup`) create non-empty directories and so this logic would've been
+  wrong for such cases.
+
+## [0.3.2] - 2024-09-13 ##
+
+### Changed ###
+- Passing the `S_ISUID` or `S_ISGID` modes to `MkdirAllInRoot` will now return
+  an explicit error saying that those bits are ignored by `mkdirat(2)`. In the
+  past a different error was returned, but since the silent ignoring behaviour
+  is codified in the man pages a more explicit error seems apt. While silently
+  ignoring these bits would be the most compatible option, it could lead to
+  users thinking their code sets these bits when it doesn't. Programs that need
+  to deal with compatibility can mask the bits themselves. (#23, #25)
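+
+  Callers that need the old silent behaviour can mask the bits themselves
+  before calling (a sketch, not part of the library API; `unix` is
+  `golang.org/x/sys/unix`):
+
+      mode &^= unix.S_ISUID | unix.S_ISGID
+      err := securejoin.MkdirAll(root, unsafePath, mode)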
+
+### Fixed ###
+- If a directory has `S_ISGID` set, then all child directories will have
+  `S_ISGID` set when created and a different gid will be used for any inode
+  created under the directory. Previously, the "expected owner and mode"
+  validation in `securejoin.MkdirAll` did not correctly handle this. We now
+  correctly handle this case. (#24, #25)
+
 ## [0.3.1] - 2024-07-23 ##
 
 ### Changed ###
@@ -127,7 +193,12 @@ This is our first release of `github.com/cyphar/filepath-securejoin`,
 containing a full implementation with a coverage of 93.5% (the only missing
 cases are the error cases, which are hard to mocktest at the moment).
 
-[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...HEAD
+[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...HEAD
+[0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6
+[0.3.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.4...v0.3.5
+[0.3.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.3...v0.3.4
+[0.3.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.2...v0.3.3
+[0.3.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...v0.3.2
 [0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1
 [0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0
 [0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
index 253956f86..eaeb53fcd 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/README.md
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -1,5 +1,6 @@
 ## `filepath-securejoin` ##
 
+[![Go Documentation](https://pkg.go.dev/badge/github.com/cyphar/filepath-securejoin.svg)](https://pkg.go.dev/github.com/cyphar/filepath-securejoin)
 [![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
 
 ### Old API ###
@@ -85,7 +86,7 @@ more secure. In particular:
   or avoid being tricked by a `/proc` that is not legitimate. This is done
   using [`openat2`][openat2.2] for all users, and privileged users will also be
   further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2]
-  (Linux 4.18 or later).
+  (Linux 5.2 or later).
 
 [openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html
 [fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
index 9e11b32fc..449d7e73a 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/VERSION
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -1 +1 @@
-0.3.1
+0.3.6
diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go
new file mode 100644
index 000000000..1ec7d065e
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go
@@ -0,0 +1,39 @@
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package securejoin implements a set of helpers to make it easier to write Go
+// code that is safe against symlink-related escape attacks. The primary idea
+// is to let you resolve a path within a rootfs directory as if the rootfs was
+// a chroot.
+//
+// securejoin has two APIs, a "legacy" API and a "modern" API.
+//
+// The legacy API is [SecureJoin] and [SecureJoinVFS]. These methods are
+// **not** safe against race conditions where an attacker changes the
+// filesystem after (or during) the [SecureJoin] operation.
+//
+// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived
+// functions). These are safe against racing attackers and have several other
+// protections that are not provided by the legacy API. There are many more
+// operations that most programs expect to be able to do safely, but we do not
+// provide explicit support for them because we want to encourage users to
+// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a
+// cross-language next-generation library that is entirely designed around
+// operating on paths safely.
+//
+// securejoin has been used by several container runtimes (Docker, runc,
+// Kubernetes, etc) for quite a few years as a de-facto standard for operating
+// on container filesystem paths "safely". However, most users still use the
+// legacy API, which is unsafe against various attacks (there is a fairly long
+// history of CVEs in dependent projects as a result). Users should switch to the modern
+// API as soon as possible (or even better, switch to libpathrs).
+//
+// This project was initially intended to be included in the Go standard
+// library, but [it was rejected](https://go.dev/issue/20126). There is now a
+// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API
+// that shares some of the goals of filepath-securejoin. However, that design
+// is intended to work like `openat2(RESOLVE_BENEATH)`, which does not fit the
+// use case of container runtimes and most system tools.
+package securejoin
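+
+// A minimal sketch contrasting the two APIs described above (paths are
+// illustrative, error handling elided):
+//
+//	// Legacy API: returns a path string; racy if the tree changes afterwards.
+//	p, _ := securejoin.SecureJoin("/srv/rootfs", "etc/hostname")
+//
+//	// Modern API (Linux): resolves inside the root and returns an *os.File handle.
+//	f, _ := securejoin.OpenInRoot("/srv/rootfs", "etc/hostname")
+//	defer f.Close()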
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go
new file mode 100644
index 000000000..42452bbf9
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go
@@ -0,0 +1,18 @@
+//go:build linux && go1.20
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"fmt"
+)
+
+// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
+// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap
+// is only guaranteed to give you baseErr).
+func wrapBaseError(baseErr, extraErr error) error {
+	return fmt.Errorf("%w: %w", extraErr, baseErr)
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go
new file mode 100644
index 000000000..e7adca3fd
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go
@@ -0,0 +1,38 @@
+//go:build linux && !go1.20
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"fmt"
+)
+
+type wrappedError struct {
+	inner   error
+	isError error
+}
+
+func (err wrappedError) Is(target error) bool {
+	return err.isError == target
+}
+
+func (err wrappedError) Unwrap() error {
+	return err.inner
+}
+
+func (err wrappedError) Error() string {
+	return fmt.Sprintf("%v: %v", err.isError, err.inner)
+}
+
+// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except
+// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap
+// is only guaranteed to give you baseErr).
+func wrapBaseError(baseErr, extraErr error) error {
+	return wrappedError{
+		inner:   baseErr,
+		isError: extraErr,
+	}
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go
new file mode 100644
index 000000000..ddd6fa9a4
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go
@@ -0,0 +1,32 @@
+//go:build linux && go1.21
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"slices"
+	"sync"
+)
+
+func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S {
+	return slices.DeleteFunc(slice, delFn)
+}
+
+func slices_Contains[S ~[]E, E comparable](slice S, val E) bool {
+	return slices.Contains(slice, val)
+}
+
+func slices_Clone[S ~[]E, E any](slice S) S {
+	return slices.Clone(slice)
+}
+
+func sync_OnceValue[T any](f func() T) func() T {
+	return sync.OnceValue(f)
+}
+
+func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
+	return sync.OnceValues(f)
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go
new file mode 100644
index 000000000..f1e6fe7e7
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go
@@ -0,0 +1,124 @@
+//go:build linux && !go1.21
+
+// Copyright (C) 2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import (
+	"sync"
+)
+
+// These are very minimal implementations of functions that appear in Go 1.21's
+// stdlib, included so that we can build on older Go versions. Most are
+// borrowed directly from the stdlib, and a few are modified to be "obviously
+// correct" without needing to copy too many other helpers.
+
+// clearSlice is equivalent to the builtin clear from Go 1.21.
+// Copied from the Go 1.24 stdlib implementation.
+func clearSlice[S ~[]E, E any](slice S) {
+	var zero E
+	for i := range slice {
+		slice[i] = zero
+	}
+}
+
+// Copied from the Go 1.24 stdlib implementation.
+func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+	for i := range s {
+		if f(s[i]) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Copied from the Go 1.24 stdlib implementation.
+func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+	i := slices_IndexFunc(s, del)
+	if i == -1 {
+		return s
+	}
+	// Don't start copying elements until we find one to delete.
+	for j := i + 1; j < len(s); j++ {
+		if v := s[j]; !del(v) {
+			s[i] = v
+			i++
+		}
+	}
+	clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+	return s[:i]
+}
+
+// Similar to the stdlib slices.Contains, except that we don't have
+// slices.Index so we need to use slices.IndexFunc for this non-Func helper.
+func slices_Contains[S ~[]E, E comparable](s S, v E) bool {
+	return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0
+}
+
+// Copied from the Go 1.24 stdlib implementation.
+func slices_Clone[S ~[]E, E any](s S) S {
+	// Preserve nil in case it matters.
+	if s == nil {
+		return nil
+	}
+	return append(S([]E{}), s...)
+}
+
+// Copied from the Go 1.24 stdlib implementation.
+func sync_OnceValue[T any](f func() T) func() T {
+	var (
+		once   sync.Once
+		valid  bool
+		p      any
+		result T
+	)
+	g := func() {
+		defer func() {
+			p = recover()
+			if !valid {
+				panic(p)
+			}
+		}()
+		result = f()
+		f = nil
+		valid = true
+	}
+	return func() T {
+		once.Do(g)
+		if !valid {
+			panic(p)
+		}
+		return result
+	}
+}
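+
+// Typical use of the shim (illustrative; the probe function is hypothetical):
+//
+//	var hasFeature = sync_OnceValue(func() bool {
+//		return probeExpensiveKernelFeature() // runs at most once
+//	})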
+
+// Copied from the Go 1.24 stdlib implementation.
+func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
+	var (
+		once  sync.Once
+		valid bool
+		p     any
+		r1    T1
+		r2    T2
+	)
+	g := func() {
+		defer func() {
+			p = recover()
+			if !valid {
+				panic(p)
+			}
+		}()
+		r1, r2 = f()
+		f = nil
+		valid = true
+	}
+	return func() (T1, T2) {
+		once.Do(g)
+		if !valid {
+			panic(p)
+		}
+		return r1, r2
+	}
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
index bd86a48b0..e0ee3f2b5 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/join.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -3,11 +3,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package securejoin is an implementation of the hopefully-soon-to-be-included
-// SecureJoin helper that is meant to be part of the "path/filepath" package.
-// The purpose of this project is to provide a PoC implementation to make the
-// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
-// tangible.
 package securejoin
 
 import (
@@ -22,27 +17,27 @@ const maxSymlinkLimit = 255
 
 // IsNotExist tells you if err is an error that implies that either the path
 // accessed does not exist (or path components don't exist). This is
-// effectively a more broad version of os.IsNotExist.
+// effectively a broader version of [os.IsNotExist].
 func IsNotExist(err error) bool {
 	// Check that it's not actually an ENOTDIR, which in some cases is a more
 	// convoluted case of ENOENT (usually involving weird paths).
 	return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
 }
 
-// SecureJoinVFS joins the two given path components (similar to Join) except
+// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except
 // that the returned path is guaranteed to be scoped inside the provided root
 // path (when evaluated). Any symbolic links in the path are evaluated with the
 // given root treated as the root of the filesystem, similar to a chroot. The
-// filesystem state is evaluated through the given VFS interface (if nil, the
-// standard os.* family of functions are used).
+// filesystem state is evaluated through the given [VFS] interface (if nil, the
+// standard [os].* family of functions is used).
 //
 // Note that the guarantees provided by this function only apply if the path
 // components in the returned string are not modified (in other words are not
 // replaced with symlinks on the filesystem) after this function has returned.
-// Such a symlink race is necessarily out-of-scope of SecureJoin.
+// Such a symlink race is necessarily out-of-scope of SecureJoinVFS.
 //
 // NOTE: Due to the above limitation, Linux users are strongly encouraged to
-// use OpenInRoot instead, which does safely protect against these kinds of
+// use [OpenInRoot] instead, which does safely protect against these kinds of
 // attacks. There is no way to solve this problem with SecureJoinVFS because
 // the API is fundamentally wrong (you cannot return a "safe" path string and
 // guarantee it won't be modified afterwards).
@@ -123,8 +118,8 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
 	return filepath.Join(root, finalPath), nil
 }
 
-// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
-// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
+// SecureJoin is a wrapper around [SecureJoinVFS] that just uses the [os].* library
+// of functions as the [VFS]. If in doubt, use this function over [SecureJoinVFS].
 func SecureJoin(root, unsafePath string) (string, error) {
 	return SecureJoinVFS(root, unsafePath, nil)
 }
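+
+// Illustrative behaviour (a sketch, not part of the upstream file):
+//
+//	// ".." components are clamped at the root, so the result stays scoped:
+//	p, _ := securejoin.SecureJoin("/srv/rootfs", "../../etc/shadow")
+//	// p == "/srv/rootfs/etc/shadow"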
diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go
index 290befa15..be81e498d 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go
@@ -12,7 +12,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"slices"
 	"strings"
 
 	"golang.org/x/sys/unix"
@@ -113,7 +112,7 @@ func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) erro
 		return nil
 	}
 	// Split the link target and clean up any "" parts.
-	linkTargetParts := slices.DeleteFunc(
+	linkTargetParts := slices_DeleteFunc(
 		strings.Split(linkTarget, "/"),
 		func(part string) bool { return part == "" || part == "." })
 
diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
index ad2bd7973..5e559bb7a 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go
@@ -9,10 +9,8 @@ package securejoin
 import (
 	"errors"
 	"fmt"
-	"io"
 	"os"
 	"path/filepath"
-	"slices"
 	"strings"
 
 	"golang.org/x/sys/unix"
@@ -23,29 +21,36 @@ var (
 	errPossibleAttack = errors.New("possible attack detected")
 )
 
-// MkdirAllHandle is equivalent to MkdirAll, except that it is safer to use in
-// two respects:
+// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use
+// in two respects:
 //
-//   - The caller provides the root directory as an *os.File (preferably O_PATH)
+//   - The caller provides the root directory as an *[os.File] (preferably O_PATH)
 //     handle. This means that the caller can be sure which root directory is
 //     being used. Note that this can be emulated by using /proc/self/fd/... as
-//     the root path with MkdirAll.
+//     the root path with [os.MkdirAll].
 //
-//   - Once all of the directories have been created, an *os.File (O_PATH) handle
+//   - Once all of the directories have been created, an *[os.File] O_PATH handle
 //     to the directory at unsafePath is returned to the caller. This is done in
 //     an effectively-race-free way (an attacker would only be able to swap the
 //     final directory component), which is not possible to emulate with
-//     MkdirAll.
+//     [MkdirAll].
 //
 // In addition, the returned handle is obtained far more efficiently than doing
-// a brand new lookup of unsafePath (such as with SecureJoin or openat2) after
-// doing MkdirAll. If you intend to open the directory after creating it, you
+// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after
+// doing [MkdirAll]. If you intend to open the directory after creating it, you
 // should use MkdirAllHandle.
 func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err error) {
 	// Make sure there are no os.FileMode bits set.
 	if mode&^0o7777 != 0 {
 		return nil, fmt.Errorf("%w for mkdir 0o%.3o", errInvalidMode, mode)
 	}
+	// On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid
+	// bits. We could also silently ignore them but since we have very few
+	// users it seems more prudent to return an error so users notice that
+	// these bits will not be set.
+	if mode&^0o1777 != 0 {
+		return nil, fmt.Errorf("%w for mkdir 0o%.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode)
+	}
 
 	// Try to open as much of the path as possible.
 	currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath)
@@ -87,7 +92,7 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
 	}
 
 	remainingParts := strings.Split(remainingPath, string(filepath.Separator))
-	if slices.Contains(remainingParts, "..") {
+	if slices_Contains(remainingParts, "..") {
 		// The path contained ".." components after the end of the "real"
 		// components. We could try to safely resolve ".." here but that would
 		// add a bunch of extra logic for something that it's not clear even
@@ -101,24 +106,6 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
 
 	// Make sure the mode doesn't have any type bits.
 	mode &^= unix.S_IFMT
-	// What properties do we expect any newly created directories to have?
-	var (
-		// While umask(2) is a per-thread property, and thus this value could
-		// vary between threads, a functioning Go program would LockOSThread
-		// threads with different umasks and so we don't need to LockOSThread
-		// for this entire mkdirat loop (if we are in the locked thread with a
-		// different umask, we are already locked and there's nothing for us to
-		// do -- and if not then it doesn't matter which thread we run on and
-		// there's nothing for us to do).
-		expectedMode = uint32(unix.S_IFDIR | (mode &^ getUmask()))
-
-		// We would want to get the fs[ug]id here, but we can't access those
-		// from userspace. In practice, nobody uses setfs[ug]id() anymore, so
-		// just use the effective [ug]id (which is equivalent to the fs[ug]id
-		// for programs that don't use setfs[ug]id).
-		expectedUid = uint32(unix.Geteuid())
-		expectedGid = uint32(unix.Getegid())
-	)
 
 	// Create the remaining components.
 	for _, part := range remainingParts {
@@ -129,13 +116,22 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
 		}
 
 		// NOTE: mkdir(2) will not follow trailing symlinks, so we can safely
-		// create the finaly component without worrying about symlink-exchange
+		// create the final component without worrying about symlink-exchange
 		// attacks.
-		if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil {
+		//
+		// If we get -EEXIST, it's possible that another program created the
+		// directory at the same time as us. In that case, just continue on as
+		// if we created it (if the created inode is not a directory, the
+		// following open call will fail).
+		if err := unix.Mkdirat(int(currentDir.Fd()), part, uint32(mode)); err != nil && !errors.Is(err, unix.EEXIST) {
 			err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err}
 			// Make the error a bit nicer if the directory is dead.
-			if err2 := isDeadInode(currentDir); err2 != nil {
-				err = fmt.Errorf("%w (%w)", err, err2)
+			if deadErr := isDeadInode(currentDir); deadErr != nil {
+				// TODO: Once we bump the minimum Go version to 1.20, we can use
+				// multiple %w verbs for this wrapping. For now we need to use a
+				// compatibility shim for older Go versions.
+				//err = fmt.Errorf("%w (%w)", err, deadErr)
+				err = wrapBaseError(err, deadErr)
 			}
 			return nil, err
 		}
@@ -157,40 +153,30 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
 		_ = currentDir.Close()
 		currentDir = nextDir
 
-		// Make sure that the directory matches what we expect. An attacker
-		// could have swapped the directory between us making it and opening
-		// it. There's no way for us to be sure that the directory is
-		// _precisely_ the same as the directory we created, but if we are in
-		// an empty directory with the same owner and mode as the one we
-		// created then there is nothing the attacker could do with this new
-		// directory that they couldn't do with the old one.
-		if stat, err := fstat(currentDir); err != nil {
-			return nil, fmt.Errorf("check newly created directory: %w", err)
-		} else {
-			if stat.Mode != expectedMode {
-				return nil, fmt.Errorf("%w: newly created directory %q has incorrect mode 0o%.3o (expected 0o%.3o)", errPossibleAttack, currentDir.Name(), stat.Mode, expectedMode)
-			}
-			if stat.Uid != expectedUid || stat.Gid != expectedGid {
-				return nil, fmt.Errorf("%w: newly created directory %q has incorrect owner %d:%d (expected %d:%d)", errPossibleAttack, currentDir.Name(), stat.Uid, stat.Gid, expectedUid, expectedGid)
-			}
-			// Check that the directory is empty. We only need to check for
-			// a single entry, and we should get EOF if the directory is
-			// empty.
-			_, err := currentDir.Readdirnames(1)
-			if !errors.Is(err, io.EOF) {
-				if err == nil {
-					err = fmt.Errorf("%w: newly created directory %q is non-empty", errPossibleAttack, currentDir.Name())
-				}
-				return nil, fmt.Errorf("check if newly created directory %q is empty: %w", currentDir.Name(), err)
-			}
-			// Reset the offset.
-			_, _ = currentDir.Seek(0, unix.SEEK_SET)
-		}
+		// It's possible that the directory we just opened was swapped by an
+		// attacker. Unfortunately there isn't much we can do to protect
+		// against this, and MkdirAll's behaviour is that we will reuse
+		// existing directories anyway so the need to protect against this is
+		// incredibly limited (and arguably doesn't even deserve mention here).
+		//
+		// Ideally we might want to check that the owner and mode match what we
+		// would've created -- unfortunately, it is non-trivial to verify that
+		// the owner and mode of the created directory match. While plain Unix
+		// DAC rules seem simple enough to emulate, there are a bunch of other
+		// factors that can change the mode or owner of created directories
+		// (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on
+		// filesystems like vfat, etc etc). We used to try to verify this but
+		// it just led to a series of spurious errors.
+		//
+		// We could also check that the directory is empty, but
+		// unfortunately some pseudofilesystems (like cgroupfs) create
+		// non-empty directories, which would result in different spurious
+		// errors.
 	}
 	return currentDir, nil
 }
 
-// MkdirAll is a race-safe alternative to the Go stdlib's os.MkdirAll function,
+// MkdirAll is a race-safe alternative to the [os.MkdirAll] function,
 // where the new directory is guaranteed to be within the root directory (if an
 // attacker can move directories from inside the root to outside the root, the
 // created directory tree might be outside of the root but the key constraint
@@ -203,16 +189,16 @@ func MkdirAllHandle(root *os.File, unsafePath string, mode int) (_ *os.File, Err
 //	err := os.MkdirAll(path, mode)
 //
 // But is much safer. The above implementation is unsafe because if an attacker
-// can modify the filesystem tree between SecureJoin and MkdirAll, it is
+// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is
 // possible for MkdirAll to resolve unsafe symlink components and create
 // directories outside of the root.
 //
 // If you plan to open the directory after you have created it or want to use
-// an open directory handle as the root, you should use MkdirAllHandle instead.
-// This function is a wrapper around MkdirAllHandle.
+// an open directory handle as the root, you should use [MkdirAllHandle] instead.
+// This function is a wrapper around [MkdirAllHandle].
 //
 // NOTE: The mode argument must be set to the unix mode bits (unix.S_I...), not
-// the Go generic mode bits (os.Mode...).
+// the Go generic mode bits ([os.FileMode]...).
 func MkdirAll(root, unsafePath string, mode int) error {
 	rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
 	if err != nil {
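
Both this file and procfs_linux.go further down call wrapBaseError, a compatibility shim that lives elsewhere in the vendored package and is not shown in this patch. A plausible shape, assuming it emulates the dual-%w wrapping of fmt.Errorf for pre-1.20 toolchains (the type name and layout below are guesses, not the library's confirmed code):

```go
package securejoin

import (
	"errors"
	"fmt"
)

// wrappedError is a sketch of a pre-Go-1.20 stand-in for
// fmt.Errorf("%w: %w", extraErr, baseErr): errors.Is matches the extra
// error while Unwrap still yields the base error.
type wrappedError struct {
	inner error // the base error (what Unwrap returns)
	base  error // the extra error (what errors.Is matches)
}

func (err wrappedError) Is(target error) bool { return errors.Is(err.base, target) }
func (err wrappedError) Unwrap() error        { return err.inner }
func (err wrappedError) Error() string        { return fmt.Sprintf("%v: %v", err.base, err.inner) }

func wrapBaseError(baseErr, extraErr error) error {
	return wrappedError{inner: baseErr, base: extraErr}
}
```

With a shim like this, errors.Is checks against sentinel errors such as errUnsafeProcfs keep working even though the message embeds both errors.
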
diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
index 52dce76f3..230be73f0 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go
@@ -14,8 +14,8 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-// OpenatInRoot is equivalent to OpenInRoot, except that the root is provided
-// using an *os.File handle, to ensure that the correct root directory is used.
+// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided
+// using an *[os.File] handle, to ensure that the correct root directory is used.
 func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
 	handle, err := completeLookupInRoot(root, unsafePath)
 	if err != nil {
@@ -31,7 +31,7 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
 //	handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC)
 //
 // But is much safer. The above implementation is unsafe because if an attacker
-// can modify the filesystem tree between SecureJoin and OpenFile, it is
+// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is
 // possible for the returned file to be outside of the root.
 //
 // Note that the returned handle is an O_PATH handle, meaning that only a very
@@ -39,7 +39,7 @@ func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) {
 // accidentally opening an untrusted file that could cause issues (such as a
 // disconnected TTY that could cause a DoS, or some other issue). In order to
 // use the returned handle, you can "upgrade" it to a proper handle using
-// Reopen.
+// [Reopen].
 func OpenInRoot(root, unsafePath string) (*os.File, error) {
 	rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
 	if err != nil {
@@ -49,7 +49,7 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) {
 	return OpenatInRoot(rootDir, unsafePath)
 }
 
-// Reopen takes an *os.File handle and re-opens it through /proc/self/fd.
+// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd.
 // Reopen(file, flags) is effectively equivalent to
 //
 //	fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd())
@@ -59,7 +59,9 @@ func OpenInRoot(root, unsafePath string) (*os.File, error) {
 // maliciously-configured /proc mount. While this attack scenario is not
 // common, in container runtimes it is possible for higher-level runtimes to be
 // tricked into configuring an unsafe /proc that can be used to attack file
-// operations. See CVE-2019-19921 for more details.
+// operations. See [CVE-2019-19921] for more details.
+//
+// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw
 func Reopen(handle *os.File, flags int) (*os.File, error) {
 	procRoot, err := getProcRoot()
 	if err != nil {
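
A minimal usage sketch of the OpenInRoot/Reopen pair documented above (the root and path are illustrative, not taken from this patch):

```go
package main

import (
	"log"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// O_PATH handle, safe against symlink escapes from the root.
	handle, err := securejoin.OpenInRoot("/srv/root", "etc/passwd")
	if err != nil {
		log.Fatal(err)
	}
	defer handle.Close()

	// "Upgrade" the O_PATH handle to a readable file via /proc/self/fd.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}
```
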
diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
index 921b3e1d4..f7a13e69c 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go
@@ -12,35 +12,21 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-	"sync"
-	"testing"
 
 	"golang.org/x/sys/unix"
 )
 
-var (
-	hasOpenat2Bool bool
-	hasOpenat2Once sync.Once
-
-	testingForceHasOpenat2 *bool
-)
-
-func hasOpenat2() bool {
-	if testing.Testing() && testingForceHasOpenat2 != nil {
-		return *testingForceHasOpenat2
-	}
-	hasOpenat2Once.Do(func() {
-		fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
-			Flags:   unix.O_PATH | unix.O_CLOEXEC,
-			Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
-		})
-		if err == nil {
-			hasOpenat2Bool = true
-			_ = unix.Close(fd)
-		}
+var hasOpenat2 = sync_OnceValue(func() bool {
+	fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{
+		Flags:   unix.O_PATH | unix.O_CLOEXEC,
+		Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT,
 	})
-	return hasOpenat2Bool
-}
+	if err != nil {
+		return false
+	}
+	_ = unix.Close(fd)
+	return true
+})
 
 func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool {
 	// RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve
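
The hunks above and below replace hand-rolled sync.Once plumbing with a sync_OnceValue helper that is defined elsewhere in the package and is not part of this patch. Assuming it mirrors Go 1.21's sync.OnceValue for older toolchains, a sketch:

```go
package securejoin

import "sync"

// sync_OnceValue is a possible shape of the shim referenced above
// (assumption, not the library's confirmed code): it memoizes f's result
// the way sync.OnceValue does on Go 1.21+.
func sync_OnceValue[T any](f func() T) func() T {
	var (
		once  sync.Once
		value T
	)
	return func() T {
		once.Do(func() { value = f() })
		return value
	}
}
```

A sync_OnceValues variant returning two values (used for getProcRoot below) would follow the same pattern.
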
diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
index adf0bd08f..809a579cb 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go
@@ -12,7 +12,6 @@ import (
 	"os"
 	"runtime"
 	"strconv"
-	"sync"
 
 	"golang.org/x/sys/unix"
 )
@@ -54,33 +53,26 @@ func verifyProcRoot(procRoot *os.File) error {
 	return nil
 }
 
-var (
-	hasNewMountApiBool bool
-	hasNewMountApiOnce sync.Once
-)
-
-func hasNewMountApi() bool {
-	hasNewMountApiOnce.Do(func() {
-		// All of the pieces of the new mount API we use (fsopen, fsconfig,
-		// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
-		// just check for one of the syscalls and the others should also be
-		// available.
-		//
-		// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
-		// This is equivalent to openat(2), but tells us if open_tree is
-		// available (and thus all of the other basic new mount API syscalls).
-		// open_tree(2) is most light-weight syscall to test here.
-		//
-		// [1]: merge commit 400913252d09
-		// [2]: <https://lore.kernel.org/lkml/153754740781.17872.7869536526927736855.stgit@warthog.procyon.org.uk/>
-		fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
-		if err == nil {
-			hasNewMountApiBool = true
-			_ = unix.Close(fd)
-		}
-	})
-	return hasNewMountApiBool
-}
+var hasNewMountApi = sync_OnceValue(func() bool {
+	// All of the pieces of the new mount API we use (fsopen, fsconfig,
+	// fsmount, open_tree) were added together in Linux 5.1[1,2], so we can
+	// just check for one of the syscalls and the others should also be
+	// available.
+	//
+	// Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE.
+	// This is equivalent to openat(2), but tells us if open_tree is
+	// available (and thus all of the other basic new mount API syscalls).
+	// open_tree(2) is the most lightweight syscall to test here.
+	//
+	// [1]: merge commit 400913252d09
+	// [2]: <https://lore.kernel.org/lkml/153754740781.17872.7869536526927736855.stgit@warthog.procyon.org.uk/>
+	fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC)
+	if err != nil {
+		return false
+	}
+	_ = unix.Close(fd)
+	return true
+})
 
 func fsopen(fsName string, flags int) (*os.File, error) {
 	// Make sure we always set O_CLOEXEC.
@@ -141,7 +133,7 @@ func clonePrivateProcMount() (_ *os.File, Err error) {
 	// we can be sure there are no over-mounts and so if the root is valid then
 	// we're golden. Otherwise, we have to deal with over-mounts.
 	procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE)
-	if err != nil || testingForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) {
+	if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) {
 		procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE)
 	}
 	if err != nil {
@@ -159,27 +151,19 @@ func clonePrivateProcMount() (_ *os.File, Err error) {
 }
 
 func privateProcRoot() (*os.File, error) {
-	if !hasNewMountApi() || testingForceGetProcRootUnsafe() {
+	if !hasNewMountApi() || hookForceGetProcRootUnsafe() {
 		return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP)
 	}
 	// Try to create a new procfs mount from scratch if we can. This ensures we
 	// can get a procfs mount even if /proc is fake (for whatever reason).
 	procRoot, err := newPrivateProcMount()
-	if err != nil || testingForcePrivateProcRootOpenTree(procRoot) {
+	if err != nil || hookForcePrivateProcRootOpenTree(procRoot) {
 		// Try to clone /proc then...
 		procRoot, err = clonePrivateProcMount()
 	}
 	return procRoot, err
 }
 
-var (
-	procRootHandle *os.File
-	procRootError  error
-	procRootOnce   sync.Once
-
-	errUnsafeProcfs = errors.New("unsafe procfs detected")
-)
-
 func unsafeHostProcRoot() (_ *os.File, Err error) {
 	procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
 	if err != nil {
@@ -207,17 +191,15 @@ func doGetProcRoot() (*os.File, error) {
 	return procRoot, err
 }
 
-func getProcRoot() (*os.File, error) {
-	procRootOnce.Do(func() {
-		procRootHandle, procRootError = doGetProcRoot()
-	})
-	return procRootHandle, procRootError
-}
+var getProcRoot = sync_OnceValues(func() (*os.File, error) {
+	return doGetProcRoot()
+})
 
-var (
-	haveProcThreadSelf     bool
-	haveProcThreadSelfOnce sync.Once
-)
+var hasProcThreadSelf = sync_OnceValue(func() bool {
+	return unix.Access("/proc/thread-self/", unix.F_OK) == nil
+})
+
+var errUnsafeProcfs = errors.New("unsafe procfs detected")
 
 type procThreadSelfCloser func()
 
@@ -230,13 +212,6 @@ type procThreadSelfCloser func()
 // This is similar to ProcThreadSelf from runc, but with extra hardening
 // applied and using *os.File.
 func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) {
-	haveProcThreadSelfOnce.Do(func() {
-		// If the kernel doesn't support thread-self, it doesn't matter which
-		// /proc handle we use.
-		_, err := fstatatFile(procRoot, "thread-self", unix.AT_SYMLINK_NOFOLLOW)
-		haveProcThreadSelf = (err == nil)
-	})
-
 	// We need to lock our thread until the caller is done with the handle
 	// because between getting the handle and using it we could get interrupted
 	// by the Go runtime and hit the case where the underlying thread is
@@ -251,10 +226,10 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
 
 	// Figure out what prefix we want to use.
 	threadSelf := "thread-self/"
-	if !haveProcThreadSelf || testingForceProcSelfTask() {
+	if !hasProcThreadSelf() || hookForceProcSelfTask() {
 		// Pre-3.17 kernels don't have /proc/thread-self, so do it manually.
 		threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/"
-		if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || testingForceProcSelf() {
+		if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() {
 			// In this case, we are running in a pid namespace that doesn't match
 			// the /proc mount we have. This can happen inside runc.
 			//
@@ -275,7 +250,7 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
 		// absolutely sure we are operating on a clean /proc handle that
 		// doesn't have any cheeky overmounts that could trick us (including
 		// symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't
-		// stricly needed, but just use it since we have it.
+		// strictly needed, but just use it since we have it.
 		//
 		// NOTE: /proc/self is technically a magic-link (the contents of the
 		//       symlink are generated dynamically), but it doesn't use
@@ -289,12 +264,20 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
 			Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS,
 		})
 		if err != nil {
-			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+			// TODO: Once we bump the minimum Go version to 1.20, we can use
+			// multiple %w verbs for this wrapping. For now we need to use a
+			// compatibility shim for older Go versions.
+			//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+			return nil, nil, wrapBaseError(err, errUnsafeProcfs)
 		}
 	} else {
 		handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
 		if err != nil {
-			return nil, nil, fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+			// TODO: Once we bump the minimum Go version to 1.20, we can use
+			// multiple %w verbs for this wrapping. For now we need to use a
+			// compatibility shim for older Go versions.
+			//err = fmt.Errorf("%w: %w", errUnsafeProcfs, err)
+			return nil, nil, wrapBaseError(err, errUnsafeProcfs)
 		}
 		defer func() {
 			if Err != nil {
@@ -313,24 +296,21 @@ func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThread
 	return handle, runtime.UnlockOSThread, nil
 }
 
-var (
-	hasStatxMountIdBool bool
-	hasStatxMountIdOnce sync.Once
-)
+// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to
+// avoid bumping the requirement for a single constant we can just define it
+// ourselves.
+const STATX_MNT_ID_UNIQUE = 0x4000
 
-func hasStatxMountId() bool {
-	hasStatxMountIdOnce.Do(func() {
-		var (
-			stx unix.Statx_t
-			// We don't care which mount ID we get. The kernel will give us the
-			// unique one if it is supported.
-			wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
-		)
-		err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
-		hasStatxMountIdBool = (err == nil && (stx.Mask&wantStxMask != 0))
-	})
-	return hasStatxMountIdBool
-}
+var hasStatxMountId = sync_OnceValue(func() bool {
+	var (
+		stx unix.Statx_t
+		// We don't care which mount ID we get. The kernel will give us the
+		// unique one if it is supported.
+		wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+	)
+	err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx)
+	return err == nil && stx.Mask&wantStxMask != 0
+})
 
 func getMountId(dir *os.File, path string) (uint64, error) {
 	// If we don't have statx(STATX_MNT_ID*) support, we can't do anything.
@@ -342,7 +322,7 @@ func getMountId(dir *os.File, path string) (uint64, error) {
 		stx unix.Statx_t
 		// We don't care which mount ID we get. The kernel will give us the
 		// unique one if it is supported.
-		wantStxMask uint32 = unix.STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
+		wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID
 	)
 
 	err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx)
@@ -443,22 +423,6 @@ func isDeadInode(file *os.File) error {
 	return nil
 }
 
-func getUmask() int {
-	// umask is a per-thread property, but it is inherited by children, so we
-	// need to lock our OS thread to make sure that no other goroutine runs in
-	// this thread and no goroutines are spawned from this thread until we
-	// revert to the old umask.
-	//
-	// We could parse /proc/self/status to avoid this get-set problem, but
-	// /proc/thread-self requires LockOSThread anyway, so there's no real
-	// benefit over just using umask(2).
-	runtime.LockOSThread()
-	umask := unix.Umask(0)
-	unix.Umask(umask)
-	runtime.UnlockOSThread()
-	return umask
-}
-
 func checkProcSelfFdPath(path string, file *os.File) error {
 	if err := isDeadInode(file); err != nil {
 		return err
@@ -472,3 +436,17 @@ func checkProcSelfFdPath(path string, file *os.File) error {
 	}
 	return nil
 }
+
+// Test hooks used in the procfs tests to verify that the fallback logic works.
+// See testing_mocks_linux_test.go and procfs_linux_test.go for more details.
+var (
+	hookForcePrivateProcRootOpenTree            = hookDummyFile
+	hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile
+	hookForceGetProcRootUnsafe                  = hookDummy
+
+	hookForceProcSelfTask = hookDummy
+	hookForceProcSelf     = hookDummy
+)
+
+func hookDummy() bool               { return false }
+func hookDummyFile(_ *os.File) bool { return false }
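
A hypothetical sketch of how those _test files might flip one of these hooks for a single test (the helper name is invented for illustration):

```go
//go:build linux

package securejoin

import (
	"os"
	"testing"
)

// forceOpenTreeAtRecursive is a hypothetical test helper (not part of this
// patch): it forces the open_tree(AT_RECURSIVE) fallback for one test and
// restores the default dummy hook afterwards.
func forceOpenTreeAtRecursive(t *testing.T) {
	t.Helper()
	old := hookForcePrivateProcRootOpenTreeAtRecursive
	hookForcePrivateProcRootOpenTreeAtRecursive = func(f *os.File) bool {
		if f != nil {
			_ = f.Close() // drop the handle from the non-recursive attempt
		}
		return true
	}
	t.Cleanup(func() { hookForcePrivateProcRootOpenTreeAtRecursive = old })
}
```
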
diff --git a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go b/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go
deleted file mode 100644
index a3aedf03d..000000000
--- a/vendor/github.com/cyphar/filepath-securejoin/testing_mocks_linux.go
+++ /dev/null
@@ -1,68 +0,0 @@
-//go:build linux
-
-// Copyright (C) 2024 SUSE LLC. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package securejoin
-
-import (
-	"os"
-	"testing"
-)
-
-type forceGetProcRootLevel int
-
-const (
-	forceGetProcRootDefault             forceGetProcRootLevel = iota
-	forceGetProcRootOpenTree                                  // force open_tree()
-	forceGetProcRootOpenTreeAtRecursive                       // force open_tree(AT_RECURSIVE)
-	forceGetProcRootUnsafe                                    // force open()
-)
-
-var testingForceGetProcRoot *forceGetProcRootLevel
-
-func testingCheckClose(check bool, f *os.File) bool {
-	if check {
-		if f != nil {
-			_ = f.Close()
-		}
-		return true
-	}
-	return false
-}
-
-func testingForcePrivateProcRootOpenTree(f *os.File) bool {
-	return testing.Testing() && testingForceGetProcRoot != nil &&
-		testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTree, f)
-}
-
-func testingForcePrivateProcRootOpenTreeAtRecursive(f *os.File) bool {
-	return testing.Testing() && testingForceGetProcRoot != nil &&
-		testingCheckClose(*testingForceGetProcRoot >= forceGetProcRootOpenTreeAtRecursive, f)
-}
-
-func testingForceGetProcRootUnsafe() bool {
-	return testing.Testing() && testingForceGetProcRoot != nil &&
-		*testingForceGetProcRoot >= forceGetProcRootUnsafe
-}
-
-type forceProcThreadSelfLevel int
-
-const (
-	forceProcThreadSelfDefault forceProcThreadSelfLevel = iota
-	forceProcSelfTask
-	forceProcSelf
-)
-
-var testingForceProcThreadSelf *forceProcThreadSelfLevel
-
-func testingForceProcSelfTask() bool {
-	return testing.Testing() && testingForceProcThreadSelf != nil &&
-		*testingForceProcThreadSelf >= forceProcSelfTask
-}
-
-func testingForceProcSelf() bool {
-	return testing.Testing() && testingForceProcThreadSelf != nil &&
-		*testingForceProcThreadSelf >= forceProcSelf
-}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
index 6e27c7dd8..36373f8c5 100644
--- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go
+++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
@@ -10,19 +10,19 @@ import "os"
 // are several projects (umoci and go-mtree) that are using this sort of
 // interface.
 
-// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
-// equivalent to using the standard os.* family of functions. This is mainly
+// VFS is the minimal interface necessary to use [SecureJoinVFS]. A nil VFS is
+// equivalent to using the standard [os].* family of functions. This is mainly
 // used for the purposes of mock testing, but also can be used to otherwise use
-// SecureJoin with VFS-like system.
+// [SecureJoinVFS] with a VFS-like system.
 type VFS interface {
-	// Lstat returns a FileInfo describing the named file. If the file is a
-	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
-	// makes no attempt to follow the link. These semantics are identical to
-	// os.Lstat.
+	// Lstat returns an [os.FileInfo] describing the named file. If the
+	// file is a symbolic link, the returned [os.FileInfo] describes the
+	// symbolic link. Lstat makes no attempt to follow the link.
+	// The semantics are identical to [os.Lstat].
 	Lstat(name string) (os.FileInfo, error)
 
-	// Readlink returns the destination of the named symbolic link. These
-	// semantics are identical to os.Readlink.
+	// Readlink returns the destination of the named symbolic link.
+	// The semantics are identical to [os.Readlink].
 	Readlink(name string) (string, error)
 }
 
@@ -30,12 +30,6 @@ type VFS interface {
 // module.
 type osVFS struct{}
 
-// Lstat returns a FileInfo describing the named file. If the file is a
-// symbolic link, the returned FileInfo describes the symbolic link. Lstat
-// makes no attempt to follow the link. These semantics are identical to
-// os.Lstat.
 func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
 
-// Readlink returns the destination of the named symbolic link. These
-// semantics are identical to os.Readlink.
 func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
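
With the duplicated comments gone, the interface docs above are the single source of truth. A toy VFS implementation and its use with SecureJoinVFS might look like this (illustrative only; the prefix and paths are placeholders):

```go
package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// prefixVFS delegates to the os package after prepending a fixed prefix;
// a toy stand-in for the mock VFS implementations mentioned above.
type prefixVFS struct{ prefix string }

func (v prefixVFS) Lstat(name string) (os.FileInfo, error) {
	return os.Lstat(v.prefix + name)
}

func (v prefixVFS) Readlink(name string) (string, error) {
	return os.Readlink(v.prefix + name)
}

func main() {
	// SecureJoinVFS consults the VFS for lstat/readlink during resolution.
	path, err := securejoin.SecureJoinVFS("/srv/root", "a/../b/c", prefixVFS{"/mnt"})
	fmt.Println(path, err)
}
```
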
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
deleted file mode 100644
index 2c3ebe165..000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package challenge
-
-import (
-	"net/url"
-	"strings"
-)
-
-// FROM: https://golang.org/src/net/http/http.go
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// FROM: http://golang.org/src/net/http/transport.go
-var portMap = map[string]string{
-	"http":  "80",
-	"https": "443",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix
-// FROM: http://golang.org/src/net/http/transport.go
-func canonicalAddr(url *url.URL) string {
-	addr := url.Host
-	if !hasPort(addr) {
-		return addr + ":" + portMap[url.Scheme]
-	}
-	return addr
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
deleted file mode 100644
index fe238210c..000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package challenge
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-	"sync"
-)
-
-// Challenge carries information from a WWW-Authenticate response header.
-// See RFC 2617.
-type Challenge struct {
-	// Scheme is the auth-scheme according to RFC 2617
-	Scheme string
-
-	// Parameters are the auth-params according to RFC 2617
-	Parameters map[string]string
-}
-
-// Manager manages the challenges for endpoints.
-// The challenges are pulled out of HTTP responses. Only
-// responses which expect challenges should be added to
-// the manager, since a non-unauthorized request will be
-// viewed as not requiring challenges.
-type Manager interface {
-	// GetChallenges returns the challenges for the given
-	// endpoint URL.
-	GetChallenges(endpoint url.URL) ([]Challenge, error)
-
-	// AddResponse adds the response to the challenge
-	// manager. The challenges will be parsed out of
-	// the WWW-Authenicate headers and added to the
-	// URL which was produced the response. If the
-	// response was authorized, any challenges for the
-	// endpoint will be cleared.
-	AddResponse(resp *http.Response) error
-}
-
-// NewSimpleManager returns an instance of
-// Manger which only maps endpoints to challenges
-// based on the responses which have been added the
-// manager. The simple manager will make no attempt to
-// perform requests on the endpoints or cache the responses
-// to a backend.
-func NewSimpleManager() Manager {
-	return &simpleManager{
-		Challenges: make(map[string][]Challenge),
-	}
-}
-
-type simpleManager struct {
-	sync.RWMutex
-	Challenges map[string][]Challenge
-}
-
-func normalizeURL(endpoint *url.URL) {
-	endpoint.Host = strings.ToLower(endpoint.Host)
-	endpoint.Host = canonicalAddr(endpoint)
-}
-
-func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
-	normalizeURL(&endpoint)
-
-	m.RLock()
-	defer m.RUnlock()
-	challenges := m.Challenges[endpoint.String()]
-	return challenges, nil
-}
-
-func (m *simpleManager) AddResponse(resp *http.Response) error {
-	challenges := ResponseChallenges(resp)
-	if resp.Request == nil {
-		return fmt.Errorf("missing request reference")
-	}
-	urlCopy := url.URL{
-		Path:   resp.Request.URL.Path,
-		Host:   resp.Request.URL.Host,
-		Scheme: resp.Request.URL.Scheme,
-	}
-	normalizeURL(&urlCopy)
-
-	m.Lock()
-	defer m.Unlock()
-	m.Challenges[urlCopy.String()] = challenges
-	return nil
-}
-
-// Octet types from RFC 2616.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
-		if strings.ContainsRune(" \t\r\n", rune(c)) {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-// ResponseChallenges returns a list of authorization challenges
-// for the given http Response. Challenges are only checked if
-// the response status code was a 401.
-func ResponseChallenges(resp *http.Response) []Challenge {
-	if resp.StatusCode == http.StatusUnauthorized {
-		// Parse the WWW-Authenticate Header and store the challenges
-		// on this endpoint object.
-		return parseAuthHeader(resp.Header)
-	}
-
-	return nil
-}
-
-func parseAuthHeader(header http.Header) []Challenge {
-	challenges := []Challenge{}
-	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
-		v, p := parseValueAndParams(h)
-		if v != "" {
-			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
-		}
-	}
-	return challenges
-}
-
-func parseValueAndParams(header string) (value string, params map[string]string) {
-	params = make(map[string]string)
-	value, s := expectToken(header)
-	if value == "" {
-		return
-	}
-	value = strings.ToLower(value)
-	s = "," + skipSpace(s)
-	for strings.HasPrefix(s, ",") {
-		var pkey string
-		pkey, s = expectToken(skipSpace(s[1:]))
-		if pkey == "" {
-			return
-		}
-		if !strings.HasPrefix(s, "=") {
-			return
-		}
-		var pvalue string
-		pvalue, s = expectTokenOrQuoted(s[1:])
-		if pvalue == "" {
-			return
-		}
-		pkey = strings.ToLower(pkey)
-		params[pkey] = pvalue
-		s = skipSpace(s)
-	}
-	return
-}
-
-func skipSpace(s string) (rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isSpace == 0 {
-			break
-		}
-	}
-	return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isToken == 0 {
-			break
-		}
-	}
-	return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
-	if !strings.HasPrefix(s, "\"") {
-		return expectToken(s)
-	}
-	s = s[1:]
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '"':
-			return s[:i], s[i+1:]
-		case '\\':
-			p := make([]byte, len(s)-1)
-			j := copy(p, s[:i])
-			escape := true
-			for i = i + 1; i < len(s); i++ {
-				b := s[i]
-				switch {
-				case escape:
-					escape = false
-					p[j] = b
-					j++
-				case b == '\\':
-					escape = true
-				case b == '"':
-					return string(p[:j]), s[i+1:]
-				default:
-					p[j] = b
-					j++
-				}
-			}
-			return "", ""
-		}
-	}
-	return "", ""
-}
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 7164e1eba..142bb038a 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -1195,6 +1195,7 @@ definitions:
               - "default"
               - "process"
               - "hyperv"
+              - ""
           MaskedPaths:
             type: "array"
             description: |
@@ -4180,6 +4181,7 @@ definitions:
               - "default"
               - "process"
               - "hyperv"
+              - ""
           Init:
             description: |
               Run an init inside the container that forwards signals and reaps
@@ -5750,6 +5752,7 @@ definitions:
           - "default"
           - "hyperv"
           - "process"
+          - ""
       InitBinary:
         description: |
           Name and, optionally, path of the `docker-init` binary.
@@ -5820,8 +5823,6 @@ definitions:
           type: "string"
         example:
           - "WARNING: No memory limit support"
-          - "WARNING: bridge-nf-call-iptables is disabled"
-          - "WARNING: bridge-nf-call-ip6tables is disabled"
       CDISpecDirs:
         description: |
           List of directories where (Container Device Interface) CDI
@@ -7876,10 +7877,12 @@ paths:
           type: "string"
         - name: "h"
           in: "query"
+          required: true
           description: "Height of the TTY session in characters"
           type: "integer"
         - name: "w"
           in: "query"
+          required: true
           description: "Width of the TTY session in characters"
           type: "integer"
       tags: ["Container"]
@@ -9244,6 +9247,19 @@ paths:
             all tags of the given image that are present in the local image store
             are pushed.
           type: "string"
+        - name: "platform"
+          type: "string"
+          in: "query"
+          description: |
+            JSON-encoded OCI platform to select the platform-variant to push.
+            If not provided, an attempt is made to push all available variants.
+
+            If the daemon provides a multi-platform image store, this selects
+            the platform-variant to push to the registry. If the image is
+            a single-platform image, or if the multi-platform image does not
+            provide a variant matching the given platform, an error is returned.
+
+            Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
         - name: "X-Registry-Auth"
           in: "header"
           description: |
@@ -9253,11 +9269,6 @@ paths:
             details.
           type: "string"
           required: true
-        - name: "platform"
-          in: "query"
-          description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)"
-          type: "string"
-          x-nullable: true
       tags: ["Image"]
   /images/{name}/tag:
     post:
@@ -9553,7 +9564,7 @@ paths:
             type: "string"
             example: "OK"
           headers:
-            API-Version:
+            Api-Version:
               type: "string"
               description: "Max API Version the server supports"
             Builder-Version:
@@ -9609,7 +9620,7 @@ paths:
             type: "string"
             example: "(empty)"
           headers:
-            API-Version:
+            Api-Version:
               type: "string"
               description: "Max API Version the server supports"
             Builder-Version:
@@ -10203,10 +10214,12 @@ paths:
           type: "string"
         - name: "h"
           in: "query"
+          required: true
           description: "Height of the TTY session in characters"
           type: "integer"
         - name: "w"
           in: "query"
+          required: true
           description: "Width of the TTY session in characters"
           type: "integer"
       tags: ["Exec"]
@@ -11622,6 +11635,7 @@ paths:
             example:
               ListenAddr: "0.0.0.0:2377"
               AdvertiseAddr: "192.168.1.1:2377"
+              DataPathAddr: "192.168.1.1"
               RemoteAddrs:
                 - "node1:2377"
               JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
index 03648fb7b..83198305e 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -10,7 +10,7 @@ import (
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/go-connections/nat"
-	units "github.com/docker/go-units"
+	"github.com/docker/go-units"
 )
 
 // CgroupnsMode represents the cgroup namespace mode of the container
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index fe99b7439..ea55813e6 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -484,4 +484,6 @@ type BuildCachePruneOptions struct {
 	All         bool
 	KeepStorage int64
 	Filters     filters.Args
+
+	// FIXME(thaJeztah): add new options; see https://github.com/moby/moby/issues/48639
 }
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 60d91bc65..46832d8a4 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -2,7 +2,7 @@
 Package client is a Go client for the Docker Engine API.
 
 For more information about the Engine API, see the documentation:
-https://docs.docker.com/engine/api/
+https://docs.docker.com/reference/api/engine/
 
 # Usage
 
@@ -247,6 +247,14 @@ func (cli *Client) tlsConfig() *tls.Config {
 
 func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
 	transport := &http.Transport{}
+	// Necessary to prevent long-lived processes using the
+	// client from leaking connections due to idle connections
+	// not being released.
+	// TODO: see if we can also address this from the server side,
+	// or in go-connections.
+	// see: https://github.com/moby/moby/issues/45539
+	transport.MaxIdleConns = 6
+	transport.IdleConnTimeout = 30 * time.Second
 	err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
 	if err != nil {
 		return nil, err
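
The same idle-connection bounds in isolation, for callers that construct their own transport (the values mirror the defaults added above and can be tuned):

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	// Bound the idle pool so long-lived processes do not accumulate
	// idle connections indefinitely.
	transport := &http.Transport{
		MaxIdleConns:    6,
		IdleConnTimeout: 30 * time.Second,
	}
	client := &http.Client{Transport: transport}
	_ = client
}
```
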
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
index bf3e9b1cd..7c43268b3 100644
--- a/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -56,8 +56,8 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
 		err := cli.checkResponseErr(resp)
 		return ping, errdefs.FromStatusCode(err, resp.statusCode)
 	}
-	ping.APIVersion = resp.header.Get("API-Version")
-	ping.OSType = resp.header.Get("OSType")
+	ping.APIVersion = resp.header.Get("Api-Version")
+	ping.OSType = resp.header.Get("Ostype")
 	if resp.header.Get("Docker-Experimental") == "true" {
 		ping.Experimental = true
 	}
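
For context on the renamed keys: Go's net/http stores parsed header keys in canonical MIME form, so `Api-Version` and `Ostype` are the spellings actually present in the header map (Header.Get canonicalizes its argument, but raw map access does not). A quick check:

```go
package main

import (
	"fmt"
	"net/textproto"
)

func main() {
	// http.Header stores parsed keys in canonical MIME form, which is the
	// form that direct map indexing (unlike Header.Get) must use.
	fmt.Println(textproto.CanonicalMIMEHeaderKey("API-Version")) // Api-Version
	fmt.Println(textproto.CanonicalMIMEHeaderKey("OSType"))      // Ostype
}
```
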
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index 8d2c8857f..037327b90 100644
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	units "github.com/docker/go-units"
+	"github.com/docker/go-units"
 	"github.com/moby/term"
 	"github.com/morikuni/aec"
 )
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index a22953805..4528059ca 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
@@ -99,7 +99,7 @@ archives:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 684a30853..de264c85a 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,13 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+	* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+	* gzhttp: Add support for decompressing request bodies by @mirecl in https://github.com/klauspost/compress/pull/1002
+	* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+	* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+	* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
 * Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
 	* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
 	* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index a79c4a527..8f8223cd3 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,6 +6,7 @@ package zstd
 
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
+
 	return s.err
 }
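
A short sketch of the use-after-Close semantics introduced above: Write after Close is rejected with ErrEncoderClosed, while Flush and a second Close are treated as no-ops:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Write after Close now fails loudly instead of corrupting the stream.
	if _, err := enc.Write([]byte("world")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("write after close rejected:", err)
	}
	// Flush and a repeated Close after Close are ignored.
	fmt.Println(enc.Flush(), enc.Close()) // <nil> <nil>
}
```
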
 
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 4be7cc736..066bef2a4 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -88,6 +88,10 @@ var (
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")
 
+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md
new file mode 100644
index 000000000..299b36d92
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/CHANGELOG.md
@@ -0,0 +1,124 @@
+# Changelog
+This file documents all notable changes made to this project since the initial fork
+from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.4.0] - 2024-11-11
+
+### Added
+* New separate API for ambient ([GetAmbient], [SetAmbient], [ResetAmbient])
+  and bound ([GetBound], [DropBound]) capabilities, modelled after libcap. (#176)
+
+### Fixed
+* [Apply] now returns an error if called for non-zero `pid`. Before this change,
+  it could silently change some capabilities of the current process, instead of
+  the one identified by the `pid`. (#168, #174)
+* Fixed tests that change capabilities to be run in a separate process. (#173)
+* Other improvements in tests. (#169, #170)
+
+### Changed
+* Use raw syscalls (which are slightly faster). (#176)
+* Most tests are now limited to testing the public API of the package. (#162)
+* Simplify parsing /proc/*pid*/status, add a test case. (#162)
+* Optimize the number of syscalls to set ambient capabilities in Apply
+  by clearing them first; add a test case. (#163, #164)
+* Better documentation for [Apply], [NewFile], [NewFile2], [NewPid], [NewPid2]. (#175)
+
+### Removed
+* `.golangci.yml` and `.codespellrc` are no longer part of the package. (#158)
+
+## [0.3.0] - 2024-09-25
+
+### Added
+* Added [ListKnown] and [ListSupported] functions. (#153)
+* [LastCap] is now available on non-Linux platforms (where it returns an error). (#152)
+
+### Changed
+* [List] is now deprecated in favor of [ListKnown] and [ListSupported]. (#153)
+
+### Fixed
+* Various documentation improvements. (#151)
+* Fix "generated code" comment. (#153)
+
+## [0.2.0] - 2024-09-16
+
+This is the first release after the move to a new home in
+github.com/moby/sys/capability.
+
+### Fixed
+ * Fixed URLs in documentation to reflect the new home.
+
+## [0.1.1] - 2024-08-01
+
+This is a maintenance release, fixing a few minor issues.
+
+### Fixed
+ * Fixed future kernel compatibility, for real this time. [#11]
+ * Fixed [LastCap] to be a function. [#12]
+
+## [0.1.0] - 2024-07-31
+
+This is an initial release since the fork.
+
+### Breaking changes
+
+ * The `CAP_LAST_CAP` variable is removed; users need to modify the code to
+   use [LastCap] to get the value. [#6]
+ * The code now requires Go >= 1.21.
+
+### Added
+ * `go.mod` and `go.sum` files. [#2]
+ * New [LastCap] function. [#6]
+ * Basic CI using GHA infra. [#8], [#9]
+ * README and CHANGELOG. [#10]
+
+### Fixed
+ * Fixed ambient capabilities error handling in [Apply]. [#3]
+ * Fixed future kernel compatibility. [#1]
+ * Fixed various linter warnings. [#4], [#7]
+
+### Changed
+ * Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2]
+
+### Removed
+ * Removed support for capabilities v1 and v2. [#1]
+ * Removed init function so programs that use this package start faster. [#6]
+ * Removed `CAP_LAST_CAP` (use [LastCap] instead). [#6]
+
+<!-- Doc links (please keep sorted). -->
+[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply
+[DropBound]: https://pkg.go.dev/github.com/moby/sys/capability#DropBound
+[GetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#GetAmbient
+[GetBound]: https://pkg.go.dev/github.com/moby/sys/capability#GetBound
+[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap
+[ListKnown]: https://pkg.go.dev/github.com/moby/sys/capability#ListKnown
+[ListSupported]: https://pkg.go.dev/github.com/moby/sys/capability#ListSupported
+[List]: https://pkg.go.dev/github.com/moby/sys/capability#List
+[NewFile2]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile2
+[NewFile]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile
+[NewPid2]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid2
+[NewPid]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid
+[ResetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#ResetAmbient
+[SetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#SetAmbient
+
+<!-- Minor releases. -->
+[0.4.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.4.0
+[0.3.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.3.0
+[0.2.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.2.0
+[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1
+[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0
+
+<!-- PRs in 0.1.x releases. -->
+[#1]: https://github.com/kolyshkin/capability/pull/1
+[#2]: https://github.com/kolyshkin/capability/pull/2
+[#3]: https://github.com/kolyshkin/capability/pull/3
+[#4]: https://github.com/kolyshkin/capability/pull/4
+[#6]: https://github.com/kolyshkin/capability/pull/6
+[#7]: https://github.com/kolyshkin/capability/pull/7
+[#8]: https://github.com/kolyshkin/capability/pull/8
+[#9]: https://github.com/kolyshkin/capability/pull/9
+[#10]: https://github.com/kolyshkin/capability/pull/10
+[#11]: https://github.com/kolyshkin/capability/pull/11
+[#12]: https://github.com/kolyshkin/capability/pull/12
diff --git a/vendor/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/moby/sys/capability/LICENSE
similarity index 97%
rename from vendor/github.com/syndtr/gocapability/LICENSE
rename to vendor/github.com/moby/sys/capability/LICENSE
index 80dd96de7..08adcd6ec 100644
--- a/vendor/github.com/syndtr/gocapability/LICENSE
+++ b/vendor/github.com/moby/sys/capability/LICENSE
@@ -1,3 +1,4 @@
+Copyright 2023 The Capability Authors.
 Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
 All rights reserved.
 
diff --git a/vendor/github.com/moby/sys/capability/README.md b/vendor/github.com/moby/sys/capability/README.md
new file mode 100644
index 000000000..84b74871a
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/README.md
@@ -0,0 +1,13 @@
+This is a fork of the (apparently no longer maintained)
+https://github.com/syndtr/gocapability package. It provides basic primitives to
+work with [Linux capabilities][capabilities(7)].
+
+For changes, see [CHANGELOG.md](./CHANGELOG.md).
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/moby/sys/capability/capability.svg)](https://pkg.go.dev/github.com/moby/sys/capability)
+
+## Alternatives
+
+ * https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap
+
+[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html
diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go
new file mode 100644
index 000000000..11e47bed7
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/capability.go
@@ -0,0 +1,176 @@
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package capability provides utilities for manipulating POSIX capabilities.
+package capability
+
+type Capabilities interface {
+	// Get checks whether a capability is present in the given
+	// capabilities set. The 'which' value should be one of EFFECTIVE,
+	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	Get(which CapType, what Cap) bool
+
+	// Empty checks whether all capability bits of the given capabilities
+	// set are zero. The 'which' value should be one of EFFECTIVE,
+	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	Empty(which CapType) bool
+
+	// Full checks whether all capability bits of the given capabilities
+	// set are one. The 'which' value should be one of EFFECTIVE,
+	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	Full(which CapType) bool
+
+	// Set sets capabilities of the given capabilities sets. The
+	// 'which' value should be one or a combination (OR'ed) of EFFECTIVE,
+	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	Set(which CapType, caps ...Cap)
+
+	// Unset unsets capabilities of the given capabilities sets. The
+	// 'which' value should be one or a combination (OR'ed) of EFFECTIVE,
+	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	Unset(which CapType, caps ...Cap)
+
+	// Fill sets all bits of the given capabilities kind to one. The
+	// 'kind' value should be one or a combination (OR'ed) of CAPS,
+	// BOUNDS or AMBS.
+	Fill(kind CapType)
+
+	// Clear sets all bits of the given capabilities kind to zero. The
+	// 'kind' value should be one or a combination (OR'ed) of CAPS,
+	// BOUNDS or AMBS.
+	Clear(kind CapType)
+
+	// StringCap returns the current capabilities state of the given
+	// capabilities set as a string. The 'which' value should be one of
+	// EFFECTIVE, PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
+	StringCap(which CapType) string
+
+	// String returns the current capabilities state as a string.
+	String() string
+
+	// Load loads the actual capabilities value. This will overwrite all
+	// outstanding changes.
+	Load() error
+
+	// Apply applies the capabilities settings, so all changes made by
+	// [Set], [Unset], [Fill], or [Clear] will take effect.
+	Apply(kind CapType) error
+}
+
+// NewPid initializes a new [Capabilities] object for the given pid when
+// it is nonzero, or for the current process if pid is 0.
+//
+// Deprecated: replace with [NewPid2] followed by optional [Capabilities.Load]
+// (only if needed). For example, replace:
+//
+//	c, err := NewPid(0)
+//	if err != nil {
+//		return err
+//	}
+//
+// with:
+//
+//	c, err := NewPid2(0)
+//	if err != nil {
+//		return err
+//	}
+//	err = c.Load()
+//	if err != nil {
+//		return err
+//	}
+func NewPid(pid int) (Capabilities, error) {
+	c, err := newPid(pid)
+	if err != nil {
+		return c, err
+	}
+	err = c.Load()
+	return c, err
+}
+
+// NewPid2 initializes a new [Capabilities] object for the given pid when
+// it is nonzero, or for the current process if pid is 0. This
+// does not load the process's current capabilities; if needed,
+// call [Capabilities.Load].
+func NewPid2(pid int) (Capabilities, error) {
+	return newPid(pid)
+}
+
+// NewFile initializes a new Capabilities object for the given file path.
+//
+// Deprecated: replace with [NewFile2] followed by optional [Capabilities.Load]
+// (only if needed). For example, replace:
+//
+//	c, err := NewFile(path)
+//	if err != nil {
+//		return err
+//	}
+//
+// with:
+//
+//	c, err := NewFile2(path)
+//	if err != nil {
+//		return err
+//	}
+//	err = c.Load()
+//	if err != nil {
+//		return err
+//	}
+func NewFile(path string) (Capabilities, error) {
+	c, err := newFile(path)
+	if err != nil {
+		return c, err
+	}
+	err = c.Load()
+	return c, err
+}
+
+// NewFile2 creates a new initialized [Capabilities] object for the given
+// file path. This does not load the file's current capabilities; if
+// needed, call [Capabilities.Load].
+func NewFile2(path string) (Capabilities, error) {
+	return newFile(path)
+}
+
+// LastCap returns the highest valid capability of the running kernel,
+// or an error if it cannot be obtained.
+//
+// See also: [ListSupported].
+func LastCap() (Cap, error) {
+	return lastCap()
+}
+
+// GetAmbient determines if a specific ambient capability is raised in the
+// calling thread.
+func GetAmbient(c Cap) (bool, error) {
+	return getAmbient(c)
+}
+
+// SetAmbient raises or lowers specified ambient capabilities for the calling
+// thread. To complete successfully, the prevailing effective capability set
+// must have a raised CAP_SETPCAP. Further, to raise a specific ambient
+// capability the inheritable and permitted sets of the calling thread must
+// already contain the specified capability.
+func SetAmbient(raise bool, caps ...Cap) error {
+	return setAmbient(raise, caps...)
+}
+
+// ResetAmbient resets all of the ambient capabilities for the calling thread
+// to their lowered value.
+func ResetAmbient() error {
+	return resetAmbient()
+}
+
+// GetBound determines if a specific bounding capability is raised in the
+// calling thread.
+func GetBound(c Cap) (bool, error) {
+	return getBound(c)
+}
+
+// DropBound lowers the specified bounding set capability.
+func DropBound(caps ...Cap) error {
+	return dropBound(caps...)
+}
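
A brief usage sketch of the non-deprecated flow documented above (pid 0 and CAP_SETPCAP are chosen for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/capability"
)

func main() {
	// NewPid2 plus an explicit Load is the recommended replacement for
	// the deprecated NewPid.
	c, err := capability.NewPid2(0) // 0 means the current process
	if err != nil {
		log.Fatal(err)
	}
	if err := c.Load(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("effective CAP_SETPCAP:",
		c.Get(capability.EFFECTIVE, capability.CAP_SETPCAP))

	last, err := capability.LastCap()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("highest kernel capability:", last)
}
```
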
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go
similarity index 65%
rename from vendor/github.com/syndtr/gocapability/capability/capability_linux.go
rename to vendor/github.com/moby/sys/capability/capability_linux.go
index 1567dc810..234b1efb2 100644
--- a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go
+++ b/vendor/github.com/moby/sys/capability/capability_linux.go
@@ -1,8 +1,9 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
 // All rights reserved.
 //
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package capability
 
@@ -12,62 +13,53 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 )
 
-var errUnknownVers = errors.New("unknown capability version")
-
 const (
-	linuxCapVer1 = 0x19980330
-	linuxCapVer2 = 0x20071026
+	linuxCapVer1 = 0x19980330 // No longer supported.
+	linuxCapVer2 = 0x20071026 // No longer supported.
 	linuxCapVer3 = 0x20080522
 )
 
-var (
-	capVers    uint32
-	capLastCap Cap
-)
-
-func init() {
-	var hdr capHeader
-	capget(&hdr, nil)
-	capVers = hdr.version
-
-	if initLastCap() == nil {
-		CAP_LAST_CAP = capLastCap
-		if capLastCap > 31 {
-			capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
-		} else {
-			capUpperMask = 0
-		}
-	}
-}
-
-func initLastCap() error {
-	if capLastCap != 0 {
-		return nil
-	}
-
+var lastCap = sync.OnceValues(func() (Cap, error) {
 	f, err := os.Open("/proc/sys/kernel/cap_last_cap")
 	if err != nil {
-		return err
+		return 0, err
 	}
-	defer f.Close()
 
-	var b []byte = make([]byte, 11)
-	_, err = f.Read(b)
+	buf := make([]byte, 11)
+	l, err := f.Read(buf)
+	f.Close()
 	if err != nil {
-		return err
+		return 0, err
 	}
+	buf = buf[:l]
 
-	fmt.Sscanf(string(b), "%d", &capLastCap)
+	last, err := strconv.Atoi(strings.TrimSpace(string(buf)))
+	if err != nil {
+		return 0, err
+	}
+	return Cap(last), nil
+})
 
-	return nil
+func capUpperMask() uint32 {
+	last, err := lastCap()
+	if err != nil || last < 32 {
+		return 0
+	}
+	return (uint32(1) << (uint(last) - 31)) - 1
 }
 
 func mkStringCap(c Capabilities, which CapType) (ret string) {
-	for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
+	last, err := lastCap()
+	if err != nil {
+		return ""
+	}
+	for i, first := Cap(0), true; i <= last; i++ {
 		if !c.Get(which, i) {
 			continue
 		}
@@ -98,136 +90,38 @@ func mkString(c Capabilities, max CapType) (ret string) {
 	return
 }
 
-func newPid(pid int) (c Capabilities, err error) {
-	switch capVers {
-	case linuxCapVer1:
-		p := new(capsV1)
-		p.hdr.version = capVers
-		p.hdr.pid = int32(pid)
-		c = p
-	case linuxCapVer2, linuxCapVer3:
+var capVersion = sync.OnceValues(func() (uint32, error) {
+	var hdr capHeader
+	err := capget(&hdr, nil)
+	return hdr.version, err
+})
+
+func newPid(pid int) (c Capabilities, retErr error) {
+	ver, err := capVersion()
+	if err != nil {
+		retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err)
+		return
+	}
+	switch ver {
+	case linuxCapVer1, linuxCapVer2:
+		retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)")
+	default:
+		// Either linuxCapVer3, or an unknown/future version (such as v4).
+		// In the latter case, we fall back to v3 as the latest version known
+		// to this package, as the kernel should be backward-compatible with v3.
 		p := new(capsV3)
-		p.hdr.version = capVers
+		p.hdr.version = linuxCapVer3
 		p.hdr.pid = int32(pid)
 		c = p
-	default:
-		err = errUnknownVers
-		return
 	}
 	return
 }
 
-type capsV1 struct {
-	hdr  capHeader
-	data capData
-}
-
-func (c *capsV1) Get(which CapType, what Cap) bool {
-	if what > 32 {
-		return false
+func ignoreEINVAL(err error) error {
+	if errors.Is(err, syscall.EINVAL) {
+		err = nil
 	}
-
-	switch which {
-	case EFFECTIVE:
-		return (1<<uint(what))&c.data.effective != 0
-	case PERMITTED:
-		return (1<<uint(what))&c.data.permitted != 0
-	case INHERITABLE:
-		return (1<<uint(what))&c.data.inheritable != 0
-	}
-
-	return false
-}
-
-func (c *capsV1) getData(which CapType) (ret uint32) {
-	switch which {
-	case EFFECTIVE:
-		ret = c.data.effective
-	case PERMITTED:
-		ret = c.data.permitted
-	case INHERITABLE:
-		ret = c.data.inheritable
-	}
-	return
-}
-
-func (c *capsV1) Empty(which CapType) bool {
-	return c.getData(which) == 0
-}
-
-func (c *capsV1) Full(which CapType) bool {
-	return (c.getData(which) & 0x7fffffff) == 0x7fffffff
-}
-
-func (c *capsV1) Set(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		if what > 32 {
-			continue
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data.effective |= 1 << uint(what)
-		}
-		if which&PERMITTED != 0 {
-			c.data.permitted |= 1 << uint(what)
-		}
-		if which&INHERITABLE != 0 {
-			c.data.inheritable |= 1 << uint(what)
-		}
-	}
-}
-
-func (c *capsV1) Unset(which CapType, caps ...Cap) {
-	for _, what := range caps {
-		if what > 32 {
-			continue
-		}
-
-		if which&EFFECTIVE != 0 {
-			c.data.effective &= ^(1 << uint(what))
-		}
-		if which&PERMITTED != 0 {
-			c.data.permitted &= ^(1 << uint(what))
-		}
-		if which&INHERITABLE != 0 {
-			c.data.inheritable &= ^(1 << uint(what))
-		}
-	}
-}
-
-func (c *capsV1) Fill(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data.effective = 0x7fffffff
-		c.data.permitted = 0x7fffffff
-		c.data.inheritable = 0
-	}
-}
-
-func (c *capsV1) Clear(kind CapType) {
-	if kind&CAPS == CAPS {
-		c.data.effective = 0
-		c.data.permitted = 0
-		c.data.inheritable = 0
-	}
-}
-
-func (c *capsV1) StringCap(which CapType) (ret string) {
-	return mkStringCap(c, which)
-}
-
-func (c *capsV1) String() (ret string) {
-	return mkString(c, BOUNDING)
-}
-
-func (c *capsV1) Load() (err error) {
-	return capget(&c.hdr, &c.data)
-}
-
-func (c *capsV1) Apply(kind CapType) error {
-	if kind&CAPS == CAPS {
-		return capset(&c.hdr, &c.data)
-	}
-	return nil
+	return err
 }
 
 type capsV3 struct {
@@ -292,7 +186,8 @@ func (c *capsV3) Full(which CapType) bool {
 	if (data[0] & 0xffffffff) != 0xffffffff {
 		return false
 	}
-	return (data[1] & capUpperMask) == capUpperMask
+	mask := capUpperMask()
+	return (data[1] & mask) == mask
 }
 
 func (c *capsV3) Set(which CapType, caps ...Cap) {
@@ -401,15 +296,12 @@ func (c *capsV3) Load() (err error) {
 		return
 	}
 
-	var status_path string
-
-	if c.hdr.pid == 0 {
-		status_path = fmt.Sprintf("/proc/self/status")
-	} else {
-		status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
+	path := "/proc/self/status"
+	if c.hdr.pid != 0 {
+		path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
 	}
 
-	f, err := os.Open(status_path)
+	f, err := os.Open(path)
 	if err != nil {
 		return
 	}
@@ -422,12 +314,18 @@ func (c *capsV3) Load() (err error) {
 			}
 			break
 		}
-		if strings.HasPrefix(line, "CapB") {
-			fmt.Sscanf(line[4:], "nd:  %08x%08x", &c.bounds[1], &c.bounds[0])
+		if val, ok := strings.CutPrefix(line, "CapBnd:\t"); ok {
+			_, err = fmt.Sscanf(val, "%08x%08x", &c.bounds[1], &c.bounds[0])
+			if err != nil {
+				break
+			}
 			continue
 		}
-		if strings.HasPrefix(line, "CapA") {
-			fmt.Sscanf(line[4:], "mb:  %08x%08x", &c.ambient[1], &c.ambient[0])
+		if val, ok := strings.CutPrefix(line, "CapAmb:\t"); ok {
+			_, err = fmt.Sscanf(val, "%08x%08x", &c.ambient[1], &c.ambient[0])
+			if err != nil {
+				break
+			}
 			continue
 		}
 	}
@@ -436,26 +334,29 @@ func (c *capsV3) Load() (err error) {
 	return
 }
 
-func (c *capsV3) Apply(kind CapType) (err error) {
+func (c *capsV3) Apply(kind CapType) error {
+	if c.hdr.pid != 0 {
+		return errors.New("unable to modify capabilities of another process")
+	}
+	last, err := LastCap()
+	if err != nil {
+		return err
+	}
 	if kind&BOUNDS == BOUNDS {
 		var data [2]capData
 		err = capget(&c.hdr, &data[0])
 		if err != nil {
-			return
+			return err
 		}
 		if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
-			for i := Cap(0); i <= CAP_LAST_CAP; i++ {
+			for i := Cap(0); i <= last; i++ {
 				if c.Get(BOUNDING, i) {
 					continue
 				}
-				err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
+				// Ignore EINVAL since the capability may not be supported on this system.
+				err = ignoreEINVAL(dropBound(i))
 				if err != nil {
-					// Ignore EINVAL since the capability may not be supported in this system.
-					if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
-						err = nil
-						continue
-					}
-					return
+					return err
 				}
 			}
 		}
@@ -464,26 +365,73 @@ func (c *capsV3) Apply(kind CapType) (err error) {
 	if kind&CAPS == CAPS {
 		err = capset(&c.hdr, &c.data[0])
 		if err != nil {
-			return
+			return err
 		}
 	}
 
 	if kind&AMBS == AMBS {
-		for i := Cap(0); i <= CAP_LAST_CAP; i++ {
-			action := pr_CAP_AMBIENT_LOWER
-			if c.Get(AMBIENT, i) {
-				action = pr_CAP_AMBIENT_RAISE
+		// Ignore EINVAL as not supported on kernels before 4.3
+		err = ignoreEINVAL(resetAmbient())
+		if err != nil {
+			return err
+		}
+		for i := Cap(0); i <= last; i++ {
+			if !c.Get(AMBIENT, i) {
+				continue
 			}
-			err := prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
 			// Ignore EINVAL as not supported on kernels before 4.3
-			if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
-				err = nil
-				continue
+			err = ignoreEINVAL(setAmbient(true, i))
+			if err != nil {
+				return err
 			}
 		}
 	}
 
-	return
+	return nil
+}
+
+func getAmbient(c Cap) (bool, error) {
+	res, err := prctlRetInt(pr_CAP_AMBIENT, pr_CAP_AMBIENT_IS_SET, uintptr(c))
+	if err != nil {
+		return false, err
+	}
+	return res > 0, nil
+}
+
+func setAmbient(raise bool, caps ...Cap) error {
+	op := pr_CAP_AMBIENT_RAISE
+	if !raise {
+		op = pr_CAP_AMBIENT_LOWER
+	}
+	for _, val := range caps {
+		err := prctl(pr_CAP_AMBIENT, op, uintptr(val))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func resetAmbient() error {
+	return prctl(pr_CAP_AMBIENT, pr_CAP_AMBIENT_CLEAR_ALL, 0)
+}
+
+func getBound(c Cap) (bool, error) {
+	res, err := prctlRetInt(syscall.PR_CAPBSET_READ, uintptr(c), 0)
+	if err != nil {
+		return false, err
+	}
+	return res > 0, nil
+}
+
+func dropBound(caps ...Cap) error {
+	for _, val := range caps {
+		err := prctl(syscall.PR_CAPBSET_DROP, uintptr(val), 0)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
 }
 
 func newFile(path string) (c Capabilities, err error) {
@@ -547,7 +495,8 @@ func (c *capsFile) Full(which CapType) bool {
 	if (data[0] & 0xffffffff) != 0xffffffff {
 		return false
 	}
-	return (data[1] & capUpperMask) == capUpperMask
+	mask := capUpperMask()
+	return (data[1] & mask) == mask
 }
 
 func (c *capsFile) Set(which CapType, caps ...Cap) {
diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go
new file mode 100644
index 000000000..b766e444f
--- /dev/null
+++ b/vendor/github.com/moby/sys/capability/capability_noop.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package capability
+
+import "errors"
+
+var errNotSup = errors.New("not supported")
+
+func newPid(_ int) (Capabilities, error) {
+	return nil, errNotSup
+}
+
+func newFile(_ string) (Capabilities, error) {
+	return nil, errNotSup
+}
+
+func lastCap() (Cap, error) {
+	return -1, errNotSup
+}
+
+func getAmbient(_ Cap) (bool, error) {
+	return false, errNotSup
+}
+
+func setAmbient(_ bool, _ ...Cap) error {
+	return errNotSup
+}
+
+func resetAmbient() error {
+	return errNotSup
+}
+
+func getBound(_ Cap) (bool, error) {
+	return false, errNotSup
+}
+
+func dropBound(_ ...Cap) error {
+	return errNotSup
+}
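Because every constructor and query in this stub fails with the same "not supported" error, cross-platform callers can compile against the package unconditionally and branch at run time. A hedged fragment (log import assumed; NewPid2 is the pid-based counterpart to the NewFile2 constructor shown earlier):

	c, err := capability.NewPid2(0) // 0 means the current process
	if err != nil {
		// On non-Linux builds every constructor fails with "not supported".
		log.Printf("capabilities unavailable: %v", err)
		return
	}
	if err := c.Load(); err != nil {
		log.Printf("loading capabilities: %v", err)
	}
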
diff --git a/vendor/github.com/syndtr/gocapability/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go
similarity index 91%
rename from vendor/github.com/syndtr/gocapability/capability/enum.go
rename to vendor/github.com/moby/sys/capability/enum.go
index ad1078531..f88593310 100644
--- a/vendor/github.com/syndtr/gocapability/capability/enum.go
+++ b/vendor/github.com/moby/sys/capability/enum.go
@@ -1,11 +1,14 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// Copyright 2024 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
 // All rights reserved.
 //
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package capability
 
+import "slices"
+
 type CapType uint
 
 func (c CapType) String() string {
@@ -301,9 +304,27 @@ const (
 	CAP_CHECKPOINT_RESTORE = Cap(40)
 )
 
-var (
-	// Highest valid capability of the running kernel.
-	CAP_LAST_CAP = Cap(63)
+// List returns the list of all capabilities known to the package.
+//
+// Deprecated: use [ListKnown] or [ListSupported] instead.
+func List() []Cap {
+	return ListKnown()
+}
 
-	capUpperMask = ^uint32(0)
-)
+// ListKnown returns the list of all capabilities known to the package.
+func ListKnown() []Cap {
+	return list()
+}
+
+// ListSupported returns the list of all capabilities known to the package,
+// except those that are not supported by the currently running Linux kernel.
+func ListSupported() ([]Cap, error) {
+	last, err := LastCap()
+	if err != nil {
+		return nil, err
+	}
+	return slices.DeleteFunc(list(), func(c Cap) bool {
+		// Remove caps not supported by the kernel.
+		return c > last
+	}), nil
+}
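ListKnown and ListSupported differ only in whether the result is cut off at the kernel's last capability, so on an up-to-date kernel they coincide. A short sketch, reusing the imports from the earlier example:

	func printCapCounts() error {
		known := capability.ListKnown() // every Cap compiled into the package
		supported, err := capability.ListSupported()
		if err != nil {
			return err
		}
		// On older kernels, supported is a strict prefix of known.
		fmt.Printf("known=%d supported=%d\n", len(known), len(supported))
		return nil
	}
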
diff --git a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/github.com/moby/sys/capability/enum_gen.go
similarity index 94%
rename from vendor/github.com/syndtr/gocapability/capability/enum_gen.go
rename to vendor/github.com/moby/sys/capability/enum_gen.go
index 2ff9bf4d8..f72cd43a6 100644
--- a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go
+++ b/vendor/github.com/moby/sys/capability/enum_gen.go
@@ -1,4 +1,4 @@
-// generated file; DO NOT EDIT - use go generate in directory with source
+// Code generated by go generate; DO NOT EDIT.
 
 package capability
 
@@ -90,8 +90,7 @@ func (c Cap) String() string {
 	return "unknown"
 }
 
-// List returns list of all supported capabilities
-func List() []Cap {
+func list() []Cap {
 	return []Cap{
 		CAP_CHOWN,
 		CAP_DAC_OVERRIDE,
diff --git a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go
similarity index 68%
rename from vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
rename to vendor/github.com/moby/sys/capability/syscall_linux.go
index 3d2bf6927..2d8faa85f 100644
--- a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
+++ b/vendor/github.com/moby/sys/capability/syscall_linux.go
@@ -1,8 +1,9 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// Copyright 2024 The Capability Authors.
+// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
 // All rights reserved.
 //
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
 
 package capability
 
@@ -23,7 +24,7 @@ type capData struct {
 }
 
 func capget(hdr *capHeader, data *capData) (err error) {
-	_, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
 	if e1 != 0 {
 		err = e1
 	}
@@ -31,7 +32,7 @@ func capget(hdr *capHeader, data *capData) (err error) {
 }
 
 func capset(hdr *capHeader, data *capData) (err error) {
-	_, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
 	if e1 != 0 {
 		err = e1
 	}
@@ -47,14 +48,22 @@ const (
 	pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
 )
 
-func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
-	_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
+func prctl(option int, arg2, arg3 uintptr) (err error) {
+	_, _, e1 := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3)
 	if e1 != 0 {
 		err = e1
 	}
 	return
 }
 
+func prctlRetInt(option int, arg2, arg3 uintptr) (int, error) {
+	ret, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3)
+	if err != 0 {
+		return 0, err
+	}
+	return int(ret), nil
+}
+
 const (
 	vfsXattrName = "security.capability"
 
@@ -79,9 +88,7 @@ type vfscapData struct {
 	version   int8
 }
 
-var (
-	_vfsXattrName *byte
-)
+var _vfsXattrName *byte
 
 func init() {
 	_vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
@@ -93,7 +100,7 @@ func getVfsCap(path string, dest *vfscapData) (err error) {
 	if err != nil {
 		return
 	}
-	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
+	r0, _, e1 := syscall.RawSyscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
 	if e1 != 0 {
 		if e1 == syscall.ENODATA {
 			dest.version = 2
@@ -146,7 +153,7 @@ func setVfsCap(path string, data *vfscapData) (err error) {
 	} else {
 		return syscall.EINVAL
 	}
-	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
+	_, _, e1 := syscall.RawSyscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
 	if e1 != 0 {
 		err = e1
 	}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
index f61a56015..e49e6d53f 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
@@ -120,10 +120,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
 		c["level"] = "s0"
 		fileLabel = c.Get()
 	}
-	if err := selinux.Chcon(path, fileLabel, true); err != nil {
-		return err
-	}
-	return nil
+	return selinux.Chcon(path, fileLabel, true)
 }
 
 // DisableSecOpt returns a security opt that can disable labeling
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
index f21c80c5a..1c260cb27 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
@@ -6,25 +6,25 @@ package label
 // InitLabels returns the process label and file labels to be used within
 // the container.  A list of options can be passed into this function to alter
 // the labels.
-func InitLabels(options []string) (string, string, error) {
+func InitLabels([]string) (string, string, error) {
 	return "", "", nil
 }
 
 // Deprecated: The GenLabels function is only to be used during the transition
 // to the official API. Use InitLabels(strings.Fields(options)) instead.
-func GenLabels(options string) (string, string, error) {
+func GenLabels(string) (string, string, error) {
 	return "", "", nil
 }
 
-func SetFileLabel(path string, fileLabel string) error {
+func SetFileLabel(string, string) error {
 	return nil
 }
 
-func SetFileCreateLabel(fileLabel string) error {
+func SetFileCreateLabel(string) error {
 	return nil
 }
 
-func Relabel(path string, fileLabel string, shared bool) error {
+func Relabel(string, string, bool) error {
 	return nil
 }
 
@@ -35,16 +35,16 @@ func DisableSecOpt() []string {
 }
 
 // Validate checks that the label does not include unexpected options
-func Validate(label string) error {
+func Validate(string) error {
 	return nil
 }
 
 // RelabelNeeded checks whether the user requested a relabel
-func RelabelNeeded(label string) bool {
+func RelabelNeeded(string) bool {
 	return false
 }
 
 // IsShared checks that the label includes a "shared" mark
-func IsShared(label string) bool {
+func IsShared(string) bool {
 	return false
 }
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index f1e95977d..c80c10971 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -132,7 +132,7 @@ func verifySELinuxfsMount(mnt string) bool {
 		if err == nil {
 			break
 		}
-		if err == unix.EAGAIN || err == unix.EINTR { //nolint:errorlint // unix errors are bare
+		if err == unix.EAGAIN || err == unix.EINTR {
 			continue
 		}
 		return false
@@ -263,7 +263,7 @@ func isProcHandle(fh *os.File) error {
 		if err == nil {
 			break
 		}
-		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+		if err != unix.EINTR {
 			return &os.PathError{Op: "fstatfs", Path: fh.Name(), Err: err}
 		}
 	}
@@ -328,8 +328,8 @@ func lSetFileLabel(fpath string, label string) error {
 		if err == nil {
 			break
 		}
-		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
-			return &os.PathError{Op: "lsetxattr", Path: fpath, Err: err}
+		if err != unix.EINTR {
+			return &os.PathError{Op: fmt.Sprintf("lsetxattr(label=%s)", label), Path: fpath, Err: err}
 		}
 	}
 
@@ -347,8 +347,8 @@ func setFileLabel(fpath string, label string) error {
 		if err == nil {
 			break
 		}
-		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
-			return &os.PathError{Op: "setxattr", Path: fpath, Err: err}
+		if err != unix.EINTR {
+			return &os.PathError{Op: fmt.Sprintf("setxattr(label=%s)", label), Path: fpath, Err: err}
 		}
 	}
 
@@ -639,6 +639,7 @@ func (m mlsRange) String() string {
 	return low + "-" + high
 }
 
+// TODO: remove min and max once Go < 1.21 is not supported.
 func max(a, b uint) uint {
 	if a > b {
 		return a
@@ -1134,7 +1135,7 @@ func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity
 	}
 	return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
 		if fastMode {
-			if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
+			if cLabel, err := lFileLabel(p); err == nil && cLabel == label {
 				return nil
 			}
 		}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index bc3fd3b37..0889fbe0e 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -7,7 +7,7 @@ func attrPath(string) string {
 	return ""
 }
 
-func readCon(fpath string) (string, error) {
+func readCon(string) (string, error) {
 	return "", nil
 }
 
@@ -21,27 +21,27 @@ func getEnabled() bool {
 	return false
 }
 
-func classIndex(class string) (int, error) {
+func classIndex(string) (int, error) {
 	return -1, nil
 }
 
-func setFileLabel(fpath string, label string) error {
+func setFileLabel(string, string) error {
 	return nil
 }
 
-func lSetFileLabel(fpath string, label string) error {
+func lSetFileLabel(string, string) error {
 	return nil
 }
 
-func fileLabel(fpath string) (string, error) {
+func fileLabel(string) (string, error) {
 	return "", nil
 }
 
-func lFileLabel(fpath string) (string, error) {
+func lFileLabel(string) (string, error) {
 	return "", nil
 }
 
-func setFSCreateLabel(label string) error {
+func setFSCreateLabel(string) error {
 	return nil
 }
 
@@ -53,7 +53,7 @@ func currentLabel() (string, error) {
 	return "", nil
 }
 
-func pidLabel(pid int) (string, error) {
+func pidLabel(int) (string, error) {
 	return "", nil
 }
 
@@ -61,23 +61,23 @@ func execLabel() (string, error) {
 	return "", nil
 }
 
-func canonicalizeContext(val string) (string, error) {
+func canonicalizeContext(string) (string, error) {
 	return "", nil
 }
 
-func computeCreateContext(source string, target string, class string) (string, error) {
+func computeCreateContext(string, string, string) (string, error) {
 	return "", nil
 }
 
-func calculateGlbLub(sourceRange, targetRange string) (string, error) {
+func calculateGlbLub(string, string) (string, error) {
 	return "", nil
 }
 
-func peerLabel(fd uintptr) (string, error) {
+func peerLabel(uintptr) (string, error) {
 	return "", nil
 }
 
-func setKeyLabel(label string) error {
+func setKeyLabel(string) error {
 	return nil
 }
 
@@ -85,14 +85,14 @@ func (c Context) get() string {
 	return ""
 }
 
-func newContext(label string) (Context, error) {
+func newContext(string) (Context, error) {
 	return Context{}, nil
 }
 
 func clearLabels() {
 }
 
-func reserveLabel(label string) {
+func reserveLabel(string) {
 }
 
 func isMLSEnabled() bool {
@@ -103,7 +103,7 @@ func enforceMode() int {
 	return Disabled
 }
 
-func setEnforceMode(mode int) error {
+func setEnforceMode(int) error {
 	return nil
 }
 
@@ -111,7 +111,7 @@ func defaultEnforceMode() int {
 	return Disabled
 }
 
-func releaseLabel(label string) {
+func releaseLabel(string) {
 }
 
 func roFileLabel() string {
@@ -126,27 +126,27 @@ func initContainerLabels() (string, string) {
 	return "", ""
 }
 
-func containerLabels() (processLabel string, fileLabel string) {
+func containerLabels() (string, string) {
 	return "", ""
 }
 
-func securityCheckContext(val string) error {
+func securityCheckContext(string) error {
 	return nil
 }
 
-func copyLevel(src, dest string) (string, error) {
+func copyLevel(string, string) (string, error) {
 	return "", nil
 }
 
-func chcon(fpath string, label string, recurse bool) error {
+func chcon(string, string, bool) error {
 	return nil
 }
 
-func dupSecOpt(src string) ([]string, error) {
+func dupSecOpt(string) ([]string, error) {
 	return nil, nil
 }
 
-func getDefaultContextWithLevel(user, level, scon string) (string, error) {
+func getDefaultContextWithLevel(string, string, string) (string, error) {
 	return "", nil
 }
 
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
index 9e473ca16..559c85107 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
@@ -31,7 +31,7 @@ func lgetxattr(path, attr string) ([]byte, error) {
 func doLgetxattr(path, attr string, dest []byte) (int, error) {
 	for {
 		sz, err := unix.Lgetxattr(path, attr, dest)
-		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+		if err != unix.EINTR {
 			return sz, err
 		}
 	}
@@ -64,7 +64,7 @@ func getxattr(path, attr string) ([]byte, error) {
 func dogetxattr(path, attr string, dest []byte) (int, error) {
 	for {
 		sz, err := unix.Getxattr(path, attr, dest)
-		if err != unix.EINTR { //nolint:errorlint // unix errors are bare
+		if err != unix.EINTR {
 			return sz, err
 		}
 	}
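Both helpers implement the standard EINTR-retry idiom: keep reissuing the syscall for as long as it is interrupted by a signal. A hypothetical helper, not part of this library, that captures the same pattern:

	import "golang.org/x/sys/unix"

	// retryEINTR re-runs f until it no longer fails with EINTR.
	func retryEINTR(f func() error) error {
		for {
			if err := f(); err != unix.EINTR {
				return err
			}
		}
	}
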
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
deleted file mode 100644
index 7e78dce01..000000000
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## pwalk: parallel implementation of filepath.Walk
-
-This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk)
-which may speed it up by calling multiple callback functions (WalkFunc) in parallel,
-utilizing goroutines.
-
-By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks.
-This can be changed by using WalkN function which has the additional
-parameter, specifying the number of goroutines (concurrency).
-
-### pwalk vs pwalkdir
-
-This package is deprecated in favor of
-[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir),
-which is faster, but requires at least Go 1.16.
-
-### Caveats
-
-Please note the following limitations of this code:
-
-* Unlike filepath.Walk, the order of calls is non-deterministic;
-
-* Only primitive error handling is supported:
-
-  * filepath.SkipDir is not supported;
-
-  * no errors are ever passed to WalkFunc;
-
-  * once any error is returned from any WalkFunc instance, no more new calls
-    to WalkFunc are made, and the error is returned to the caller of Walk;
-
-  * if more than one walkFunc instance will return an error, only one
-    of such errors will be propagated and returned by Walk, others
-    will be silently discarded.
-
-### Documentation
-
-For the official documentation, see
-https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc
-
-### Benchmarks
-
-For a WalkFunc that consists solely of the return statement, this
-implementation is about 10% slower than the standard library's
-filepath.Walk.
-
-Otherwise (if a WalkFunc is doing something) this is usually faster,
-except when the WalkN(..., 1) is used.
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
deleted file mode 100644
index a28b4c4bb..000000000
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package pwalk
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sync"
-)
-
-// WalkFunc is the type of the function called by Walk to visit each
-// file or directory. It is an alias for [filepath.WalkFunc].
-//
-// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir] and [fs.WalkDirFunc].
-type WalkFunc = filepath.WalkFunc
-
-// Walk is a wrapper for filepath.Walk which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// twice the runtime.NumCPU() walkFn will be called at any one time.
-// If you want to change the maximum, use WalkN instead.
-//
-// The order of calls is non-deterministic.
-//
-// Note that this implementation only supports primitive error handling:
-//
-// - no errors are ever passed to walkFn;
-//
-// - once a walkFn returns any error, all further processing stops
-// and the error is returned to the caller of Walk;
-//
-// - filepath.SkipDir is not supported;
-//
-// - if more than one walkFn instance will return an error, only one
-// of such errors will be propagated and returned by Walk, others
-// will be silently discarded.
-//
-// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.Walk]
-func Walk(root string, walkFn WalkFunc) error {
-	return WalkN(root, walkFn, runtime.NumCPU()*2)
-}
-
-// WalkN is a wrapper for filepath.Walk which can call multiple walkFn
-// in parallel, allowing to handle each item concurrently. A maximum of
-// num walkFn will be called at any one time.
-//
-// Please see Walk documentation for caveats of using this function.
-//
-// Deprecated: use [github.com/opencontainers/selinux/pkg/pwalkdir.WalkN]
-func WalkN(root string, walkFn WalkFunc, num int) error {
-	// make sure limit is sensible
-	if num < 1 {
-		return fmt.Errorf("walk(%q): num must be > 0", root)
-	}
-
-	files := make(chan *walkArgs, 2*num)
-	errCh := make(chan error, 1) // get the first error, ignore others
-
-	// Start walking a tree asap
-	var (
-		err error
-		wg  sync.WaitGroup
-
-		rootLen   = len(root)
-		rootEntry *walkArgs
-	)
-	wg.Add(1)
-	go func() {
-		err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
-			if err != nil {
-				close(files)
-				return err
-			}
-			if len(p) == rootLen {
-				// Root entry is processed separately below.
-				rootEntry = &walkArgs{path: p, info: &info}
-				return nil
-			}
-			// add a file to the queue unless a callback sent an error
-			select {
-			case e := <-errCh:
-				close(files)
-				return e
-			default:
-				files <- &walkArgs{path: p, info: &info}
-				return nil
-			}
-		})
-		if err == nil {
-			close(files)
-		}
-		wg.Done()
-	}()
-
-	wg.Add(num)
-	for i := 0; i < num; i++ {
-		go func() {
-			for file := range files {
-				if e := walkFn(file.path, *file.info, nil); e != nil {
-					select {
-					case errCh <- e: // sent ok
-					default: // buffer full
-					}
-				}
-			}
-			wg.Done()
-		}()
-	}
-
-	wg.Wait()
-
-	if err == nil {
-		err = walkFn(rootEntry.path, *rootEntry.info, nil)
-	}
-
-	return err
-}
-
-// walkArgs holds the arguments that were passed to the Walk or WalkN
-// functions.
-type walkArgs struct {
-	info *os.FileInfo
-	path string
-}
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
index 068ac4005..b827e7dd7 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/README.md
@@ -28,7 +28,9 @@ Please note the following limitations of this code:
 
   * fs.SkipDir is not supported;
 
-  * no errors are ever passed to WalkDirFunc;
+  * ErrNotExist errors from filepath.WalkDir are silently ignored for any path
+    except the top directory (WalkDir argument); any other error is returned to
+    the caller of WalkDir;
 
   * once any error is returned from any walkDirFunc instance, no more calls
     to WalkDirFunc are made, and the error is returned to the caller of WalkDir;
@@ -51,4 +53,4 @@ filepath.WalkDir.
 Otherwise (if a WalkDirFunc is actually doing something) this is usually
 faster, except when the WalkDirN(..., 1) is used. Run `go test -bench .`
 to see how different operations can benefit from it, as well as how the
-level of paralellism affects the speed.
+level of parallelism affects the speed.
diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
index 0f5d9f580..5d2d09a29 100644
--- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
+++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go
@@ -4,6 +4,7 @@
 package pwalkdir
 
 import (
+	"errors"
 	"fmt"
 	"io/fs"
 	"path/filepath"
@@ -60,6 +61,12 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
 	go func() {
 		err = filepath.WalkDir(root, func(p string, entry fs.DirEntry, err error) error {
 			if err != nil {
+				// Walking a file tree can race with removal,
+				// so ignore ENOENT, except for root.
+				// https://github.com/opencontainers/selinux/issues/199.
+				if errors.Is(err, fs.ErrNotExist) && len(p) != rootLen {
+					return nil
+				}
 				close(files)
 				return err
 			}
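With this change, a file removed while the walk is in flight no longer aborts the traversal; only a vanished root is still reported. A usage sketch (the callback runs concurrently across goroutines, so shared state must be synchronized):

	import (
		"io/fs"
		"sync/atomic"

		"github.com/opencontainers/selinux/pkg/pwalkdir"
	)

	func countEntries(root string) (int64, error) {
		var n int64
		err := pwalkdir.Walk(root, func(_ string, _ fs.DirEntry, _ error) error {
			atomic.AddInt64(&n, 1) // callbacks run in parallel
			return nil
		})
		return atomic.LoadInt64(&n), err
	}
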
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
index 0eb1e1d16..6fc80512f 100644
--- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -12,33 +12,45 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"time"
 
 	"github.com/google/uuid"
 )
 
+var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")
+
 // nextAligned finds the next offset that satisfies alignment.
-func nextAligned(offset int64, alignment int) int64 {
-	align64 := uint64(alignment)
-	offset64 := uint64(offset)
+func nextAligned(offset int64, alignment int) (int64, error) {
+	align64 := int64(alignment)
+
+	if align64 <= 0 || offset%align64 == 0 {
+		return offset, nil
+	}
 
-	if align64 != 0 && offset64%align64 != 0 {
-		offset64 = (offset64 & ^(align64 - 1)) + align64
+	align64 -= offset % align64
+
+	if (math.MaxInt64 - offset) < align64 {
+		return 0, errAlignmentOverflow
 	}
 
-	return int64(offset64)
+	return offset + align64, nil
 }
 
 // writeDataObjectAt writes the data object described by di to ws, using time t, recording details
 // in d. The object is written at the first position that satisfies the alignment requirements
 // described by di following offsetUnaligned.
 func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
-	offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
+	offset, err := nextAligned(offsetUnaligned, di.opts.alignment)
 	if err != nil {
 		return err
 	}
 
+	if _, err := ws.Seek(offset, io.SeekStart); err != nil {
+		return err
+	}
+
 	n, err := io.Copy(ws, di.r)
 	if err != nil {
 		return err
@@ -72,6 +84,7 @@ func (f *FileImage) calculatedDataSize() int64 {
 var (
 	errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
 	errPrimaryPartition     = errors.New("image already contains a primary partition")
+	errObjectIDOverflow     = errors.New("object ID would overflow")
 )
 
 // writeDataObject writes the data object described by di to f, using time t, recording details in
@@ -81,6 +94,11 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
 		return errInsufficientCapacity
 	}
 
+	// We derive the ID from i, so make sure the ID will not overflow.
+	if int64(i) >= math.MaxUint32 {
+		return errObjectIDOverflow
+	}
+
 	// If this is a primary partition, verify there isn't another primary partition, and update the
 	// architecture in the global header.
 	if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys {
@@ -92,7 +110,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
 	}
 
 	d := &f.rds[i]
-	d.ID = uint32(i) + 1
+	d.ID = uint32(i) + 1 //nolint:gosec // Overflow handled above.
 
 	f.h.DataSize = f.calculatedDataSize()
 
@@ -213,8 +231,16 @@ func OptCreateWithCloseOnUnload(b bool) CreateOpt {
 	}
 }
 
+var errDescriptorCapacityNotSupported = errors.New("descriptor capacity not supported")
+
 // createContainer creates a new SIF container file in rw, according to opts.
 func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
+	// The supported number of descriptors is limited by the unsigned 32-bit ID field in each
+	// rawDescriptor.
+	if co.descriptorCapacity >= math.MaxUint32 {
+		return nil, errDescriptorCapacityNotSupported
+	}
+
 	rds := make([]rawDescriptor, co.descriptorCapacity)
 	rdsSize := int64(binary.Size(rds))
 
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability.go b/vendor/github.com/syndtr/gocapability/capability/capability.go
deleted file mode 100644
index 61a90775e..000000000
--- a/vendor/github.com/syndtr/gocapability/capability/capability.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package capability provides utilities for manipulating POSIX capabilities.
-package capability
-
-type Capabilities interface {
-	// Get check whether a capability present in the given
-	// capabilities set. The 'which' value should be one of EFFECTIVE,
-	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
-	Get(which CapType, what Cap) bool
-
-	// Empty check whether all capability bits of the given capabilities
-	// set are zero. The 'which' value should be one of EFFECTIVE,
-	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
-	Empty(which CapType) bool
-
-	// Full check whether all capability bits of the given capabilities
-	// set are one. The 'which' value should be one of EFFECTIVE,
-	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
-	Full(which CapType) bool
-
-	// Set sets capabilities of the given capabilities sets. The
-	// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
-	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
-	Set(which CapType, caps ...Cap)
-
-	// Unset unsets capabilities of the given capabilities sets. The
-	// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
-	// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
-	Unset(which CapType, caps ...Cap)
-
-	// Fill sets all bits of the given capabilities kind to one. The
-	// 'kind' value should be one or combination (OR'ed) of CAPS,
-	// BOUNDS or AMBS.
-	Fill(kind CapType)
-
-	// Clear sets all bits of the given capabilities kind to zero. The
-	// 'kind' value should be one or combination (OR'ed) of CAPS,
-	// BOUNDS or AMBS.
-	Clear(kind CapType)
-
-	// String return current capabilities state of the given capabilities
-	// set as string. The 'which' value should be one of EFFECTIVE,
-	// PERMITTED, INHERITABLE BOUNDING or AMBIENT
-	StringCap(which CapType) string
-
-	// String return current capabilities state as string.
-	String() string
-
-	// Load load actual capabilities value. This will overwrite all
-	// outstanding changes.
-	Load() error
-
-	// Apply apply the capabilities settings, so all changes will take
-	// effect.
-	Apply(kind CapType) error
-}
-
-// NewPid initializes a new Capabilities object for given pid when
-// it is nonzero, or for the current process if pid is 0.
-//
-// Deprecated: Replace with NewPid2.  For example, replace:
-//
-//    c, err := NewPid(0)
-//    if err != nil {
-//      return err
-//    }
-//
-// with:
-//
-//    c, err := NewPid2(0)
-//    if err != nil {
-//      return err
-//    }
-//    err = c.Load()
-//    if err != nil {
-//      return err
-//    }
-func NewPid(pid int) (Capabilities, error) {
-	c, err := newPid(pid)
-	if err != nil {
-		return c, err
-	}
-	err = c.Load()
-	return c, err
-}
-
-// NewPid2 initializes a new Capabilities object for given pid when
-// it is nonzero, or for the current process if pid is 0.  This
-// does not load the process's current capabilities; to do that you
-// must call Load explicitly.
-func NewPid2(pid int) (Capabilities, error) {
-	return newPid(pid)
-}
-
-// NewFile initializes a new Capabilities object for given file path.
-//
-// Deprecated: Replace with NewFile2.  For example, replace:
-//
-//    c, err := NewFile(path)
-//    if err != nil {
-//      return err
-//    }
-//
-// with:
-//
-//    c, err := NewFile2(path)
-//    if err != nil {
-//      return err
-//    }
-//    err = c.Load()
-//    if err != nil {
-//      return err
-//    }
-func NewFile(path string) (Capabilities, error) {
-	c, err := newFile(path)
-	if err != nil {
-		return c, err
-	}
-	err = c.Load()
-	return c, err
-}
-
-// NewFile2 creates a new initialized Capabilities object for given
-// file path.  This does not load the process's current capabilities;
-// to do that you must call Load explicitly.
-func NewFile2(path string) (Capabilities, error) {
-	return newFile(path)
-}
diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
deleted file mode 100644
index 9bb3070c5..000000000
--- a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !linux
-
-package capability
-
-import "errors"
-
-func newPid(pid int) (Capabilities, error) {
-	return nil, errors.New("not supported")
-}
-
-func newFile(path string) (Capabilities, error) {
-	return nil, errors.New("not supported")
-}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go
index 1f89d0c59..60977980c 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/format.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/format.go
@@ -143,6 +143,10 @@ const (
 	blockSize  = 512 // Size of each block in a tar stream
 	nameSize   = 100 // Max length of the name field in USTAR format
 	prefixSize = 155 // Max length of the prefix field in USTAR format
+
+	// Max length of a special file (PAX header, GNU long name or link).
+	// This matches the limit used by libarchive.
+	maxSpecialFileSize = 1 << 20
 )
 
 // blockPadding computes the number of bytes needed to pad offset up to the
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
index 6a6b3e018..248a7ccb1 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -144,7 +144,7 @@ func (tr *Reader) next() (*Header, error) {
 			continue // This is a meta header affecting the next header
 		case TypeGNULongName, TypeGNULongLink:
 			format.mayOnlyBe(FormatGNU)
-			realname, err := io.ReadAll(tr)
+			realname, err := readSpecialFile(tr)
 			if err != nil {
 				return nil, err
 			}
@@ -338,7 +338,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
 // parsePAX parses PAX headers.
 // If an extended header (type 'x') is invalid, ErrHeader is returned
 func parsePAX(r io.Reader) (map[string]string, error) {
-	buf, err := io.ReadAll(r)
+	buf, err := readSpecialFile(r)
 	if err != nil {
 		return nil, err
 	}
@@ -889,6 +889,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) {
 	return n, err
 }
 
+// readSpecialFile is like io.ReadAll except that it returns
+// ErrFieldTooLong if more than maxSpecialFileSize bytes are read.
+func readSpecialFile(r io.Reader) ([]byte, error) {
+	buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
+	if len(buf) > maxSpecialFileSize {
+		return nil, ErrFieldTooLong
+	}
+	return buf, err
+}
+
 // discard skips n bytes in r, reporting an error if unable to do so.
 func discard(tr *Reader, n int64) error {
 	var seekSkipped, copySkipped int64
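The +1 in the LimitReader bound is what lets readSpecialFile tell "exactly at the limit" apart from "over it": if the capped read still yields more than maxSpecialFileSize bytes, at least one extra byte existed. A minimal demonstration of the pattern with a small cap:

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	func main() {
		const max = 8
		r := strings.NewReader(strings.Repeat("x", 10))
		buf, _ := io.ReadAll(io.LimitReader(r, max+1)) // read at most max+1 bytes
		fmt.Println(len(buf) > max)                    // true: the field is too long
	}
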
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
index 214acaf58..5d6e6156b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -18,13 +18,6 @@ const (
 	WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
 )
 
-// Server HTTP metrics.
-const (
-	serverRequestSize  = "http.server.request.size"  // Incoming request bytes total
-	serverResponseSize = "http.server.response.size" // Incoming response bytes total
-	serverDuration     = "http.server.duration"      // Incoming end to end duration, milliseconds
-)
-
 // Client HTTP metrics.
 const (
 	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index f0a9bb9ef..a01bfafbe 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -8,6 +8,8 @@ import (
 	"net/http"
 	"net/http/httptrace"
 
+	"go.opentelemetry.io/otel/attribute"
+
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/propagation"
@@ -33,8 +35,9 @@ type config struct {
 	SpanNameFormatter func(string, *http.Request) string
 	ClientTrace       func(context.Context) *httptrace.ClientTrace
 
-	TracerProvider trace.TracerProvider
-	MeterProvider  metric.MeterProvider
+	TracerProvider     trace.TracerProvider
+	MeterProvider      metric.MeterProvider
+	MetricAttributesFn func(*http.Request) []attribute.KeyValue
 }
 
 // Option interface used for setting optional config properties.
@@ -194,3 +197,11 @@ func WithServerName(server string) Option {
 		c.ServerName = server
 	})
 }
+
+// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
+// These attributes will be included in metrics for every request.
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
+	return optionFunc(func(c *config) {
+		c.MetricAttributesFn = metricAttributesFn
+	})
+}
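A hedged usage sketch for the new option, attaching one extra attribute to every request's metrics; the mux and function names here are illustrative:

	import (
		"net/http"

		"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
		"go.opentelemetry.io/otel/attribute"
	)

	func newInstrumentedHandler(mux *http.ServeMux) http.Handler {
		return otelhttp.NewHandler(mux, "server",
			otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
				return []attribute.KeyValue{attribute.String("http.host", r.Host)}
			}))
	}
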
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index d01bdccf4..33580a35b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -9,11 +9,9 @@ import (
 
 	"github.com/felixge/httpsnoop"
 
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
 	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/propagation"
 	"go.opentelemetry.io/otel/trace"
 )
@@ -24,7 +22,6 @@ type middleware struct {
 	server    string
 
 	tracer            trace.Tracer
-	meter             metric.Meter
 	propagators       propagation.TextMapPropagator
 	spanStartOptions  []trace.SpanStartOption
 	readEvent         bool
@@ -34,10 +31,7 @@ type middleware struct {
 	publicEndpoint    bool
 	publicEndpointFn  func(*http.Request) bool
 
-	traceSemconv         semconv.HTTPServer
-	requestBytesCounter  metric.Int64Counter
-	responseBytesCounter metric.Int64Counter
-	serverLatencyMeasure metric.Float64Histogram
+	semconv semconv.HTTPServer
 }
 
 func defaultHandlerFormatter(operation string, _ *http.Request) string {
@@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
 func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
 	h := middleware{
 		operation: operation,
-
-		traceSemconv: semconv.NewHTTPServer(),
 	}
 
 	defaultOpts := []Option{
@@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
 
 	c := newConfig(append(defaultOpts, opts...)...)
 	h.configure(c)
-	h.createMeasures()
 
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
 
 func (h *middleware) configure(c *config) {
 	h.tracer = c.Tracer
-	h.meter = c.Meter
 	h.propagators = c.Propagators
 	h.spanStartOptions = c.SpanStartOptions
 	h.readEvent = c.ReadEvent
@@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) {
 	h.publicEndpoint = c.PublicEndpoint
 	h.publicEndpointFn = c.PublicEndpointFn
 	h.server = c.ServerName
+	h.semconv = semconv.NewHTTPServer(c.Meter)
 }
 
 func handleErr(err error) {
@@ -96,30 +87,6 @@ func handleErr(err error) {
 	}
 }
 
-func (h *middleware) createMeasures() {
-	var err error
-	h.requestBytesCounter, err = h.meter.Int64Counter(
-		serverRequestSize,
-		metric.WithUnit("By"),
-		metric.WithDescription("Measures the size of HTTP request messages."),
-	)
-	handleErr(err)
-
-	h.responseBytesCounter, err = h.meter.Int64Counter(
-		serverResponseSize,
-		metric.WithUnit("By"),
-		metric.WithDescription("Measures the size of HTTP response messages."),
-	)
-	handleErr(err)
-
-	h.serverLatencyMeasure, err = h.meter.Float64Histogram(
-		serverDuration,
-		metric.WithUnit("ms"),
-		metric.WithDescription("Measures the duration of inbound HTTP requests."),
-	)
-	handleErr(err)
-}
-
 // serveHTTP sets up tracing and calls the given next http.Handler with the span
 // context injected into the request context.
 func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
@@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 
 	ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
 	opts := []trace.SpanStartOption{
-		trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
+		trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
 	}
 
 	opts = append(opts, h.spanStartOptions...)
@@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 		}
 	}
 
-	var bw bodyWrapper
 	// if request body is nil or NoBody, we don't want to mutate the body as it
 	// will affect the identity of it in an unforeseeable way because we assert
 	// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+	bw := request.NewBodyWrapper(r.Body, readRecordFunc)
 	if r.Body != nil && r.Body != http.NoBody {
-		bw.ReadCloser = r.Body
-		bw.record = readRecordFunc
-		r.Body = &bw
+		r.Body = bw
 	}
 
 	writeRecordFunc := func(int64) {}
@@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 		}
 	}
 
-	rww := &respWriterWrapper{
-		ResponseWriter: w,
-		record:         writeRecordFunc,
-		ctx:            ctx,
-		props:          h.propagators,
-		statusCode:     http.StatusOK, // default status code in case the Handler doesn't write anything
-	}
+	rww := request.NewRespWriterWrapper(w, writeRecordFunc)
 
 	// Wrap w to use our ResponseWriter methods while also exposing
 	// other interfaces that w may implement (http.CloseNotifier,
@@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
 
 	next.ServeHTTP(w, r.WithContext(ctx))
 
-	span.SetStatus(semconv.ServerStatus(rww.statusCode))
-	span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
-		StatusCode: rww.statusCode,
-		ReadBytes:  bw.read.Load(),
-		ReadError:  bw.err,
-		WriteBytes: rww.written,
-		WriteError: rww.err,
+	statusCode := rww.StatusCode()
+	bytesWritten := rww.BytesWritten()
+	span.SetStatus(h.semconv.Status(statusCode))
+	span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+		StatusCode: statusCode,
+		ReadBytes:  bw.BytesRead(),
+		ReadError:  bw.Error(),
+		WriteBytes: bytesWritten,
+		WriteError: rww.Error(),
 	})...)
 
-	// Add metrics
-	attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
-	if rww.statusCode > 0 {
-		attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
-	}
-	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
-	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
-	h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
-	h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
-
 	// Use floating point division here for higher precision (instead of Millisecond method).
 	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
 
-	h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+	h.semconv.RecordMetrics(ctx, semconv.MetricData{
+		ServerName:           h.server,
+		Req:                  r,
+		StatusCode:           statusCode,
+		AdditionalAttributes: labeler.Get(),
+		RequestSize:          bw.BytesRead(),
+		ResponseSize:         bytesWritten,
+		ElapsedTime:          elapsedTime,
+	})
 }
 
 // WithRouteTag annotates spans and metrics with the provided route name
 // with HTTP route attribute.
 func WithRouteTag(route string, h http.Handler) http.Handler {
-	attr := semconv.NewHTTPServer().Route(route)
+	attr := semconv.NewHTTPServer(nil).Route(route)
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		span := trace.SpanFromContext(r.Context())
 		span.SetAttributes(attr)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
new file mode 100644
index 000000000..a945f5566
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+	"io"
+	"sync"
+)
+
+var _ io.ReadCloser = &BodyWrapper{}
+
+// BodyWrapper wraps an http.Request.Body (an io.ReadCloser) to track the
+// number of bytes read and the last error.
+type BodyWrapper struct {
+	io.ReadCloser
+	OnRead func(n int64) // must not be nil
+
+	mu   sync.Mutex
+	read int64
+	err  error
+}
+
+// NewBodyWrapper creates a new BodyWrapper.
+//
+// The onRead parameter is a callback invoked on every read, with the
+// number of bytes that were read.
+func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
+	return &BodyWrapper{
+		ReadCloser: body,
+		OnRead:     onRead,
+	}
+}
+
+// Read reads the data from the io.ReadCloser, and stores the number of bytes
+// read and the error.
+func (w *BodyWrapper) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	n1 := int64(n)
+
+	w.updateReadData(n1, err)
+	w.OnRead(n1)
+	return n, err
+}
+
+func (w *BodyWrapper) updateReadData(n int64, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.read += n
+	if err != nil {
+		w.err = err
+	}
+}
+
+// Close closes the io.ReadCloser.
+func (w *BodyWrapper) Close() error {
+	return w.ReadCloser.Close()
+}
+
+// BytesRead returns the number of bytes read up to this point.
+func (w *BodyWrapper) BytesRead() int64 {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	return w.read
+}
+
+// Error returns the last error.
+func (w *BodyWrapper) Error() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	return w.err
+}
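BodyWrapper lives under internal/ and cannot be imported by user code, but the counting-reader pattern it implements is easy to replicate. A stripped-down hypothetical analogue, without the mutex or error capture (io import assumed):

	// countingReader forwards reads and tallies the bytes seen.
	type countingReader struct {
		io.Reader
		n int64
	}

	func (c *countingReader) Read(p []byte) (int, error) {
		n, err := c.Reader.Read(p)
		c.n += int64(n)
		return n, err
	}
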
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
new file mode 100644
index 000000000..aea171fb2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+	"net/http"
+	"sync"
+)
+
+var _ http.ResponseWriter = &RespWriterWrapper{}
+
+// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
+// that may be useful when using it in real life situations.
+type RespWriterWrapper struct {
+	http.ResponseWriter
+	OnWrite func(n int64) // must not be nil
+
+	mu          sync.RWMutex
+	written     int64
+	statusCode  int
+	err         error
+	wroteHeader bool
+}
+
+// NewRespWriterWrapper creates a new RespWriterWrapper.
+//
+// The onWrite attribute is a callback that will be called every time the data
+// is written, with the number of bytes that were written.
+func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
+	return &RespWriterWrapper{
+		ResponseWriter: w,
+		OnWrite:        onWrite,
+		statusCode:     http.StatusOK, // default status code in case the Handler doesn't write anything
+	}
+}
+
+// Write writes the byte slice into the [ResponseWriter], and tracks the
+// number of bytes written and last error.
+func (w *RespWriterWrapper) Write(p []byte) (int, error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.writeHeader(http.StatusOK)
+
+	n, err := w.ResponseWriter.Write(p)
+	n1 := int64(n)
+	w.OnWrite(n1)
+	w.written += n1
+	w.err = err
+	return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Blocking consecutive calls to WriteHeader would alter the expected behavior
+// and would suppress the warning logs from net/http that let developers notice incorrect handler implementations.
+func (w *RespWriterWrapper) WriteHeader(statusCode int) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	w.writeHeader(statusCode)
+}
+
+// writeHeader persists the status code for span attribution, and propagates
+// the call to the underlying ResponseWriter.
+// It does not acquire a lock, and therefore assumes that locking is handled
+// by a parent method.
+func (w *RespWriterWrapper) writeHeader(statusCode int) {
+	if !w.wroteHeader {
+		w.wroteHeader = true
+		w.statusCode = statusCode
+	}
+	w.ResponseWriter.WriteHeader(statusCode)
+}
+
+// Flush implements [http.Flusher].
+func (w *RespWriterWrapper) Flush() {
+	w.WriteHeader(http.StatusOK)
+
+	if f, ok := w.ResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	}
+}
+
+// BytesWritten returns the number of bytes written.
+func (w *RespWriterWrapper) BytesWritten() int64 {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.written
+}
+
+// StatusCode returns the HTTP status code that was sent.
+func (w *RespWriterWrapper) StatusCode() int {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.statusCode
+}
+
+// Error returns the last error.
+func (w *RespWriterWrapper) Error() error {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+
+	return w.err
+}
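The status-code bookkeeping above can be exercised with `httptest`; this is a simplified local analog (locking elided), since `RespWriterWrapper` itself is internal:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// respRecorder mirrors the core of RespWriterWrapper: it persists the first
// status code written and counts the bytes written.
type respRecorder struct {
	http.ResponseWriter
	written     int64
	statusCode  int
	wroteHeader bool
}

func (w *respRecorder) Write(p []byte) (int, error) {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK) // implicit 200, matching net/http semantics
	}
	n, err := w.ResponseWriter.Write(p)
	w.written += int64(n)
	return n, err
}

func (w *respRecorder) WriteHeader(statusCode int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.statusCode = statusCode // only the first status code is persisted
	}
	w.ResponseWriter.WriteHeader(statusCode) // still propagated downstream
}

func main() {
	w := &respRecorder{ResponseWriter: httptest.NewRecorder()}
	w.WriteHeader(http.StatusTeapot)
	w.WriteHeader(http.StatusOK) // propagated, but the recorded status stays 418
	_, _ = w.Write([]byte("short and stout"))
	fmt.Println(w.statusCode, w.written) // 418 15
}
```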
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 3ec0ad00c..9cae4cab8 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -4,6 +4,7 @@
 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"os"
@@ -11,6 +12,7 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric"
 )
 
 type ResponseTelemetry struct {
@@ -23,6 +25,11 @@ type ResponseTelemetry struct {
 
 type HTTPServer struct {
 	duplicate bool
+
+	// Old metrics
+	requestBytesCounter  metric.Int64Counter
+	responseBytesCounter metric.Int64Counter
+	serverLatencyMeasure metric.Float64Histogram
 }
 
 // RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
 	return oldHTTPServer{}.Route(route)
 }
 
-func NewHTTPServer() HTTPServer {
-	env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
-	return HTTPServer{duplicate: env == "http/dup"}
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
+// Status returns a span status code and message for an HTTP status code
 // value returned by a server. Status codes in the 400-499 range are not
 // returned as errors.
-func ServerStatus(code int) (codes.Code, string) {
+func (s HTTPServer) Status(code int) (codes.Code, string) {
 	if code < 100 || code >= 600 {
 		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
 	}
@@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) {
 	}
 	return codes.Unset, ""
 }
+
+type MetricData struct {
+	ServerName           string
+	Req                  *http.Request
+	StatusCode           int
+	AdditionalAttributes []attribute.KeyValue
+
+	RequestSize  int64
+	ResponseSize int64
+	ElapsedTime  float64
+}
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
+		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
+		return
+	}
+
+	attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+	s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
+	s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+	s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+
+	// TODO: Duplicate Metrics
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	duplicate := env == "http/dup"
+	server := HTTPServer{
+		duplicate: duplicate,
+	}
+	server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
+	return server
+}
+
+type HTTPClient struct {
+	duplicate bool
+}
+
+func NewHTTPClient() HTTPClient {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	return HTTPClient{duplicate: env == "http/dup"}
+}
+
+// RequestTraceAttrs returns attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
+	}
+	return oldHTTPClient{}.RequestTraceAttrs(req)
+}
+
+// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
+	}
+
+	return oldHTTPClient{}.ResponseTraceAttrs(resp)
+}
+
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 400 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+	if c.duplicate {
+		return newHTTPClient{}.ErrorType(err)
+	}
+
+	return attribute.KeyValue{}
+}
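The duplicate-attribute opt-in used by both `NewHTTPServer` and `NewHTTPClient` boils down to one environment check; a sketch of that check in isolation:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// duplicateSemconv mirrors the opt-in read above: when
// OTEL_SEMCONV_STABILITY_OPT_IN is "http/dup", both the old (v1.20.0) and the
// new (v1.26.0) semantic-convention attributes are emitted side by side.
func duplicateSemconv() bool {
	return strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) == "http/dup"
}

func main() {
	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "HTTP/dup")
	fmt.Println(duplicateSemconv()) // true; the value is compared case-insensitively
}
```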
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
similarity index 57%
rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
index 0c5d4c460..745b8c67b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -4,11 +4,14 @@
 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 
 import (
+	"fmt"
 	"net/http"
+	"reflect"
+	"strconv"
 	"strings"
 
 	"go.opentelemetry.io/otel/attribute"
-	semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 type newHTTPServer struct{}
@@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
 func (n newHTTPServer) Route(route string) attribute.KeyValue {
 	return semconvNew.HTTPRoute(route)
 }
+
+type newHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.request.method
+	   - http.request.method.original
+	   - url.full
+	   - server.address
+	   - server.port
+	   - network.protocol.name
+	   - network.protocol.version
+	*/
+	numOfAttributes := 3 // Method, URL, and server address are always set.
+
+	var urlHost string
+	if req.URL != nil {
+		urlHost = req.URL.Host
+	}
+	var requestHost string
+	var requestPort int
+	for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+		requestHost, requestPort = splitHostPort(hostport)
+		if requestHost != "" || requestPort > 0 {
+			break
+		}
+	}
+
+	eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+	if eligiblePort > 0 {
+		numOfAttributes++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		numOfAttributes++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		numOfAttributes++
+	}
+	if protoVersion != "" {
+		numOfAttributes++
+	}
+
+	method, originalMethod := n.method(req.Method)
+	if originalMethod != (attribute.KeyValue{}) {
+		numOfAttributes++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+	attrs = append(attrs, method)
+	if originalMethod != (attribute.KeyValue{}) {
+		attrs = append(attrs, originalMethod)
+	}
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, semconvNew.URLFull(u))
+
+	attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+	if eligiblePort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
+func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.response.status_code
+	   - error.type
+	*/
+	var count int
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		errorType := strconv.Itoa(resp.StatusCode)
+		attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+	}
+	return attrs
+}
+
+func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
+	t := reflect.TypeOf(err)
+	var value string
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		value = t.String()
+	} else {
+		value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+	}
+
+	if value == "" {
+		return semconvNew.ErrorTypeOther
+	}
+
+	return semconvNew.ErrorTypeKey.String(value)
+}
+
+func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func isErrorStatusCode(code int) bool {
+	return code >= 400 || code < 100
+}
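The `method` helper above normalizes request methods for the `http.request.method` attribute; a standalone sketch of the same mapping rules (attribute construction simplified to plain strings):

```go
package main

import (
	"fmt"
	"strings"
)

var knownMethods = map[string]bool{
	"CONNECT": true, "DELETE": true, "GET": true, "HEAD": true,
	"OPTIONS": true, "PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// normalizeMethod mirrors newHTTPClient.method: known methods pass through,
// unknown ones fall back to GET, and any non-canonical spelling is preserved
// separately, as http.request.method.original would be.
func normalizeMethod(method string) (canonical, original string) {
	if method == "" {
		return "GET", ""
	}
	if knownMethods[method] {
		return method, ""
	}
	if upper := strings.ToUpper(method); knownMethods[upper] {
		return upper, method // case-normalized; original spelling kept
	}
	return "GET", method
}

func main() {
	fmt.Println(normalizeMethod("get"))  // GET get
	fmt.Println(normalizeMethod("BREW")) // GET BREW
	fmt.Println(normalizeMethod("POST")) // POST
}
```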
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
index e7f293761..e6e14924f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -9,8 +9,9 @@ import (
 	"strconv"
 	"strings"
 
+	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
-	semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
 // splitHostPort splits a network address hostport of the form "host",
@@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
 	if err != nil {
 		return
 	}
-	return host, int(p)
+	return host, int(p) // nolint: gosec  // Bitsize checked to be 16 above.
 }
 
 func requiredHTTPPort(https bool, port int) int { // nolint:revive
@@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
 	http.MethodPut:     semconvNew.HTTPRequestMethodPut,
 	http.MethodTrace:   semconvNew.HTTPRequestMethodTrace,
 }
+
+func handleErr(err error) {
+	if err != nil {
+		otel.Handle(err)
+	}
+}
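The new `nolint: gosec` annotation above is justified because the port has already been parsed into 16 bits; a sketch of why the narrowing conversion is safe:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseUint is told to fit the value into 16 bits, the valid port range,
	// so the subsequent int(p) narrowing flagged by gosec can never overflow.
	p, err := strconv.ParseUint("8443", 10, 16)
	if err != nil {
		panic(err)
	}
	fmt.Println(int(p)) // 8443

	// Out-of-range ports fail at parse time instead.
	_, err = strconv.ParseUint("70000", 10, 16)
	fmt.Println(err != nil) // true
}
```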
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
index c3e838aaa..c999b05e6 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -7,9 +7,13 @@ import (
 	"errors"
 	"io"
 	"net/http"
+	"slices"
+	"strings"
 
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
 	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
 )
 
@@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
 func HTTPStatusCode(status int) attribute.KeyValue {
 	return semconv.HTTPStatusCode(status)
 }
+
+// Server HTTP metrics.
+const (
+	serverRequestSize  = "http.server.request.size"  // Incoming request bytes total
+	serverResponseSize = "http.server.response.size" // Outgoing response bytes total
+	serverDuration     = "http.server.duration"      // Incoming end to end duration, milliseconds
+)
+
+func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+	if meter == nil {
+		return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+	}
+	var err error
+	requestBytesCounter, err := meter.Int64Counter(
+		serverRequestSize,
+		metric.WithUnit("By"),
+		metric.WithDescription("Measures the size of HTTP request messages."),
+	)
+	handleErr(err)
+
+	responseBytesCounter, err := meter.Int64Counter(
+		serverResponseSize,
+		metric.WithUnit("By"),
+		metric.WithDescription("Measures the size of HTTP response messages."),
+	)
+	handleErr(err)
+
+	serverLatencyMeasure, err := meter.Float64Histogram(
+		serverDuration,
+		metric.WithUnit("ms"),
+		metric.WithDescription("Measures the duration of inbound HTTP requests."),
+	)
+	handleErr(err)
+
+	return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
+}
+
+func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+	n := len(additionalAttributes) + 3
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" {
+		n++
+	}
+	if protoVersion != "" {
+		n++
+	}
+
+	if statusCode > 0 {
+		n++
+	}
+
+	attributes := slices.Grow(additionalAttributes, n)
+	attributes = append(attributes,
+		o.methodMetric(req.Method),
+		o.scheme(req.TLS != nil),
+		semconv.NetHostName(host))
+
+	if hostPort > 0 {
+		attributes = append(attributes, semconv.NetHostPort(hostPort))
+	}
+	if protoName != "" {
+		attributes = append(attributes, semconv.NetProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
+	}
+
+	if statusCode > 0 {
+		attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+	}
+	return attributes
+}
+
+func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue {
+	method = strings.ToUpper(method)
+	switch method {
+	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+	default:
+		method = "_OTHER"
+	}
+	return semconv.HTTPMethod(method)
+}
+
+func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return semconv.HTTPSchemeHTTPS
+	}
+	return semconv.HTTPSchemeHTTP
+}
+
+type oldHTTPClient struct{}
+
+func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	return semconvutil.HTTPClientRequest(req)
+}
+
+func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	return semconvutil.HTTPClientResponse(resp)
+}
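`createMeasures` degrades to no-op instruments when no meter is supplied (as happens via `NewHTTPServer(nil)` above); a sketch of that fallback using the public `noop` package:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Mirrors the nil-meter guard in createMeasures: the no-op instruments
	// satisfy the same interfaces, so Add and Record are safe to call and
	// simply discard their measurements.
	var counter metric.Int64Counter = noop.Int64Counter{}
	var latency metric.Float64Histogram = noop.Float64Histogram{}

	ctx := context.Background()
	counter.Add(ctx, 42)
	latency.Record(ctx, 3.5)
}
```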
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
index a9a9226b3..b80a1db61 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
 	if err != nil {
 		return
 	}
-	return host, int(p)
+	return host, int(p) // nolint: gosec  // Bitsize checked to be 16 above.
 }
 
 func netProtocol(proto string) (name string, version string) {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index 0d3cb2e4a..b4119d343 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -11,13 +11,15 @@ import (
 	"sync/atomic"
 	"time"
 
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/propagation"
-	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+
 	"go.opentelemetry.io/otel/trace"
 )
 
@@ -26,14 +28,16 @@ import (
 type Transport struct {
 	rt http.RoundTripper
 
-	tracer            trace.Tracer
-	meter             metric.Meter
-	propagators       propagation.TextMapPropagator
-	spanStartOptions  []trace.SpanStartOption
-	filters           []Filter
-	spanNameFormatter func(string, *http.Request) string
-	clientTrace       func(context.Context) *httptrace.ClientTrace
+	tracer             trace.Tracer
+	meter              metric.Meter
+	propagators        propagation.TextMapPropagator
+	spanStartOptions   []trace.SpanStartOption
+	filters            []Filter
+	spanNameFormatter  func(string, *http.Request) string
+	clientTrace        func(context.Context) *httptrace.ClientTrace
+	metricAttributesFn func(*http.Request) []attribute.KeyValue
 
+	semconv              semconv.HTTPClient
 	requestBytesCounter  metric.Int64Counter
 	responseBytesCounter metric.Int64Counter
 	latencyMeasure       metric.Float64Histogram
@@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
 	}
 
 	t := Transport{
-		rt: base,
+		rt:      base,
+		semconv: semconv.NewHTTPClient(),
 	}
 
 	defaultOpts := []Option{
@@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) {
 	t.filters = c.Filters
 	t.spanNameFormatter = c.SpanNameFormatter
 	t.clientTrace = c.ClientTrace
+	t.metricAttributesFn = c.MetricAttributesFn
 }
 
 func (t *Transport) createMeasures() {
@@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
 
 	r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
 
-	// use a body wrapper to determine the request size
-	var bw bodyWrapper
 	// if request body is nil or NoBody, we don't want to mutate the body as it
 	// will affect the identity of it in an unforeseeable way because we assert
 	// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+	bw := request.NewBodyWrapper(r.Body, func(int64) {})
 	if r.Body != nil && r.Body != http.NoBody {
-		bw.ReadCloser = r.Body
-		// noop to prevent nil panic. not using this record fun yet.
-		bw.record = func(int64) {}
-		r.Body = &bw
+		r.Body = bw
 	}
 
-	span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+	span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
 	t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
 
 	res, err := t.rt.RoundTrip(r)
 	if err != nil {
-		span.RecordError(err)
+		// Set the error type attribute if the error is one of the predefined
+		// error types; otherwise, record it as an exception.
+		if errType := t.semconv.ErrorType(err); errType.Valid() {
+			span.SetAttributes(errType)
+		} else {
+			span.RecordError(err)
+		}
+
 		span.SetStatus(codes.Error, err.Error())
 		span.End()
 		return res, err
 	}
 
 	// metrics
-	metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+	metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...)
 	if res.StatusCode > 0 {
 		metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
 	}
 	o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
-	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
-	t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
+
+	t.requestBytesCounter.Add(ctx, bw.BytesRead(), o)
 	// For handling response bytes we leverage a callback when the client reads the http response
 	readRecordFunc := func(n int64) {
-		t.responseBytesCounter.Add(ctx, n, addOpts...)
+		t.responseBytesCounter.Add(ctx, n, o)
 	}
 
 	// traces
-	span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
-	span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+	span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
+	span.SetStatus(t.semconv.Status(res.StatusCode))
 
 	res.Body = newWrappedBody(span, readRecordFunc, res.Body)
 
@@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
 	return res, err
 }
 
+func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+	var attributeForRequest []attribute.KeyValue
+	if t.metricAttributesFn != nil {
+		attributeForRequest = t.metricAttributesFn(r)
+	}
+	return attributeForRequest
+}
+
 // newWrappedBody returns a new and appropriately scoped *wrappedBody as an
 // io.ReadCloser. If the passed body implements io.Writer, the returned value
 // will implement io.ReadWriteCloser.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index b0957f28c..502c1bdaf 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
 
 // Version is the current release version of the otelhttp instrumentation.
 func Version() string {
-	return "0.53.0"
+	return "0.54.0"
 	// This string is updated by the pre_release.sh script during release
 }
 
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
deleted file mode 100644
index 948f8406c..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"sync/atomic"
-
-	"go.opentelemetry.io/otel/propagation"
-)
-
-var _ io.ReadCloser = &bodyWrapper{}
-
-// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type bodyWrapper struct {
-	io.ReadCloser
-	record func(n int64) // must not be nil
-
-	read atomic.Int64
-	err  error
-}
-
-func (w *bodyWrapper) Read(b []byte) (int, error) {
-	n, err := w.ReadCloser.Read(b)
-	n1 := int64(n)
-	w.read.Add(n1)
-	w.err = err
-	w.record(n1)
-	return n, err
-}
-
-func (w *bodyWrapper) Close() error {
-	return w.ReadCloser.Close()
-}
-
-var _ http.ResponseWriter = &respWriterWrapper{}
-
-// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the first written statusCode.
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
-// that may be useful when using it in real life situations.
-type respWriterWrapper struct {
-	http.ResponseWriter
-	record func(n int64) // must not be nil
-
-	// used to inject the header
-	ctx context.Context
-
-	props propagation.TextMapPropagator
-
-	written     int64
-	statusCode  int
-	err         error
-	wroteHeader bool
-}
-
-func (w *respWriterWrapper) Header() http.Header {
-	return w.ResponseWriter.Header()
-}
-
-func (w *respWriterWrapper) Write(p []byte) (int, error) {
-	if !w.wroteHeader {
-		w.WriteHeader(http.StatusOK)
-	}
-	n, err := w.ResponseWriter.Write(p)
-	n1 := int64(n)
-	w.record(n1)
-	w.written += n1
-	w.err = err
-	return n, err
-}
-
-// WriteHeader persists initial statusCode for span attribution.
-// All calls to WriteHeader will be propagated to the underlying ResponseWriter
-// and will persist the statusCode from the first call.
-// Blocking consecutive calls to WriteHeader alters expected behavior and will
-// remove warning logs from net/http where developers will notice incorrect handler implementations.
-func (w *respWriterWrapper) WriteHeader(statusCode int) {
-	if !w.wroteHeader {
-		w.wroteHeader = true
-		w.statusCode = statusCode
-	}
-	w.ResponseWriter.WriteHeader(statusCode)
-}
-
-func (w *respWriterWrapper) Flush() {
-	if !w.wroteHeader {
-		w.WriteHeader(http.StatusOK)
-	}
-
-	if f, ok := w.ResponseWriter.(http.Flusher); ok {
-		f.Flush()
-	}
-}
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 6d9c8b649..d09555506 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -9,6 +9,8 @@ linters:
   disable-all: true
   # Specifically enable linters we want to use.
   enable:
+    - asasalint
+    - bodyclose
     - depguard
     - errcheck
     - errorlint
@@ -23,6 +25,7 @@ linters:
     - revive
     - staticcheck
     - tenv
+    - testifylint
     - typecheck
     - unconvert
     - unused
@@ -62,12 +65,12 @@ issues:
     - path: _test\.go
       linters:
         - gosec
-    # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
     # as we commonly use it in tests and examples.
     - text: "G404:"
       linters:
         - gosec
-    # Igonoring gosec G402: TLS MinVersion too low
+    # Ignoring gosec G402: TLS MinVersion too low
     # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
     - text: "G402: TLS MinVersion too low."
       linters:
@@ -300,3 +303,9 @@ linters-settings:
       # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
       - name: waitgroup-by-value
         disabled: false
+  testifylint:
+    enable-all: true
+    disable:
+      - float-compare
+      - go-require
+      - require-error
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index c01e6998e..4b361d026 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,112 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ## [Unreleased]
 
+<!-- Released section -->
+<!-- Don't change this section unless doing release -->
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
+- Add `WithExportBufferSize` option to log batch processor. (#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log record duplication in the case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and fix a panic when calling otel.SetMeterProvider. (#5827)
+- Change the `reflect.TypeOf` call to use a nil pointer so it does not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instruments creation when setting meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+  This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+  This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+  It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+  It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+  See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
 ## [1.28.0/0.50.0/0.4.0] 2024-07-02
 
 ### Added
@@ -49,6 +155,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 - Fix stale timestamps reported by the last-value aggregation. (#5517)
 - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
 - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
 
 ## [1.27.0/0.49.0/0.3.0] 2024-05-21
 
@@ -175,7 +282,7 @@ The next release will require at least [Go 1.21].
   This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
   This module is in an alpha state, it is subject to breaking changes.
   See our [versioning policy](./VERSIONING.md) for more info. (#4961)
-- ARM64 platform to the compatibility testing suite. (#4994)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
 
 ### Fixed
 
@@ -1836,7 +1943,7 @@ with major version 0.
 - Setting error status while recording error with Span from oteltest package. (#1729)
 - The concept of a remote and local Span stored in a context is unified to just the current Span.
   Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
-  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
   If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
 - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
   This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
@@ -2410,7 +2517,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
 - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
 - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
 - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-colector example to use the v0.5.0 collector. (#915)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
 - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
 - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
 - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@@ -3003,7 +3110,10 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
@@ -3086,6 +3196,9 @@ It contains api and sdk for trace and meter.
 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
 
+<!-- Released section ended -->
+
+[Go 1.23]: https://go.dev/doc/go1.23
 [Go 1.22]: https://go.dev/doc/go1.22
 [Go 1.21]: https://go.dev/doc/go1.21
 [Go 1.20]: https://go.dev/doc/go1.20
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 202554933..945a07d2b 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -5,13 +5,13 @@
 #####################################################
 #
 # Learn about membership in OpenTelemetry community:
-#  https://github.com/open-telemetry/community/blob/main/community-membership.md
+#  https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
 #
 #
 # Learn about CODEOWNERS file format:
 #  https://help.github.com/en/articles/about-code-owners
 #
 
-* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu
 
-CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
+CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index b86572f58..bb3396557 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -578,7 +578,10 @@ See also:
 The tests should never leak goroutines.
 
 Use the term `ConcurrentSafe` in the test name when it aims to verify the
-absence of race conditions.
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests unless the term
+also appears in the name of their root test.
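A minimal, hypothetical test following that convention; the `-run=ConcurrentSafe -count=100 -race` invocation added to the Makefile below picks it up by name:

```go
package example

import (
	"sync"
	"testing"
)

// TestConcurrentSafeCounter carries the ConcurrentSafe term in its top-level
// name, so the test-concurrent-safe CI job reruns it repeatedly under -race.
func TestConcurrentSafeCounter(t *testing.T) {
	var (
		mu sync.Mutex
		n  int
		wg sync.WaitGroup
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			n++
			mu.Unlock()
		}()
	}
	wg.Wait()
	if n != 10 {
		t.Fatalf("got %d, want 10", n)
	}
}
```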
 
 ### Internal packages
 
@@ -628,11 +631,8 @@ should be canceled.
 
 ### Approvers
 
-- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-
 ### Maintainers
 
-- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
 - [Damien Mathieu](https://github.com/dmathieu), Elastic
 - [David Ashpole](https://github.com/dashpole), Google
 - [Robert Pająk](https://github.com/pellared), Splunk
@@ -641,16 +641,18 @@ should be canceled.
 
 ### Emeritus
 
-- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
-- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
-- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
 - [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
 - [Evan Torrie](https://github.com/evantorrie), Yahoo
+- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
 
 ### Become an Approver or a Maintainer
 
 See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
 
 [Approver]: #approvers
 [Maintainer]: #maintainers
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index f33619f76..a1228a212 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
 PORTO = $(TOOLS)/porto
 $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
 
-GOJQ = $(TOOLS)/gojq
-$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
-
 GOTMPL = $(TOOLS)/gotmpl
 $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
 
@@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
 $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
 
 .PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
 
 # Virtualized python tools via docker
 
@@ -145,12 +142,14 @@ build-tests/%:
 
 # Tests
 
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
 .PHONY: $(TEST_TARGETS) test
 test-default test-race: ARGS=-race
 test-bench:   ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
 test-short:   ARGS=-short
 test-verbose: ARGS=-v -race
+test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
+test-concurrent-safe: TIMEOUT=120
 $(TEST_TARGETS): test
 test: $(OTEL_GO_MOD_DIRS:%=test/%)
 test/%: DIR=$*
@@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE)
 	done; \
 	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
 
-# Adding a directory will include all benchmarks in that directory if a filter is not specified.
-BENCHMARK_TARGETS := sdk/trace
 .PHONY: benchmark
-benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
-BENCHMARK_FILTER = .
-# You can override the filter for a particular directory by adding a rule here.
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
 benchmark/%:
-	@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+	@echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
 		&& cd $* \
-		$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
 
 .PHONY: golangci-lint golangci-lint-fix
 golangci-lint-fix: ARGS=--fix
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 5a8909317..efec27890 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner:
 
 Currently, this project supports the following environments.
 
-| OS      | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu  | 1.22       | amd64        |
-| Ubuntu  | 1.21       | amd64        |
-| Ubuntu  | 1.22       | 386          |
-| Ubuntu  | 1.21       | 386          |
-| Linux   | 1.22       | arm64        |
-| Linux   | 1.21       | arm64        |
-| MacOS   | 1.22       | amd64        |
-| MacOS   | 1.21       | amd64        |
-| Windows | 1.22       | amd64        |
-| Windows | 1.21       | amd64        |
-| Windows | 1.22       | 386          |
-| Windows | 1.21       | 386          |
+| OS       | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu   | 1.23       | amd64        |
+| Ubuntu   | 1.22       | amd64        |
+| Ubuntu   | 1.23       | 386          |
+| Ubuntu   | 1.22       | 386          |
+| Linux    | 1.23       | arm64        |
+| Linux    | 1.22       | arm64        |
+| macOS 13 | 1.23       | amd64        |
+| macOS 13 | 1.22       | amd64        |
+| macOS    | 1.23       | arm64        |
+| macOS    | 1.22       | arm64        |
+| Windows  | 1.23       | amd64        |
+| Windows  | 1.22       | amd64        |
+| Windows  | 1.23       | 386          |
+| Windows  | 1.22       | 386          |
 
 While this project should work for other systems, no compatibility guarantees
 are made for those systems currently.
@@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want
 to build your own instrumentation for your application directly you will need
 to use the
 [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
-package. The included [examples](./example/) are a good way to see some
-practical uses of this process.
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
+are a good way to see some practical uses of this process.
 
 ### Export
 
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 940f57f3d..ffa9b6125 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
        ```
 
    - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
    - Update all the appropriate links at the bottom.
 
 4. Push the changes to upstream and create a Pull Request on GitHub.
@@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.
 Finally create a Release for the new `<new tag>` on GitHub.
 The release body should include all the release notes from the Changelog for this release.
 
-## Verify Examples
-
-After releasing verify that examples build outside of the repository.
-
-```
-./verify_examples.sh
-```
-
-The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
-This ensures they build with the published release, not the local copy.
-
 ## Post-Release
 
 ### Contrib Repository
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index bff9c7fdb..6cbefcead 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
 func computeDistinctFixed(kvs []KeyValue) interface{} {
 	switch len(kvs) {
 	case 1:
-		ptr := new([1]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [1]KeyValue(kvs)
 	case 2:
-		ptr := new([2]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [2]KeyValue(kvs)
 	case 3:
-		ptr := new([3]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [3]KeyValue(kvs)
 	case 4:
-		ptr := new([4]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [4]KeyValue(kvs)
 	case 5:
-		ptr := new([5]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [5]KeyValue(kvs)
 	case 6:
-		ptr := new([6]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [6]KeyValue(kvs)
 	case 7:
-		ptr := new([7]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [7]KeyValue(kvs)
 	case 8:
-		ptr := new([8]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [8]KeyValue(kvs)
 	case 9:
-		ptr := new([9]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [9]KeyValue(kvs)
 	case 10:
-		ptr := new([10]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
+		return [10]KeyValue(kvs)
 	default:
 		return nil
 	}
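The rewrite above leans on Go 1.20's slice-to-array conversion; a tiny sketch of the idiom (strings used instead of `attribute.KeyValue` for self-containment):

```go
package main

import "fmt"

func main() {
	kvs := []string{"a", "b", "c"}

	// [3]string(kvs) copies the first three elements into a new array value,
	// replacing the older new([3]T) + copy pattern. It panics if len(kvs) < 3,
	// which the switch on len(kvs) above rules out.
	arr := [3]string(kvs)
	fmt.Println(arr) // [a b c]
}
```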
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index c40c896cc..36f536703 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -44,9 +44,15 @@ type Property struct {
 
 // NewKeyProperty returns a new Property for key.
 //
+// The passed key must be a valid, non-empty UTF-8 string.
 // If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
 func NewKeyProperty(key string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
@@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
 // Notice: Consider using [NewKeyValuePropertyRaw] instead
 // that does not require percent-encoding of the value.
 func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
 
 // NewKeyValuePropertyRaw returns a new Property for key with value.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
 func NewKeyValuePropertyRaw(key, value string) (Property, error) {
-	if !validateKey(key) {
+	if !validateBaggageName(key) {
 		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
 
 	p := Property{
 		key:      key,
@@ -115,12 +134,15 @@ func (p Property) validate() error {
 		return fmt.Errorf("invalid property: %w", err)
 	}
 
-	if !validateKey(p.key) {
+	if !validateBaggageName(p.key) {
 		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
 	}
 	if !p.hasValue && p.value != "" {
 		return errFunc(errors.New("inconsistent value"))
 	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
 	return nil
 }
 
@@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
 
 // String encodes Property into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key does not conform to the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (p Property) String() string {
+	// The W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(p.key) {
+		return ""
+	}
+
 	if p.hasValue {
 		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
 	}
@@ -203,9 +233,14 @@ func (p properties) validate() error {
 // String encodes properties into a header string compliant with the W3C Baggage
 // specification.
 func (p properties) String() string {
-	props := make([]string, len(p))
-	for i, prop := range p {
-		props[i] = prop.String()
+	props := make([]string, 0, len(p))
+	for _, prop := range p {
+		s := prop.String()
+
+		// Ignore empty properties.
+		if s != "" {
+			props = append(props, s)
+		}
 	}
 	return strings.Join(props, propertyDelimiter)
 }
@@ -230,6 +265,10 @@ type Member struct {
 // Notice: Consider using [NewMemberRaw] instead
 // that does not require percent-encoding of the value.
 func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
 	if !validateValue(value) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 
 // NewMemberRaw returns a new Member from the passed arguments.
 //
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on the baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric strings are strongly recommended for use as baggage keys.
 func NewMemberRaw(key, value string, props ...Property) (Member, error) {
 	m := Member{
 		key:        key,
@@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
 	}
 
-	val := strings.TrimSpace(v)
-	if !validateValue(val) {
+	rawVal := strings.TrimSpace(v)
+	if !validateValue(rawVal) {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
 	}
 
 	// Decode a percent-encoded value.
-	value, err := url.PathUnescape(val)
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
 		return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
 	}
+
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 	return Member{key: key, value: value, properties: props, hasData: true}, nil
 }
 
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+	if utf8.ValidString(unescapeVal) {
+		return unescapeVal
+	}
+	// W3C baggage spec:
+	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+	var b strings.Builder
+	b.Grow(cap)
+	for i := 0; i < len(unescapeVal); {
+		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+		if r == utf8.RuneError && size == 1 {
+			// Invalid UTF-8 sequence found, replace it with '�'
+			_, _ = b.WriteString("�")
+		} else {
+			_, _ = b.WriteRune(r)
+		}
+		i += size
+	}
+
+	return b.String()
+}
+
 // validate ensures m conforms to the W3C Baggage specification.
 // A key must be an ASCII string, returning an error otherwise.
 func (m Member) validate() error {
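
The replacement loop added above follows the standard Go pattern for sanitizing invalid UTF-8: DecodeRuneInString reports utf8.RuneError with size 1 exactly when it hits an invalid byte. A standalone sketch (hypothetical helper name, mirroring the unexported function) of the same behavior:

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

func replaceInvalid(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		if r == utf8.RuneError && size == 1 {
			// Invalid byte: substitute the Unicode replacement character.
			b.WriteString("\uFFFD")
		} else {
			b.WriteRune(r)
		}
		i += size
	}
	return b.String()
}

func main() {
	fmt.Println(replaceInvalid("ok\xffvalue")) // ok�value
}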
@@ -314,9 +385,12 @@ func (m Member) validate() error {
 		return fmt.Errorf("%w: %q", errInvalidMember, m)
 	}
 
-	if !validateKey(m.key) {
+	if !validateBaggageName(m.key) {
 		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
 	}
+	if !validateBaggageValue(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
 	return m.properties.validate()
 }
 
@@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
 
 // String encodes Member into a header string compliant with the W3C Baggage
 // specification.
+// It returns an empty string if the key does not conform to the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
 func (m Member) String() string {
-	// A key is just an ASCII string. A value is restricted to be
-	// US-ASCII characters excluding CTLs, whitespace,
-	// DQUOTE, comma, semicolon, and backslash.
+	// The W3C Baggage specification does not allow percent-encoded keys.
+	if !validateKey(m.key) {
+		return ""
+	}
+
 	s := m.key + keyValueDelimiter + valueEscape(m.value)
 	if len(m.properties) > 0 {
 		s += propertyDelimiter + m.properties.String()
@@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
 }
 
 // Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
 //
 // The returned members are not validated, as we assume the validation happened
 // when they were added to the Baggage.
@@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
 	return members
 }
 
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
 // replaced.
 //
 // If member is invalid according to the W3C Baggage specification, an error
@@ -528,14 +607,22 @@ func (b Baggage) Len() int {
 
 // String encodes Baggage into a header string compliant with the W3C Baggage
 // specification.
+// Members whose keys do not conform to the W3C Baggage specification are
+// omitted. This can happen for a UTF-8 key, as it may contain invalid
+// characters.
 func (b Baggage) String() string {
 	members := make([]string, 0, len(b.list))
 	for k, v := range b.list {
-		members = append(members, Member{
+		s := Member{
 			key:        k,
 			value:      v.Value,
 			properties: fromInternalProperties(v.Properties),
-		}.String())
+		}.String()
+
+		// Ignore empty members.
+		if s != "" {
+			members = append(members, s)
+		}
 	}
 	return strings.Join(members, listDelimiter)
 }
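
A hedged sketch of the observable effect of this hunk combined with the relaxed member validation above, assuming only the public baggage API: a UTF-8 key that is not a W3C token is stored in the Baggage but dropped from the serialized header.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// "clé" is valid UTF-8 but not a W3C token key.
	m, _ := baggage.NewMemberRaw("clé", "v")
	b, _ := baggage.New(m)

	fmt.Println(b.Len())    // 1  — the member is stored
	fmt.Println(b.String()) // "" — non-token key omitted from the header
}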
@@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
 	}
 
 	// Decode a percent-encoded value.
-	value, err := url.PathUnescape(s[valueStart:valueEnd])
+	rawVal := s[valueStart:valueEnd]
+	unescapeVal, err := url.PathUnescape(rawVal)
 	if err != nil {
 		return
 	}
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
 
 	ok = true
 	p.key = s[keyStart:keyEnd]
@@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
 	'~': true,
 }
 
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// A Baggage name must be a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// A Baggage value must be a valid UTF-8 string.
+// The empty string is also valid.
+func validateBaggageValue(s string) bool {
+	return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
 func validateKey(s string) bool {
 	if len(s) == 0 {
 		return false
@@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
 	return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
 }
 
+// validateValue checks if the string is a valid W3C Baggage value.
 func validateValue(s string) bool {
 	for _, c := range s {
 		if !validateValueChar(c) {
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index df29d96a6..2acbac354 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 				return fmt.Errorf("invalid code: %q", ci)
 			}
 
-			*c = Code(ci)
+			*c = Code(ci) // nolint: gosec  // Bit size of 32 checked above.
 			return nil
 		}
 		return fmt.Errorf("invalid code: %q", string(b))
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index 441c59501..921f85961 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
 
 To read more about metrics, see go.opentelemetry.io/otel/metric.
 
+To read more about logs, see go.opentelemetry.io/otel/log.
+
 To read more about propagation, see go.opentelemetry.io/otel/propagation and
 go.opentelemetry.io/otel/baggage.
 */
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index cfd1df9bf..e3db438a0 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -5,8 +5,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
 
 import (
 	"container/list"
+	"reflect"
 	"sync"
-	"sync/atomic"
 
 	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/metric/embedded"
@@ -76,7 +76,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
 		return val
 	}
 
-	t := &meter{name: name, opts: opts}
+	t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
 	p.meters[key] = t
 	return t
 }
@@ -92,17 +92,29 @@ type meter struct {
 	opts []metric.MeterOption
 
 	mtx         sync.Mutex
-	instruments []delegatedInstrument
+	instruments map[instID]delegatedInstrument
 
 	registry list.List
 
-	delegate atomic.Value // metric.Meter
+	delegate metric.Meter
 }
 
 type delegatedInstrument interface {
 	setDelegate(metric.Meter)
 }
 
+// instID is the set of identifying properties of an instrument.
+type instID struct {
+	// name is the name of the stream.
+	name string
+	// description is the description of the stream.
+	description string
+	// kind defines the functional group of the instrument.
+	kind reflect.Type
+	// unit is the unit of the stream.
+	unit string
+}
+
 // setDelegate configures m to delegate all Meter functionality to Meters
 // created by provider.
 //
@@ -110,12 +122,12 @@ type delegatedInstrument interface {
 //
 // It is guaranteed by the caller that this happens only once.
 func (m *meter) setDelegate(provider metric.MeterProvider) {
-	meter := provider.Meter(m.name, m.opts...)
-	m.delegate.Store(meter)
-
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 
+	meter := provider.Meter(m.name, m.opts...)
+	m.delegate = meter
+
 	for _, inst := range m.instruments {
 		inst.setDelegate(meter)
 	}
@@ -133,169 +145,337 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
 }
 
 func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Counter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Counter(name, options...)
+	}
+
+	cfg := metric.NewInt64CounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*siCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Counter), nil
+	}
 	i := &siCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64UpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64UpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64UpDownCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*siUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64UpDownCounter), nil
+	}
 	i := &siUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Histogram(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Histogram(name, options...)
+	}
+
+	cfg := metric.NewInt64HistogramConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*siHistogram)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Histogram), nil
+	}
 	i := &siHistogram{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64Gauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64Gauge(name, options...)
+	}
+
+	cfg := metric.NewInt64GaugeConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*siGauge)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64Gauge), nil
+	}
 	i := &siGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*aiCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableCounter), nil
+	}
 	i := &aiCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableUpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableUpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*aiUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableUpDownCounter), nil
+	}
 	i := &aiUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Int64ObservableGauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Int64ObservableGauge(name, options...)
+	}
+
+	cfg := metric.NewInt64ObservableGaugeConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*aiGauge)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Int64ObservableGauge), nil
+	}
 	i := &aiGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Counter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Counter(name, options...)
+	}
+
+	cfg := metric.NewFloat64CounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*sfCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Counter), nil
+	}
 	i := &sfCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64UpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64UpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64UpDownCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*sfUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64UpDownCounter), nil
+	}
 	i := &sfUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Histogram(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Histogram(name, options...)
+	}
+
+	cfg := metric.NewFloat64HistogramConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*sfHistogram)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Histogram), nil
+	}
 	i := &sfHistogram{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64Gauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64Gauge(name, options...)
+	}
+
+	cfg := metric.NewFloat64GaugeConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*sfGauge)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64Gauge), nil
+	}
 	i := &sfGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*afCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableCounter), nil
+	}
 	i := &afCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableUpDownCounter(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableUpDownCounter(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*afUpDownCounter)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableUpDownCounter), nil
+	}
 	i := &afUpDownCounter{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		return del.Float64ObservableGauge(name, options...)
-	}
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+
+	if m.delegate != nil {
+		return m.delegate.Float64ObservableGauge(name, options...)
+	}
+
+	cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+	id := instID{
+		name:        name,
+		kind:        reflect.TypeOf((*afGauge)(nil)),
+		description: cfg.Description(),
+		unit:        cfg.Unit(),
+	}
+	if f, ok := m.instruments[id]; ok {
+		return f.(metric.Float64ObservableGauge), nil
+	}
 	i := &afGauge{name: name, opts: options}
-	m.instruments = append(m.instruments, i)
+	m.instruments[id] = i
 	return i, nil
 }
 
 // RegisterCallback captures the function that will be called during Collect.
 func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
-	if del, ok := m.delegate.Load().(metric.Meter); ok {
-		insts = unwrapInstruments(insts)
-		return del.RegisterCallback(f, insts...)
-	}
-
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 
+	if m.delegate != nil {
+		insts = unwrapInstruments(insts)
+		return m.delegate.RegisterCallback(f, insts...)
+	}
+
 	reg := &registration{instruments: insts, function: f}
 	e := m.registry.PushBack(reg)
 	reg.unreg = func() error {
@@ -349,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) {
 	reg, err := m.RegisterCallback(c.function, insts...)
 	if err != nil {
 		GetErrorHandler().Handle(err)
+		return
 	}
 
 	c.unreg = reg.Unregister
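
A hedged sketch of what the new instruments map changes for callers, assuming the default global provider before a real SDK delegate is installed: repeated requests for the same instrument identity (name, kind, description, unit) now return the cached instrument instead of appending duplicates.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	meter := otel.Meter("example")

	// Same name, kind, description, and unit → same instID → same instance.
	c1, _ := meter.Int64Counter("requests")
	c2, _ := meter.Int64Counter("requests")
	fmt.Println(c1 == c2) // true with the patched global meter
}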
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index 3e7bb3b35..b2fe3e41d 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -20,11 +20,13 @@ func RawToBool(r uint64) bool {
 }
 
 func Int64ToRaw(i int64) uint64 {
-	return uint64(i)
+	// Assumes original was a valid int64 (overflow not checked).
+	return uint64(i) // nolint: gosec
 }
 
 func RawToInt64(r uint64) int64 {
-	return int64(r)
+	// Assumes original was a valid int64 (overflow not checked).
+	return int64(r) // nolint: gosec
 }
 
 func Float64ToRaw(f float64) uint64 {
@@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
 }
 
 func RawPtrToFloat64Ptr(r *uint64) *float64 {
-	return (*float64)(unsafe.Pointer(r))
+	// Assumes r was created from a valid *float64 (conversion not checked).
+	return (*float64)(unsafe.Pointer(r)) // nolint: gosec
 }
 
 func RawPtrToInt64Ptr(r *uint64) *int64 {
-	return (*int64)(unsafe.Pointer(r))
+	// Assumes r was created from a valid *int64 (conversion not checked).
+	return (*int64)(unsafe.Pointer(r)) // nolint: gosec
 }
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index cf23db778..f8435d8f2 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -213,7 +213,7 @@ type Float64Observer interface {
 }
 
 // Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observerable instrument it is registered with.
+// observations for a Float64Observable instrument it is registered with.
 // Calls to the Float64Observer record measurement values for the
 // Float64Observable.
 //
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
index c82ba5324..e079aaef1 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -212,7 +212,7 @@ type Int64Observer interface {
 }
 
 // Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observerable instrument it is registered with. Calls to the
+// for an Int64Observable instrument it is registered with. Calls to the
 // Int64Observer record measurement values for the Int64Observable.
 //
 // The function needs to complete in a finite amount of time and the deadline
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index ea52e4023..a535782e1 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
 //
 //	cp := make([]attribute.KeyValue, len(attributes))
 //	copy(cp, attributes)
-//	WithAttributes(attribute.NewSet(cp...))
+//	WithAttributeSet(attribute.NewSet(cp...))
 //
 // [attribute.NewSet] may modify the passed attributes so this will make a copy
 // of attributes before creating a set in order to ensure this function is
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 6a7991e01..14e08c24a 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -52,6 +52,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
 	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record int64 measurements during a computational
@@ -61,6 +62,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
 	// Int64Histogram returns a new Int64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of int64 measurements during a
@@ -70,6 +72,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
 	// Int64Gauge returns a new Int64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous int64 measurements during a computational operation.
@@ -78,6 +81,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
 	// Int64ObservableCounter returns a new Int64ObservableCounter identified
 	// by name and configured with options. The instrument is used to
 	// asynchronously record increasing int64 measurements once per a
@@ -92,6 +96,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
 	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record int64 measurements once per
@@ -106,6 +111,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
 	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous int64 measurements once per a
@@ -130,6 +136,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
 	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
 	// identified by name and configured with options. The instrument is used
 	// to synchronously record float64 measurements during a computational
@@ -139,6 +146,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
 	// Float64Histogram returns a new Float64Histogram instrument identified by
 	// name and configured with options. The instrument is used to
 	// synchronously record the distribution of float64 measurements during a
@@ -148,6 +156,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
 	// Float64Gauge returns a new Float64Gauge instrument identified by name and
 	// configured with options. The instrument is used to synchronously record
 	// instantaneous float64 measurements during a computational operation.
@@ -156,6 +165,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
 	// Float64ObservableCounter returns a new Float64ObservableCounter
 	// instrument identified by name and configured with options. The
 	// instrument is used to asynchronously record increasing float64
@@ -170,6 +180,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
 	// Float64ObservableUpDownCounter returns a new
 	// Float64ObservableUpDownCounter instrument identified by name and
 	// configured with options. The instrument is used to asynchronously record
@@ -184,6 +195,7 @@ type Meter interface {
 	// See the Instrument Name section of the package documentation for more
 	// information.
 	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
 	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
 	// identified by name and configured with options. The instrument is used
 	// to asynchronously record instantaneous float64 measurements once per a
@@ -242,6 +254,7 @@ type Observer interface {
 
 	// ObserveFloat64 records the float64 value for obsrv.
 	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
 	// ObserveInt64 records the int64 value for obsrv.
 	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
 }
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 000000000..bb8969435
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 000000000..ca6fcbdc0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ metric.MeterProvider                  = MeterProvider{}
+	_ metric.Meter                          = Meter{}
+	_ metric.Observer                       = Observer{}
+	_ metric.Registration                   = Registration{}
+	_ metric.Int64Counter                   = Int64Counter{}
+	_ metric.Float64Counter                 = Float64Counter{}
+	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
+	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
+	_ metric.Int64Histogram                 = Int64Histogram{}
+	_ metric.Float64Histogram               = Float64Histogram{}
+	_ metric.Int64Gauge                     = Int64Gauge{}
+	_ metric.Float64Gauge                   = Float64Gauge{}
+	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
+	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
+	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
+	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
+	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
+	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+	_ metric.Int64Observer                  = Int64Observer{}
+	_ metric.Float64Observer                = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+	return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+	return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+	return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+	return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback; it performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+	metric.Int64Observable
+	embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+	metric.Float64Observable
+	embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+	metric.Int64Observable
+	embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+	metric.Float64Observable
+	embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
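
A hedged sketch of the embedding pattern the package comment above describes (hypothetical countingMeter, not part of the patch): embed noop.Meter and override only the methods you care about; everything else defaults to no operation.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// countingMeter counts instrument creations; all other Meter methods
// fall through to the embedded no-op implementation.
type countingMeter struct {
	noop.Meter
	created int
}

func (m *countingMeter) Int64Counter(name string, opts ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	m.created++
	return m.Meter.Int64Counter(name, opts...)
}

func main() {
	m := &countingMeter{}
	_, _ = m.Int64Counter("requests")
	fmt.Println(m.created) // 1
}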
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
index 8c5ac55ca..0a29a2f13 100644
--- a/vendor/go.opentelemetry.io/otel/renovate.json
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -19,6 +19,14 @@
       "matchManagers": ["gomod"],
       "matchDepTypes": ["indirect"],
       "enabled": false
+    },
+    {
+      "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+      "groupName": "googleapis"
+    },
+    {
+      "matchPackageNames": ["golang.org/x/**"],
+      "groupName": "golang.org/x"
     }
   ]
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
deleted file mode 100644
index 0b6cbe960..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.24.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
deleted file mode 100644
index 6e688345c..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
+++ /dev/null
@@ -1,4387 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes FaaS attributes.
-const (
-	// FaaSInvokedNameKey is the attribute Key conforming to the
-	// "faas.invoked_name" semantic conventions. It represents the name of the
-	// invoked function.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'my-function'
-	// Note: SHOULD be equal to the `faas.name` resource attribute of the
-	// invoked function.
-	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
-	// FaaSInvokedProviderKey is the attribute Key conforming to the
-	// "faas.invoked_provider" semantic conventions. It represents the cloud
-	// provider of the invoked function.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
-	// invoked function.
-	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
-	// FaaSInvokedRegionKey is the attribute Key conforming to the
-	// "faas.invoked_region" semantic conventions. It represents the cloud
-	// region of the invoked function.
-	//
-	// Type: string
-	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
-	// AWS or GCP, the region in which a function is hosted is essential to
-	// uniquely identify the function and also part of its endpoint. Since it's
-	// part of the endpoint being called, the region is always known to
-	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
-	// If the region is unknown to the client or not required for identifying
-	// the invoked function, setting `faas.invoked_region` is optional.)
-	// Stability: experimental
-	// Examples: 'eu-central-1'
-	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
-	// invoked function.
-	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
-	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
-	// semantic conventions. It represents the type of the trigger which caused
-	// this function invocation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSTriggerKey = attribute.Key("faas.trigger")
-)
-
-var (
-	// Alibaba Cloud
-	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
-	// Amazon Web Services
-	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
-	// Microsoft Azure
-	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
-	// Google Cloud Platform
-	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
-	// Tencent Cloud
-	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
-	// A response to some data source operation such as a database or filesystem read/write
-	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
-	// To provide an answer to an inbound HTTP request
-	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
-	// A function is set to be executed when messages are sent to a messaging system
-	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
-	// A function is scheduled to be executed regularly
-	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
-	// If none of the others apply
-	FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
-	return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
-	return FaaSInvokedRegionKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
-	// EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It represents the identifies the class / type of
-	// event.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
-	// Note: Event names are subject to the same rules as [attribute
-	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
-	// Notably, event names are namespaced to avoid collisions and provide a
-	// clean separation of semantics for events in separate domains like
-	// browser, mobile, and kubernetes.
-	EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the identifies the class / type of
-// event.
-func EventName(val string) attribute.KeyValue {
-	return EventNameKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
-	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
-	// semantic conventions. It represents a unique identifier for the Log
-	// Record.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
-	// Note: If an id is provided, other log records with the same id will be
-	// considered duplicates and can be removed safely. This means, that two
-	// distinguishable log records MUST have different values.
-	// The id MAY be an [Universally Unique Lexicographically Sortable
-	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
-	// (e.g. UUID) may be used as needed.
-	LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
-	return LogRecordUIDKey.String(val)
-}
-
-// Describes Log attributes
-const (
-	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
-	// semantic conventions. It represents the stream associated with the log.
-	// See below for a list of well-known values.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
-	// Logs from stdout stream
-	LogIostreamStdout = LogIostreamKey.String("stdout")
-	// Events from stderr stream
-	LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// A file to which log was emitted.
-const (
-	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
-	// semantic conventions. It represents the basename of the file.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: experimental
-	// Examples: 'audit.log'
-	LogFileNameKey = attribute.Key("log.file.name")
-
-	// LogFileNameResolvedKey is the attribute Key conforming to the
-	// "log.file.name_resolved" semantic conventions. It represents the
-	// basename of the file, with symlinks resolved.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'uuid.log'
-	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
-	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
-	// semantic conventions. It represents the full path to the file.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/var/log/mysql/audit.log'
-	LogFilePathKey = attribute.Key("log.file.path")
-
-	// LogFilePathResolvedKey is the attribute Key conforming to the
-	// "log.file.path_resolved" semantic conventions. It represents the full
-	// path to the file, with symlinks resolved.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/var/lib/docker/uuid.log'
-	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
-	return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
-	return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
-	return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
-	return LogFilePathResolvedKey.String(val)
-}
-
-// Describes Database attributes
-const (
-	// PoolNameKey is the attribute Key conforming to the "pool.name" semantic
-	// conventions. It represents the name of the connection pool; unique
-	// within the instrumented application. In case the connection pool
-	// implementation doesn't provide a name, then the
-	// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
-	// should be used
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'myDataSource'
-	PoolNameKey = attribute.Key("pool.name")
-
-	// StateKey is the attribute Key conforming to the "state" semantic
-	// conventions. It represents the state of a connection in the pool
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'idle'
-	StateKey = attribute.Key("state")
-)
-
-var (
-	// idle
-	StateIdle = StateKey.String("idle")
-	// used
-	StateUsed = StateKey.String("used")
-)
-
-// PoolName returns an attribute KeyValue conforming to the "pool.name"
-// semantic conventions. It represents the name of the connection pool; unique
-// within the instrumented application. If the connection pool
-// implementation doesn't provide a name, the
-// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
-// should be used instead.
-func PoolName(val string) attribute.KeyValue {
-	return PoolNameKey.String(val)
-}
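-
-// Illustrative sketch (assumed usage, not part of the vendored source):
-// combining pool.name with one of the state enum values into an attribute
-// set, as a metric instrument option would consume it. "myDataSource" is a
-// placeholder.
-//
-//	poolAttrs := attribute.NewSet(
-//		PoolName("myDataSource"),
-//		StateIdle,
-//	)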
-
-// ASP.NET Core attributes
-const (
-	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
-	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-	// represents the full type name of the
-	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-	// implementation that handled the exception.
-	//
-	// Type: string
-	// RequirementLevel: ConditionallyRequired (if and only if the exception
-	// was handled by this handler.)
-	// Stability: experimental
-	// Examples: 'Contoso.MyHandler'
-	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
-	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
-	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-	// the rate limiting policy name.
-	//
-	// Type: string
-	// RequirementLevel: ConditionallyRequired (if the matched endpoint for the
-	// request had a rate-limiting policy.)
-	// Stability: experimental
-	// Examples: 'fixed', 'sliding', 'token'
-	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
-	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
-	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
-	// the rate-limiting result; it shows whether the lease was acquired or
-	// contains a rejection reason.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'acquired', 'request_canceled'
-	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
-	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
-	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
-	// the flag indicating whether the request was handled by the application
-	// pipeline.
-	//
-	// Type: boolean
-	// RequirementLevel: ConditionallyRequired (if and only if the request was
-	// not handled.)
-	// Stability: experimental
-	// Examples: True
-	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
-	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
-	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-	// value that indicates whether the matched route is a fallback route.
-	//
-	// Type: boolean
-	// RequirementLevel: ConditionallyRequired (If and only if a route was
-	// successfully matched.)
-	// Stability: experimental
-	// Examples: True
-	AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-)
-
-var (
-	// Lease was acquired
-	AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
-	// Lease request was rejected by the endpoint limiter
-	AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
-	// Lease request was rejected by the global limiter
-	AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
-	// Lease request was canceled
-	AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
-	return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
-	return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating whether the request was handled by the application
-// pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
-	return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
-	return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
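-
-// Illustrative sketch (assumed usage, not part of the vendored source):
-// recording a rate-limiting rejection on a server span; the span variable
-// and the policy name are placeholders.
-//
-//	span.SetAttributes(
-//		AspnetcoreRateLimitingPolicy("fixed"),
-//		AspnetcoreRateLimitingResultEndpointLimiter,
-//	)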
-
-// SignalR attributes
-const (
-	// SignalrConnectionStatusKey is the attribute Key conforming to the
-	// "signalr.connection.status" semantic conventions. It represents the
-	// SignalR HTTP connection closure status.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'app_shutdown', 'timeout'
-	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
-	// SignalrTransportKey is the attribute Key conforming to the
-	// "signalr.transport" semantic conventions. It represents the [SignalR
-	// transport
-	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'web_sockets', 'long_polling'
-	SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
-	// The connection was closed normally
-	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
-	// The connection was closed due to a timeout
-	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
-	// The connection was closed because the app is shutting down
-	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
-	// ServerSentEvents protocol
-	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
-	// LongPolling protocol
-	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
-	// WebSockets protocol
-	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
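-
-// Illustrative sketch (assumed usage, not part of the vendored source):
-// describing a closed SignalR connection with the enum values above; the
-// span variable is a placeholder.
-//
-//	span.SetAttributes(
-//		SignalrTransportWebSockets,
-//		SignalrConnectionStatusAppShutdown,
-//	)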
-
-// Describes JVM buffer metric attributes.
-const (
-	// JvmBufferPoolNameKey is the attribute Key conforming to the
-	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
-	// the buffer pool.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: experimental
-	// Examples: 'mapped', 'direct'
-	// Note: Pool names are generally obtained via
-	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
-	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
-	return JvmBufferPoolNameKey.String(val)
-}
-
-// Describes JVM memory metric attributes.
-const (
-	// JvmMemoryPoolNameKey is the attribute Key conforming to the
-	// "jvm.memory.pool.name" semantic conventions. It represents the name of
-	// the memory pool.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: stable
-	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
-	// Note: Pool names are generally obtained via
-	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
-	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
-	// JvmMemoryTypeKey is the attribute Key conforming to the
-	// "jvm.memory.type" semantic conventions. It represents the type of
-	// memory.
-	//
-	// Type: Enum
-	// RequirementLevel: Recommended
-	// Stability: stable
-	// Examples: 'heap', 'non_heap'
-	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-)
-
-var (
-	// Heap memory
-	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
-	// Non-heap memory
-	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
-	return JvmMemoryPoolNameKey.String(val)
-}
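-
-// Illustrative sketch (assumed usage, not part of the vendored source):
-// attaching the JVM memory attributes to a measurement. The counter
-// instrument and usedBytes value are placeholders; metric.WithAttributes is
-// the option from go.opentelemetry.io/otel/metric.
-//
-//	counter.Add(ctx, usedBytes, metric.WithAttributes(
-//		JvmMemoryPoolName("G1 Old Gen"),
-//		JvmMemoryTypeHeap,
-//	))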
-
-// Describes System metric attributes
-const (
-	// SystemDeviceKey is the attribute Key conforming to the "system.device"
-	// semantic conventions. It represents the device identifier
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '(identifier)'
-	SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
-	return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU metric attributes
-const (
-	// SystemCPULogicalNumberKey is the attribute Key conforming to the
-	// "system.cpu.logical_number" semantic conventions. It represents the
-	// logical CPU number [0..n-1]
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1
-	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
-	// SystemCPUStateKey is the attribute Key conforming to the
-	// "system.cpu.state" semantic conventions. It represents the state of the
-	// CPU
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'idle', 'interrupt'
-	SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
-	// user
-	SystemCPUStateUser = SystemCPUStateKey.String("user")
-	// system
-	SystemCPUStateSystem = SystemCPUStateKey.String("system")
-	// nice
-	SystemCPUStateNice = SystemCPUStateKey.String("nice")
-	// idle
-	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
-	// iowait
-	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
-	// interrupt
-	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
-	// steal
-	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
-	return SystemCPULogicalNumberKey.Int(val)
-}
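-
-// Illustrative sketch (assumed usage, not part of the vendored source): a
-// per-CPU attribute slice pairing the logical CPU number with a state enum
-// value.
-//
-//	cpuAttrs := []attribute.KeyValue{
-//		SystemCPULogicalNumber(1),
-//		SystemCPUStateIdle,
-//	}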
-
-// Describes System Memory metric attributes
-const (
-	// SystemMemoryStateKey is the attribute Key conforming to the
-	// "system.memory.state" semantic conventions. It represents the memory
-	// state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'free', 'cached'
-	SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
-	// used
-	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
-	// free
-	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
-	// shared
-	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
-	// buffers
-	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
-	// cached
-	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging metric attributes
-const (
-	// SystemPagingDirectionKey is the attribute Key conforming to the
-	// "system.paging.direction" semantic conventions. It represents the paging
-	// access direction
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'in'
-	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
-	// SystemPagingStateKey is the attribute Key conforming to the
-	// "system.paging.state" semantic conventions. It represents the memory
-	// paging state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'free'
-	SystemPagingStateKey = attribute.Key("system.paging.state")
-
-	// SystemPagingTypeKey is the attribute Key conforming to the
-	// "system.paging.type" semantic conventions. It represents the memory
-	// paging type
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'minor'
-	SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
-	// in
-	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
-	// out
-	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
-	// used
-	SystemPagingStateUsed = SystemPagingStateKey.String("used")
-	// free
-	SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
-	// major
-	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
-	// minor
-	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem metric attributes
-const (
-	// SystemFilesystemModeKey is the attribute Key conforming to the
-	// "system.filesystem.mode" semantic conventions. It represents the
-	// filesystem mode
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'rw, ro'
-	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
-	// SystemFilesystemMountpointKey is the attribute Key conforming to the
-	// "system.filesystem.mountpoint" semantic conventions. It represents the
-	// filesystem mount path
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/mnt/data'
-	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
-	// SystemFilesystemStateKey is the attribute Key conforming to the
-	// "system.filesystem.state" semantic conventions. It represents the
-	// filesystem state
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'used'
-	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
-	// SystemFilesystemTypeKey is the attribute Key conforming to the
-	// "system.filesystem.type" semantic conventions. It represents the
-	// filesystem type
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ext4'
-	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
-	// used
-	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
-	// free
-	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
-	// reserved
-	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
-	// fat32
-	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
-	// exfat
-	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
-	// ntfs
-	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
-	// refs
-	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
-	// hfsplus
-	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
-	// ext4
-	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
-	return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
-	return SystemFilesystemMountpointKey.String(val)
-}
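-
-// Illustrative sketch (assumed usage, not part of the vendored source): a
-// full attribute set for a filesystem usage measurement, mixing the
-// string-valued helpers with the enum values above. The device and mount
-// path are placeholders.
-//
-//	fsAttrs := attribute.NewSet(
-//		SystemDevice("/dev/sda1"),
-//		SystemFilesystemMountpoint("/mnt/data"),
-//		SystemFilesystemMode("ro"),
-//		SystemFilesystemTypeExt4,
-//		SystemFilesystemStateUsed,
-//	)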
-
-// Describes Network metric attributes
-const (
-	// SystemNetworkStateKey is the attribute Key conforming to the
-	// "system.network.state" semantic conventions. It represents a stateless
-	// protocol MUST NOT set this attribute
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'close_wait'
-	SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
-	// close
-	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
-	// close_wait
-	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
-	// closing
-	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
-	// delete
-	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
-	// established
-	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
-	// fin_wait_1
-	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
-	// fin_wait_2
-	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
-	// last_ack
-	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
-	// listen
-	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
-	// syn_recv
-	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
-	// syn_sent
-	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
-	// time_wait
-	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process metric attributes
-const (
-	// SystemProcessesStatusKey is the attribute Key conforming to the
-	// "system.processes.status" semantic conventions. It represents the
-	// process state, e.g., [Linux Process State
-	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'running'
-	SystemProcessesStatusKey = attribute.Key("system.processes.status")
-)
-
-var (
-	// running
-	SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
-	// sleeping
-	SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
-	// stopped
-	SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
-	// defunct
-	SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
-)
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
-	// ClientAddressKey is the attribute Key conforming to the "client.address"
-	// semantic conventions. It represents the client address - domain name if
-	// available without reverse DNS lookup; otherwise, IP address or Unix
-	// domain socket name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the server side, and when communicating through
-	// an intermediary, `client.address` SHOULD represent the client address
-	// behind any intermediaries, for example proxies, if it's available.
-	ClientAddressKey = attribute.Key("client.address")
-
-	// ClientPortKey is the attribute Key conforming to the "client.port"
-	// semantic conventions. It represents the client port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	// Note: When observed from the server side, and when communicating through
-	// an intermediary, `client.port` SHOULD represent the client port behind
-	// any intermediaries, for example proxies, if it's available.
-	ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
-	return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
-	return ClientPortKey.Int(val)
-}
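-
-// Illustrative sketch (assumed usage, not part of the vendored source):
-// recording the originating client on a server span; the span variable,
-// address, and port are placeholders.
-//
-//	span.SetAttributes(
-//		ClientAddress("10.1.2.80"),
-//		ClientPort(65123),
-//	)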
-
-// The attributes used to describe telemetry in the context of databases.
-const (
-	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
-	// "db.cassandra.consistency_level" semantic conventions. It represents the
-	// consistency level of the query. Based on consistency values from
-	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
-	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
-	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
-	// data center of the coordinating node for a query.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'us-west-2'
-	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
-	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
-	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
-	// of the coordinating node for a query.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
-	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
-	// DBCassandraIdempotenceKey is the attribute Key conforming to the
-	// "db.cassandra.idempotence" semantic conventions. It represents the
-	// whether or not the query is idempotent.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
-	// DBCassandraPageSizeKey is the attribute Key conforming to the
-	// "db.cassandra.page_size" semantic conventions. It represents the fetch
-	// size used for paging, i.e. how many rows will be returned at once.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 5000
-	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
-	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
-	// to the "db.cassandra.speculative_execution_count" semantic conventions.
-	// It represents the number of times a query was speculatively executed.
-	// Not set or `0` if the query was not executed speculatively.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 2
-	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
-	// DBCassandraTableKey is the attribute Key conforming to the
-	// "db.cassandra.table" semantic conventions. It represents the name of the
-	// primary Cassandra table that the operation is acting upon, including the
-	// keyspace name (if applicable).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mytable'
-	// Note: This mirrors the db.sql.table attribute but references Cassandra
-	// rather than SQL. It is not recommended to attempt any client-side
-	// parsing of `db.statement` just to get this property, but it should be
-	// set if it is provided by the library being instrumented. If the
-	// operation is acting upon an anonymous table, or more than one table,
-	// this value MUST NOT be set.
-	DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
-	// DBConnectionStringKey is the attribute Key conforming to the
-	// "db.connection_string" semantic conventions. It represents the
-	// connection string used to connect to the database. It is recommended to
-	// remove embedded credentials.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
-	DBConnectionStringKey = attribute.Key("db.connection_string")
-
-	// DBCosmosDBClientIDKey is the attribute Key conforming to the
-	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-	// Cosmos client instance id.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
-	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
-	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
-	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
-	// cosmos client connection mode.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
-	// DBCosmosDBContainerKey is the attribute Key conforming to the
-	// "db.cosmosdb.container" semantic conventions. It represents the cosmos
-	// DB container name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'anystring'
-	DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
-	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
-	// "db.cosmosdb.operation_type" semantic conventions. It represents the
-	// CosmosDB operation type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
-	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
-	// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-	// consumed for that operation
-	//
-	// Type: double
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 46.18, 1.0
-	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
-	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
-	// "db.cosmosdb.request_content_length" semantic conventions. It represents
-	// the request payload size in bytes
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
-	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
-	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
-	// DB status code.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 200, 201
-	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
-	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
-	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
-	// cosmos DB sub status code.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1000, 1002
-	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
-	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
-	// identifier of an Elasticsearch cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
-	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
-	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.node.name" semantic conventions. It represents the
-	// human-readable identifier of the node/instance to which a request was
-	// routed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-0000000001'
-	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-
-	// DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
-	// semantic conventions. It represents an identifier (address, unique name,
-	// or any other identifier) of the database instance that is executing
-	// queries or mutations on the current connection. This is useful in cases
-	// where the database is running in a clustered environment and the
-	// instrumentation is able to record the node executing the query. The
-	// client may obtain this value in databases like MySQL using queries like
-	// `select @@hostname`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mysql-e26b99z.example.com'
-	DBInstanceIDKey = attribute.Key("db.instance.id")
-
-	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
-	// "db.jdbc.driver_classname" semantic conventions. It represents the
-	// fully-qualified class name of the [Java Database Connectivity
-	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
-	// driver used to connect.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'org.postgresql.Driver',
-	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
-	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
-	// DBMongoDBCollectionKey is the attribute Key conforming to the
-	// "db.mongodb.collection" semantic conventions. It represents the MongoDB
-	// collection being accessed within the database stated in `db.name`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'customers', 'products'
-	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-
-	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
-	// "db.mssql.instance_name" semantic conventions. It represents the
-	// Microsoft SQL Server [instance
-	// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-	// connecting to. This name is used to determine the port of a named
-	// instance.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MSSQLSERVER'
-	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
-	// required (but still recommended if non-standard).
-	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-
-	// DBNameKey is the attribute Key conforming to the "db.name" semantic
-	// conventions. It represents the name of the database being accessed. For
-	// commands that switch the database, this should be set to the target
-	// database (even if the command fails).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'customers', 'main'
-	// Note: In some SQL databases, the database name to be used is called
-	// "schema name". In case there are multiple layers that could be
-	// considered for database name (e.g. Oracle instance name and schema
-	// name), the database name to be used is the more specific layer (e.g.
-	// Oracle schema name).
-	DBNameKey = attribute.Key("db.name")
-
-	// DBOperationKey is the attribute Key conforming to the "db.operation"
-	// semantic conventions. It represents the name of the operation being
-	// executed, e.g. the [MongoDB command
-	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-	// such as `findAndModify`, or the SQL keyword.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'findAndModify', 'HMSET', 'SELECT'
-	// Note: When setting this to an SQL keyword, it is not recommended to
-	// attempt any client-side parsing of `db.statement` just to get this
-	// property, but it should be set if the operation name is provided by the
-	// library being instrumented. If the SQL statement has an ambiguous
-	// operation, or performs more than one operation, this value may be
-	// omitted.
-	DBOperationKey = attribute.Key("db.operation")
-
-	// DBRedisDBIndexKey is the attribute Key conforming to the
-	// "db.redis.database_index" semantic conventions. It represents the index
-	// of the database being accessed as used in the [`SELECT`
-	// command](https://redis.io/commands/select), provided as an integer. To
-	// be used instead of the generic `db.name` attribute.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 1, 15
-	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-
-	// DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
-	// semantic conventions. It represents the name of the primary table that
-	// the operation is acting upon, including the database name (if
-	// applicable).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'public.users', 'customers'
-	// Note: It is not recommended to attempt any client-side parsing of
-	// `db.statement` just to get this property, but it should be set if it is
-	// provided by the library being instrumented. If the operation is acting
-	// upon an anonymous table, or more than one table, this value MUST NOT be
-	// set.
-	DBSQLTableKey = attribute.Key("db.sql.table")
-
-	// DBStatementKey is the attribute Key conforming to the "db.statement"
-	// semantic conventions. It represents the database statement being
-	// executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
-	DBStatementKey = attribute.Key("db.statement")
-
-	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
-	// conventions. It represents an identifier for the database management
-	// system (DBMS) product being used. See below for a list of well-known
-	// identifiers.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBSystemKey = attribute.Key("db.system")
-
-	// DBUserKey is the attribute Key conforming to the "db.user" semantic
-	// conventions. It represents the username for accessing the database.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'readonly_user', 'reporting_user'
-	DBUserKey = attribute.Key("db.user")
-)
-
-var (
-	// all
-	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
-	// each_quorum
-	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
-	// quorum
-	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
-	// local_quorum
-	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
-	// one
-	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
-	// two
-	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
-	// three
-	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
-	// local_one
-	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
-	// any
-	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
-	// serial
-	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
-	// local_serial
-	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-var (
-	// Gateway (HTTP) connections mode
-	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
-	// Direct connection
-	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
-	// invalid
-	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
-	// create
-	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
-	// patch
-	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
-	// read
-	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
-	// read_feed
-	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
-	// delete
-	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
-	// replace
-	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
-	// execute
-	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
-	// query
-	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
-	// head
-	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
-	// head_feed
-	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
-	// upsert
-	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
-	// batch
-	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
-	// query_plan
-	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
-	// execute_javascript
-	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
-	// Some other SQL database. Fallback only. See notes
-	DBSystemOtherSQL = DBSystemKey.String("other_sql")
-	// Microsoft SQL Server
-	DBSystemMSSQL = DBSystemKey.String("mssql")
-	// Microsoft SQL Server Compact
-	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
-	// MySQL
-	DBSystemMySQL = DBSystemKey.String("mysql")
-	// Oracle Database
-	DBSystemOracle = DBSystemKey.String("oracle")
-	// IBM DB2
-	DBSystemDB2 = DBSystemKey.String("db2")
-	// PostgreSQL
-	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
-	// Amazon Redshift
-	DBSystemRedshift = DBSystemKey.String("redshift")
-	// Apache Hive
-	DBSystemHive = DBSystemKey.String("hive")
-	// Cloudscape
-	DBSystemCloudscape = DBSystemKey.String("cloudscape")
-	// HyperSQL DataBase
-	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
-	// Progress Database
-	DBSystemProgress = DBSystemKey.String("progress")
-	// SAP MaxDB
-	DBSystemMaxDB = DBSystemKey.String("maxdb")
-	// SAP HANA
-	DBSystemHanaDB = DBSystemKey.String("hanadb")
-	// Ingres
-	DBSystemIngres = DBSystemKey.String("ingres")
-	// FirstSQL
-	DBSystemFirstSQL = DBSystemKey.String("firstsql")
-	// EnterpriseDB
-	DBSystemEDB = DBSystemKey.String("edb")
-	// InterSystems Caché
-	DBSystemCache = DBSystemKey.String("cache")
-	// Adabas (Adaptable Database System)
-	DBSystemAdabas = DBSystemKey.String("adabas")
-	// Firebird
-	DBSystemFirebird = DBSystemKey.String("firebird")
-	// Apache Derby
-	DBSystemDerby = DBSystemKey.String("derby")
-	// FileMaker
-	DBSystemFilemaker = DBSystemKey.String("filemaker")
-	// Informix
-	DBSystemInformix = DBSystemKey.String("informix")
-	// InstantDB
-	DBSystemInstantDB = DBSystemKey.String("instantdb")
-	// InterBase
-	DBSystemInterbase = DBSystemKey.String("interbase")
-	// MariaDB
-	DBSystemMariaDB = DBSystemKey.String("mariadb")
-	// Netezza
-	DBSystemNetezza = DBSystemKey.String("netezza")
-	// Pervasive PSQL
-	DBSystemPervasive = DBSystemKey.String("pervasive")
-	// PointBase
-	DBSystemPointbase = DBSystemKey.String("pointbase")
-	// SQLite
-	DBSystemSqlite = DBSystemKey.String("sqlite")
-	// Sybase
-	DBSystemSybase = DBSystemKey.String("sybase")
-	// Teradata
-	DBSystemTeradata = DBSystemKey.String("teradata")
-	// Vertica
-	DBSystemVertica = DBSystemKey.String("vertica")
-	// H2
-	DBSystemH2 = DBSystemKey.String("h2")
-	// ColdFusion IMQ
-	DBSystemColdfusion = DBSystemKey.String("coldfusion")
-	// Apache Cassandra
-	DBSystemCassandra = DBSystemKey.String("cassandra")
-	// Apache HBase
-	DBSystemHBase = DBSystemKey.String("hbase")
-	// MongoDB
-	DBSystemMongoDB = DBSystemKey.String("mongodb")
-	// Redis
-	DBSystemRedis = DBSystemKey.String("redis")
-	// Couchbase
-	DBSystemCouchbase = DBSystemKey.String("couchbase")
-	// CouchDB
-	DBSystemCouchDB = DBSystemKey.String("couchdb")
-	// Microsoft Azure Cosmos DB
-	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
-	// Amazon DynamoDB
-	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
-	// Neo4j
-	DBSystemNeo4j = DBSystemKey.String("neo4j")
-	// Apache Geode
-	DBSystemGeode = DBSystemKey.String("geode")
-	// Elasticsearch
-	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
-	// Memcached
-	DBSystemMemcached = DBSystemKey.String("memcached")
-	// CockroachDB
-	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
-	// OpenSearch
-	DBSystemOpensearch = DBSystemKey.String("opensearch")
-	// ClickHouse
-	DBSystemClickhouse = DBSystemKey.String("clickhouse")
-	// Cloud Spanner
-	DBSystemSpanner = DBSystemKey.String("spanner")
-	// Trino
-	DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
-	return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
-	return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
-	return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
-	return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
-	return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary Cassandra table that the operation is acting upon, including the
-// keyspace name (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
-	return DBCassandraTableKey.String(val)
-}
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
-	return DBConnectionStringKey.String(val)
-}
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
-	return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
-	return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
-	return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
-	return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
-	return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
-	return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
-	return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
-	return DBElasticsearchNodeNameKey.String(val)
-}
-
-// DBInstanceID returns an attribute KeyValue conforming to the
-// "db.instance.id" semantic conventions. It represents an identifier (address,
-// unique name, or any other identifier) of the database instance that is
-// executing queries or mutations on the current connection. This is useful in
-// cases where the database is running in a clustered environment and the
-// instrumentation is able to record the node executing the query. The client
-// may obtain this value in databases like MySQL using queries like `select
-// @@hostname`.
-func DBInstanceID(val string) attribute.KeyValue {
-	return DBInstanceIDKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
-	return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the MongoDB
-// collection being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
-	return DBMongoDBCollectionKey.String(val)
-}
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
-	return DBMSSQLInstanceNameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of the database being accessed. For
-// commands that switch the database, this should be set to the target
-// database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
-	return DBNameKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
-	return DBOperationKey.String(val)
-}
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
-	return DBRedisDBIndexKey.Int(val)
-}
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
-	return DBSQLTableKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
-	return DBStatementKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
-	return DBUserKey.String(val)
-}
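-
-// Illustrative sketch (assumed usage, not part of the vendored source): a
-// typical attribute set for a database client span, combining the db.system
-// enum with the string-valued helpers above. All values are placeholders
-// taken from the examples in the doc comments.
-//
-//	span.SetAttributes(
-//		DBSystemPostgreSQL,
-//		DBName("customers"),
-//		DBOperation("SELECT"),
-//		DBStatement("SELECT * FROM wuser_table"),
-//		DBUser("readonly_user"),
-//	)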
-
-// Describes deprecated HTTP attributes.
-const (
-	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
-	// semantic conventions.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorKey = attribute.Key("http.flavor")
-
-	// HTTPMethodKey is the attribute Key conforming to the "http.method"
-	// semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'GET', 'POST', 'HEAD'
-	// Deprecated: use `http.request.method` instead.
-	HTTPMethodKey = attribute.Key("http.method")
-
-	// HTTPRequestContentLengthKey is the attribute Key conforming to the
-	// "http.request_content_length" semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 3495
-	// Deprecated: use `http.request.header.content-length` instead.
-	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
-	// HTTPResponseContentLengthKey is the attribute Key conforming to the
-	// "http.response_content_length" semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 3495
-	// Deprecated: use `http.response.header.content-length` instead.
-	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-
-	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
-	// semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'http', 'https'
-	// Deprecated: use `url.scheme` instead.
-	HTTPSchemeKey = attribute.Key("http.scheme")
-
-	// HTTPStatusCodeKey is the attribute Key conforming to the
-	// "http.status_code" semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 200
-	// Deprecated: use `http.response.status_code` instead.
-	HTTPStatusCodeKey = attribute.Key("http.status_code")
-
-	// HTTPTargetKey is the attribute Key conforming to the "http.target"
-	// semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '/search?q=OpenTelemetry#SemConv'
-	// Deprecated: use `url.path` and `url.query` instead.
-	HTTPTargetKey = attribute.Key("http.target")
-
-	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
-	// conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
-	// Deprecated: use `url.full` instead.
-	HTTPURLKey = attribute.Key("http.url")
-
-	// HTTPUserAgentKey is the attribute Key conforming to the
-	// "http.user_agent" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
-	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
-	// Version/14.1.2 Mobile/15E148 Safari/604.1'
-	// Deprecated: use `user_agent.original` instead.
-	HTTPUserAgentKey = attribute.Key("http.user_agent")
-)
-
-var (
-	// HTTP/1.0
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
-	// HTTP/1.1
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
-	// HTTP/2
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
-	// HTTP/3
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
-	// SPDY protocol
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
-	// QUIC protocol
-	//
-	// Deprecated: use `network.protocol.name` instead.
-	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions.
-//
-// Deprecated: use `http.request.method` instead.
-func HTTPMethod(val string) attribute.KeyValue {
-	return HTTPMethodKey.String(val)
-}
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions.
-//
-// Deprecated: use `http.request.header.content-length` instead.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
-	return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions.
-//
-// Deprecated: use `http.response.header.content-length` instead.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
-	return HTTPResponseContentLengthKey.Int(val)
-}
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions.
-//
-// Deprecated: use `url.scheme` instead.
-func HTTPScheme(val string) attribute.KeyValue {
-	return HTTPSchemeKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions.
-//
-// Deprecated: use `http.response.status_code` instead.
-func HTTPStatusCode(val int) attribute.KeyValue {
-	return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions.
-//
-// Deprecated: use `url.path` and `url.query` instead.
-func HTTPTarget(val string) attribute.KeyValue {
-	return HTTPTargetKey.String(val)
-}
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions.
-//
-// Deprecated: use `url.full` instead.
-func HTTPURL(val string) attribute.KeyValue {
-	return HTTPURLKey.String(val)
-}
-
-// HTTPUserAgent returns an attribute KeyValue conforming to the
-// "http.user_agent" semantic conventions.
-//
-// Deprecated: use `user_agent.original` instead.
-func HTTPUserAgent(val string) attribute.KeyValue {
-	return HTTPUserAgentKey.String(val)
-}
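-
-// Illustrative sketch (assumed usage, not part of the vendored source): the
-// deprecated helpers still compile, but each doc comment above names the
-// attribute that supersedes it, e.g. the pair below should migrate to
-// `http.request.method` and `http.response.status_code`.
-//
-//	span.SetAttributes(
-//		HTTPMethod("GET"),   // Deprecated: use `http.request.method`.
-//		HTTPStatusCode(200), // Deprecated: use `http.response.status_code`.
-//	)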
-
-// These attributes may be used for any network related operation.
-const (
-	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
-	// semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'example.com'
-	// Deprecated: use `server.address`.
-	NetHostNameKey = attribute.Key("net.host.name")
-
-	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
-	// semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 8080
-	// Deprecated: use `server.port`.
-	NetHostPortKey = attribute.Key("net.host.port")
-
-	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
-	// semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'example.com'
-	// Deprecated: use `server.address` on client spans and `client.address` on
-	// server spans.
-	NetPeerNameKey = attribute.Key("net.peer.name")
-
-	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
-	// semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 8080
-	// Deprecated: use `server.port` on client spans and `client.port` on
-	// server spans.
-	NetPeerPortKey = attribute.Key("net.peer.port")
-
-	// NetProtocolNameKey is the attribute Key conforming to the
-	// "net.protocol.name" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'amqp', 'http', 'mqtt'
-	// Deprecated: use `network.protocol.name`.
-	NetProtocolNameKey = attribute.Key("net.protocol.name")
-
-	// NetProtocolVersionKey is the attribute Key conforming to the
-	// "net.protocol.version" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '3.1.1'
-	// Deprecated: use `network.protocol.version`.
-	NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
-	// NetSockFamilyKey is the attribute Key conforming to the
-	// "net.sock.family" semantic conventions.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Deprecated: use `network.transport` and `network.type`.
-	NetSockFamilyKey = attribute.Key("net.sock.family")
-
-	// NetSockHostAddrKey is the attribute Key conforming to the
-	// "net.sock.host.addr" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '/var/my.sock'
-	// Deprecated: use `network.local.address`.
-	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
-	// NetSockHostPortKey is the attribute Key conforming to the
-	// "net.sock.host.port" semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 8080
-	// Deprecated: use `network.local.port`.
-	NetSockHostPortKey = attribute.Key("net.sock.host.port")
-
-	// NetSockPeerAddrKey is the attribute Key conforming to the
-	// "net.sock.peer.addr" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '192.168.0.1'
-	// Deprecated: use `network.peer.address`.
-	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
-	// NetSockPeerNameKey is the attribute Key conforming to the
-	// "net.sock.peer.name" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '/var/my.sock'
-	// Deprecated: no replacement at this time.
-	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
-	// NetSockPeerPortKey is the attribute Key conforming to the
-	// "net.sock.peer.port" semantic conventions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 65531
-	// Deprecated: use `network.peer.port`.
-	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
-	// NetTransportKey is the attribute Key conforming to the "net.transport"
-	// semantic conventions.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Deprecated: use `network.transport`.
-	NetTransportKey = attribute.Key("net.transport")
-)
-
-var (
-	// IPv4 address
-	//
-	// Deprecated: use `network.transport` and `network.type`.
-	NetSockFamilyInet = NetSockFamilyKey.String("inet")
-	// IPv6 address
-	//
-	// Deprecated: use `network.transport` and `network.type`.
-	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
-	// Unix domain socket path
-	//
-	// Deprecated: use `network.transport` and `network.type`.
-	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-var (
-	// ip_tcp
-	//
-	// Deprecated: use `network.transport`.
-	NetTransportTCP = NetTransportKey.String("ip_tcp")
-	// ip_udp
-	//
-	// Deprecated: use `network.transport`.
-	NetTransportUDP = NetTransportKey.String("ip_udp")
-	// Named or anonymous pipe
-	//
-	// Deprecated: use `network.transport`.
-	NetTransportPipe = NetTransportKey.String("pipe")
-	// In-process communication
-	//
-	// Deprecated: use `network.transport`.
-	NetTransportInProc = NetTransportKey.String("inproc")
-	// Something else (non IP-based)
-	//
-	// Deprecated: use `network.transport`.
-	NetTransportOther = NetTransportKey.String("other")
-)
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions.
-//
-// Deprecated: use `server.address`.
-func NetHostName(val string) attribute.KeyValue {
-	return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions.
-//
-// Deprecated: use `server.port`.
-func NetHostPort(val int) attribute.KeyValue {
-	return NetHostPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions.
-//
-// Deprecated: use `server.address` on client spans and `client.address` on
-// server spans.
-func NetPeerName(val string) attribute.KeyValue {
-	return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions.
-//
-// Deprecated: use `server.port` on client spans and `client.port` on server
-// spans.
-func NetPeerPort(val int) attribute.KeyValue {
-	return NetPeerPortKey.Int(val)
-}
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions.
-//
-// Deprecated: use `network.protocol.name`.
-func NetProtocolName(val string) attribute.KeyValue {
-	return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions.
-//
-// Deprecated: use `network.protocol.version`.
-func NetProtocolVersion(val string) attribute.KeyValue {
-	return NetProtocolVersionKey.String(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions.
-//
-// Deprecated: use `network.local.address`.
-func NetSockHostAddr(val string) attribute.KeyValue {
-	return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions.
-//
-// Deprecated: use `network.local.port`.
-func NetSockHostPort(val int) attribute.KeyValue {
-	return NetSockHostPortKey.Int(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions.
-//
-// Deprecated: use `network.peer.address`.
-func NetSockPeerAddr(val string) attribute.KeyValue {
-	return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions.
-//
-// Deprecated: no replacement at this time.
-func NetSockPeerName(val string) attribute.KeyValue {
-	return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions.
-//
-// Deprecated: use `network.peer.port`.
-func NetSockPeerPort(val int) attribute.KeyValue {
-	return NetSockPeerPortKey.Int(val)
-}
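-
-// Migration sketch (editorial illustration, not part of the generated file):
-// the deprecated net.sock.* helpers above map onto the stable network.*
-// attribute Keys defined later in this package. The values are assumed.
-//
-//	// Before (deprecated):
-//	span.SetAttributes(NetSockPeerAddr("192.168.0.1"), NetSockPeerPort(65531))
-//	// After (current conventions):
-//	span.SetAttributes(
-//		NetworkPeerAddressKey.String("192.168.0.1"),
-//		NetworkPeerPortKey.Int(65531),
-//	)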
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
-	// DestinationAddressKey is the attribute Key conforming to the
-	// "destination.address" semantic conventions. It represents the
-	// destination address - domain name if available without reverse DNS
-	// lookup; otherwise, IP address or Unix domain socket name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the source side, and when communicating through
-	// an intermediary, `destination.address` SHOULD represent the destination
-	// address behind any intermediaries, for example proxies, if it's
-	// available.
-	DestinationAddressKey = attribute.Key("destination.address")
-
-	// DestinationPortKey is the attribute Key conforming to the
-	// "destination.port" semantic conventions. It represents the destination
-	// port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3389, 2888
-	DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
-	return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number.
-func DestinationPort(val int) attribute.KeyValue {
-	return DestinationPortKey.Int(val)
-}
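-
-// Usage sketch (editorial illustration, not part of the generated file):
-// attaching the destination attributes to a span, assuming an active
-// trace.Span named span; the address and port literals are assumed values.
-//
-//	span.SetAttributes(
-//		DestinationAddress("destination.example.com"),
-//		DestinationPort(3389),
-//	)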
-
-// These attributes may be used for any disk related operation.
-const (
-	// DiskIoDirectionKey is the attribute Key conforming to the
-	// "disk.io.direction" semantic conventions. It represents the disk IO
-	// operation direction.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'read'
-	DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
-	// read
-	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
-	// write
-	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
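-
-// Usage sketch (editorial illustration, not part of the generated file):
-// the enum members above are ready-made attribute.KeyValue values and can
-// be attached directly, assuming an active trace.Span named span.
-//
-//	span.SetAttributes(DiskIoDirectionRead)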
-
-// The shared attributes used to report an error.
-const (
-	// ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the
-	// operation ended with.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'timeout', 'java.net.UnknownHostException',
-	// 'server_certificate_invalid', '500'
-	// Note: The `error.type` SHOULD be predictable and SHOULD have low
-	// cardinality.
-	// Instrumentations SHOULD document the list of errors they report.
-	//
-	// The cardinality of `error.type` within one instrumentation library
-	// SHOULD be low.
-	// Telemetry consumers that aggregate data from multiple instrumentation
-	// libraries and applications
-	// should be prepared for `error.type` to have high cardinality at query
-	// time when no
-	// additional filters are applied.
-	//
-	// If the operation has completed successfully, instrumentations SHOULD NOT
-	// set `error.type`.
-	//
-	// If a specific domain defines its own set of error identifiers (such as
-	// HTTP or gRPC status codes),
-	// it's RECOMMENDED to:
-	//
-	// * Use a domain-specific attribute
-	// * Set `error.type` to capture all errors, regardless of whether they are
-	// defined within the domain-specific set or not.
-	ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
-	// A fallback error value to be used when the instrumentation doesn't define a custom value
-	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
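-
-// Usage sketch (editorial illustration, not part of the generated file):
-// choosing a low-cardinality `error.type` per the note above, falling back
-// to `_OTHER` for unrecognized errors. errorType is an assumed helper name,
-// and the snippet assumes the standard "errors" and "net" packages.
-//
-//	func errorType(err error) attribute.KeyValue {
-//		var dnsErr *net.DNSError
-//		if errors.As(err, &dnsErr) {
-//			return ErrorTypeKey.String("net.DNSError")
-//		}
-//		return ErrorTypeOther
-//	}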
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
-	// ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It represents the sHOULD be
-	// set to true if the exception event is recorded at a point where it is
-	// known that the exception is escaping the scope of the span.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: An exception is considered to have escaped (or left) the scope of
-	// a span,
-	// if that span is ended while the exception is still logically "in
-	// flight".
-	// This may be actually "in flight" in some languages (e.g. if the
-	// exception
-	// is passed to a Context manager's `__exit__` method in Python) but will
-	// usually be caught at the point of recording the exception in most
-	// languages.
-	//
-	// It is usually not possible to determine at the point where an exception
-	// is thrown
-	// whether it will escape the scope of a span.
-	// However, it is trivial to know that an exception
-	// will escape, if one checks for an active exception just before ending
-	// the span,
-	// as done in the [example for recording span
-	// exceptions](#recording-an-exception).
-	//
-	// It follows that an exception may still escape the scope of the span
-	// even if the `exception.escaped` attribute was not set or set to false,
-	// since the event might have been recorded at a time where it was not
-	// clear whether the exception will escape.
-	ExceptionEscapedKey = attribute.Key("exception.escaped")
-
-	// ExceptionMessageKey is the attribute Key conforming to the
-	// "exception.message" semantic conventions. It represents the exception
-	// message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Division by zero', "Can't convert 'int' object to str
-	// implicitly"
-	ExceptionMessageKey = attribute.Key("exception.message")
-
-	// ExceptionStacktraceKey is the attribute Key conforming to the
-	// "exception.stacktrace" semantic conventions. It represents a stacktrace
-	// as a string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
-	// exception\\n at '
-	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
-	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
-	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
-	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
-	// semantic conventions. It represents the type of the exception (its
-	// fully-qualified class name, if applicable). The dynamic type of the
-	// exception should be preferred over the static type in languages that
-	// support it.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'java.net.ConnectException', 'OSError'
-	ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
-	return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
-	return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
-	return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
-	return ExceptionTypeKey.String(val)
-}
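-
-// Usage sketch (editorial illustration, not part of the generated file):
-// recording an exception event with the attributes above, assuming an
-// active trace.Span named span, a recovered error err, and the "fmt" and
-// trace packages.
-//
-//	span.AddEvent("exception", trace.WithAttributes(
-//		ExceptionType(fmt.Sprintf("%T", err)),
-//		ExceptionMessage(err.Error()),
-//		ExceptionEscaped(true), // only when err escapes the span's scope
-//	))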
-
-// Semantic convention attributes in the HTTP namespace.
-const (
-	// HTTPRequestBodySizeKey is the attribute Key conforming to the
-	// "http.request.body.size" semantic conventions. It represents the size of
-	// the request payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
-	// HTTPRequestMethodKey is the attribute Key conforming to the
-	// "http.request.method" semantic conventions. It represents the hTTP
-	// request method.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GET', 'POST', 'HEAD'
-	// Note: HTTP request method value SHOULD be "known" to the
-	// instrumentation.
-	// By default, this convention defines "known" methods as the ones listed
-	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
-	// and the PATCH method defined in
-	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
-	//
-	// If the HTTP request method is not known to instrumentation, it MUST set
-	// the `http.request.method` attribute to `_OTHER`.
-	//
-	// If the HTTP instrumentation could end up converting valid HTTP request
-	// methods to `_OTHER`, then it MUST provide a way to override
-	// the list of known HTTP methods. If this override is done via environment
-	// variable, then the environment variable MUST be named
-	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
-	// list of case-sensitive known HTTP methods
-	// (this list MUST be a full override of the default known methods; it is
-	// not a list of known methods in addition to the defaults).
-	//
-	// HTTP method names are case-sensitive and `http.request.method` attribute
-	// value MUST match a known HTTP method name exactly.
-	// Instrumentations for specific web frameworks that consider HTTP methods
-	// to be case-insensitive SHOULD populate a canonical equivalent.
-	// Tracing instrumentations that do so MUST also set
-	// `http.request.method_original` to the original value.
-	HTTPRequestMethodKey = attribute.Key("http.request.method")
-
-	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
-	// "http.request.method_original" semantic conventions. It represents the
-	// original HTTP method sent by the client in the request line.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'GeT', 'ACL', 'foo'
-	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
-	// HTTPRequestResendCountKey is the attribute Key conforming to the
-	// "http.request.resend_count" semantic conventions. It represents the
-	// ordinal number of the request resending attempt (for any reason,
-	// including redirects).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 3
-	// Note: The resend count SHOULD be updated each time an HTTP request gets
-	// resent by the client, regardless of the cause of the resending
-	// (e.g. redirection, authorization failure, 503 Server Unavailable,
-	// network issues, or any other).
-	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
-	// HTTPResponseBodySizeKey is the attribute Key conforming to the
-	// "http.response.body.size" semantic conventions. It represents the size
-	// of the response payload body in bytes. This is the number of bytes
-	// transferred excluding headers and is often, but not always, present as
-	// the
-	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-	// header. For requests using transport encoding, this should be the
-	// compressed size.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3495
-	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
-	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
-	// "http.response.status_code" semantic conventions. It represents the
-	// [HTTP response status
-	// code](https://tools.ietf.org/html/rfc7231#section-6).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 200
-	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
-	// HTTPRouteKey is the attribute Key conforming to the "http.route"
-	// semantic conventions. It represents the matched route, that is, the path
-	// template in the format used by the respective server framework.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
-	// Note: MUST NOT be populated when this is not supported by the HTTP
-	// server framework as the route attribute should have low-cardinality and
-	// the URI path can NOT substitute it.
-	// SHOULD include the [application
-	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
-	HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
-	// CONNECT method
-	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
-	// DELETE method
-	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
-	// GET method
-	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
-	// HEAD method
-	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
-	// OPTIONS method
-	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
-	// PATCH method
-	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
-	// POST method
-	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
-	// PUT method
-	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
-	// TRACE method
-	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
-	// Any HTTP method that the instrumentation has no prior knowledge of
-	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
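-
-// Usage sketch (editorial illustration, not part of the generated file):
-// mapping a raw method string onto the enum per the note on
-// HTTPRequestMethodKey, falling back to `_OTHER` while preserving the
-// original value. requestMethod is an assumed helper name.
-//
-//	func requestMethod(raw string) []attribute.KeyValue {
-//		switch raw {
-//		case "GET":
-//			return []attribute.KeyValue{HTTPRequestMethodGet}
-//		case "POST":
-//			return []attribute.KeyValue{HTTPRequestMethodPost}
-//		// ... remaining known methods elided ...
-//		default:
-//			return []attribute.KeyValue{
-//				HTTPRequestMethodOther,
-//				HTTPRequestMethodOriginal(raw),
-//			}
-//		}
-//	}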
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
-	return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
-	return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of the request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
-	return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
-	return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
-	return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
-	return HTTPRouteKey.String(val)
-}
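-
-// Usage sketch (editorial illustration, not part of the generated file):
-// a typical HTTP server span annotated with the attributes above, assuming
-// an active trace.Span named span; the literals are assumed values.
-//
-//	span.SetAttributes(
-//		HTTPRequestMethodGet,
-//		HTTPRoute("/users/{id}"),
-//		HTTPResponseStatusCode(200),
-//		HTTPResponseBodySize(3495),
-//	)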
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
-	// MessagingBatchMessageCountKey is the attribute Key conforming to the
-	// "messaging.batch.message_count" semantic conventions. It represents the
-	// number of messages sent, received, or processed in the scope of the
-	// batching operation.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 1, 2
-	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
-	// spans that operate with a single message. When a messaging client
-	// library supports both batch and single-message API for the same
-	// operation, instrumentations SHOULD use `messaging.batch.message_count`
-	// for batching APIs and SHOULD NOT use it for single-message APIs.
-	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
-	// MessagingClientIDKey is the attribute Key conforming to the
-	// "messaging.client_id" semantic conventions. It represents a unique
-	// identifier for the client that consumes or produces a message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'client-5', 'myhost@8742@s8083jm'
-	MessagingClientIDKey = attribute.Key("messaging.client_id")
-
-	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
-	// "messaging.destination.anonymous" semantic conventions. It represents a
-	// boolean that is true if the message destination is anonymous (could be
-	// unnamed or have an auto-generated name).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
-	// MessagingDestinationNameKey is the attribute Key conforming to the
-	// "messaging.destination.name" semantic conventions. It represents the
-	// message destination name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyQueue', 'MyTopic'
-	// Note: Destination name SHOULD uniquely identify a specific queue, topic
-	// or other entity within the broker. If
-	// the broker doesn't have such a notion, the destination name SHOULD
-	// uniquely identify the broker.
-	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
-	// MessagingDestinationTemplateKey is the attribute Key conforming to the
-	// "messaging.destination.template" semantic conventions. It represents the
-	// low cardinality representation of the messaging destination name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/customers/{customerID}'
-	// Note: Destination names could be constructed from templates. An example
-	// would be a destination name involving a user name or product id.
-	// Although the destination name in this case is of high cardinality, the
-	// underlying template is of low cardinality and can be effectively used
-	// for grouping and aggregation.
-	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
-	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
-	// "messaging.destination.temporary" semantic conventions. It represents a
-	// boolean that is true if the message destination is temporary and might
-	// not exist anymore after messages are processed.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
-	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
-	// to the "messaging.destination_publish.anonymous" semantic conventions.
-	// It represents a boolean that is true if the publish message destination
-	// is anonymous (could be unnamed or have an auto-generated name).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
-	// MessagingDestinationPublishNameKey is the attribute Key conforming to
-	// the "messaging.destination_publish.name" semantic conventions. It
-	// represents the name of the original destination the message was
-	// published to.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyQueue', 'MyTopic'
-	// Note: The name SHOULD uniquely identify a specific queue, topic, or
-	// other entity within the broker. If
-	// the broker doesn't have such a notion, the original destination name
-	// SHOULD uniquely identify the broker.
-	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
-	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
-	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
-	// It represents the ordering key for a given message. If the attribute is
-	// not present, the message does not have an ordering key.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ordering_key'
-	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-
-	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
-	// "messaging.kafka.consumer.group" semantic conventions. It represents the
-	// name of the Kafka Consumer Group that is handling the message. Only
-	// applies to consumers, not producers.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-group'
-	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
-	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
-	// the "messaging.kafka.destination.partition" semantic conventions. It
-	// represents the partition the message is sent to.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2
-	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
-	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
-	// "messaging.kafka.message.key" semantic conventions. It represents the
-	// message key in Kafka, used for grouping alike messages to ensure
-	// they're processed on the same partition. It differs from
-	// `messaging.message.id` in that it's not unique. If the key is `null`,
-	// the attribute MUST NOT be set.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myKey'
-	// Note: If the key type is not string, its string representation has to
-	// be supplied for the attribute. If the key has no unambiguous, canonical
-	// string form, don't include its value.
-	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
-	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
-	// "messaging.kafka.message.offset" semantic conventions. It represents the
-	// offset of a record in the corresponding Kafka partition.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
-	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
-	// "messaging.kafka.message.tombstone" semantic conventions. It represents
-	// a boolean that is true if the message is a tombstone.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-
-	// MessagingMessageBodySizeKey is the attribute Key conforming to the
-	// "messaging.message.body.size" semantic conventions. It represents the
-	// size of the message body in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1439
-	// Note: This can refer to either the compressed or uncompressed body size.
-	// If both sizes are known, the uncompressed
-	// body size should be used.
-	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
-	// MessagingMessageConversationIDKey is the attribute Key conforming to the
-	// "messaging.message.conversation_id" semantic conventions. It represents
-	// the conversation ID identifying the conversation to which the message
-	// belongs, represented as a string. Sometimes called "Correlation ID".
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MyConversationID'
-	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
-	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
-	// "messaging.message.envelope.size" semantic conventions. It represents
-	// the size of the message body and metadata in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2738
-	// Note: This can refer to either the compressed or uncompressed size. If
-	// both sizes are known, the uncompressed
-	// size should be used.
-	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
-	// MessagingMessageIDKey is the attribute Key conforming to the
-	// "messaging.message.id" semantic conventions. It represents a value used
-	// by the messaging system as an identifier for the message, represented as
-	// a string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
-	MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
-	// MessagingOperationKey is the attribute Key conforming to the
-	// "messaging.operation" semantic conventions. It represents a string
-	// identifying the kind of messaging operation.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: If a custom value is used, it MUST be of low cardinality.
-	MessagingOperationKey = attribute.Key("messaging.operation")
-
-	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
-	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-	// conventions. It represents the RabbitMQ message routing key.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myKey'
-	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
-	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
-	// "messaging.rocketmq.client_group" semantic conventions. It represents
-	// the name of the RocketMQ producer/consumer group that is handling the
-	// message. The client type is identified by the SpanKind.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myConsumerGroup'
-	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
-	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
-	// the "messaging.rocketmq.consumption_model" semantic conventions. It
-	// represents the model of message consumption. This only applies to
-	// consumer spans.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
-	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
-	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-	// conventions. It represents the delay time level for a delay message,
-	// which determines the message delay time.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3
-	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
-	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
-	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
-	// semantic conventions. It represents the timestamp in milliseconds that
-	// the delay message is expected to be delivered to the consumer.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1665987217045
-	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
-	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.group" semantic conventions. It represents
-	// the message group, which is essential for FIFO messages. Messages that
-	// belong to the same message group are always processed one by one within
-	// the same consumer group.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myMessageGroup'
-	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
-	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.keys" semantic conventions. It represents
-	// the key(s) of the message, another way to mark a message besides the
-	// message id.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'keyA', 'keyB'
-	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
-	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
-	// secondary classifier of the message besides the topic.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'tagA'
-	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
-	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.type" semantic conventions. It represents
-	// the type of message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
-	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
-	// "messaging.rocketmq.namespace" semantic conventions. It represents the
-	// namespace of RocketMQ resources; resources in different namespaces are
-	// individual.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myNamespace'
-	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
-	// MessagingSystemKey is the attribute Key conforming to the
-	// "messaging.system" semantic conventions. It represents an identifier for
-	// the messaging system being used. See below for a list of well-known
-	// identifiers.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
-	// One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
-	MessagingOperationPublish = MessagingOperationKey.String("publish")
-	// A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
-	MessagingOperationCreate = MessagingOperationKey.String("create")
-	// One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
-	MessagingOperationReceive = MessagingOperationKey.String("receive")
-	// One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks that get called by messaging SDKs
-	MessagingOperationDeliver = MessagingOperationKey.String("deliver")
-)
-
-var (
-	// Clustering consumption model
-	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
-	// Broadcasting consumption model
-	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
-	// Normal message
-	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
-	// FIFO message
-	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
-	// Delay message
-	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
-	// Transaction message
-	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
-	// Apache ActiveMQ
-	MessagingSystemActivemq = MessagingSystemKey.String("activemq")
-	// Amazon Simple Queue Service (SQS)
-	MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
-	// Azure Event Grid
-	MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
-	// Azure Event Hubs
-	MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
-	// Azure Service Bus
-	MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
-	// Google Cloud Pub/Sub
-	MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
-	// Java Message Service
-	MessagingSystemJms = MessagingSystemKey.String("jms")
-	// Apache Kafka
-	MessagingSystemKafka = MessagingSystemKey.String("kafka")
-	// RabbitMQ
-	MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
-	// Apache RocketMQ
-	MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
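-
-// Usage sketch (editorial illustration, not part of the generated file):
-// combining the enum members and helper functions above for a Kafka receive
-// span, assuming an active trace.Span named span; the literals are assumed.
-//
-//	span.SetAttributes(
-//		MessagingSystemKafka,
-//		MessagingOperationReceive,
-//		MessagingDestinationName("MyTopic"),
-//		MessagingKafkaMessageOffset(42),
-//	)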
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
-	return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client_id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
-	return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have an auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
-	return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name.
-func MessagingDestinationName(val string) attribute.KeyValue {
-	return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name.
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
-	return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
-	return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have an auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
-	return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to.
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
-	return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
-	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
-	return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
-	return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message key in Kafka, used for grouping alike messages to ensure they're
-// processed on the same partition. It differs from `messaging.message.id` in
-// that it's not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
-	return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
-	return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
-	return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
-	return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
-	return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
-	return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
-	return MessagingMessageIDKey.String(val)
-}
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
-	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
-	return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for a delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
-	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to the consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
-	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group, which is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within
-// the same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
-	return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides the
-// message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
-	return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of the message besides the topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
-	return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
-	return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
-	// NetworkCarrierIccKey is the attribute Key conforming to the
-	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-	// alpha-2 2-character country code associated with the mobile carrier
-	// network.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'DE'
-	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
-	// NetworkCarrierMccKey is the attribute Key conforming to the
-	// "network.carrier.mcc" semantic conventions. It represents the mobile
-	// carrier country code.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '310'
-	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
-	// NetworkCarrierMncKey is the attribute Key conforming to the
-	// "network.carrier.mnc" semantic conventions. It represents the mobile
-	// carrier network code.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '001'
-	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
-	// NetworkCarrierNameKey is the attribute Key conforming to the
-	// "network.carrier.name" semantic conventions. It represents the name of
-	// the mobile carrier.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'sprint'
-	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
-	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It represents the
-	// this describes more details regarding the connection.type. It may be the
-	// type of cell technology connection, but it could be used for describing
-	// details about a wifi connection.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'LTE'
-	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
-	// NetworkConnectionTypeKey is the attribute Key conforming to the
-	// "network.connection.type" semantic conventions. It represents the
-	// internet connection type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'wifi'
-	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
-	// NetworkIoDirectionKey is the attribute Key conforming to the
-	// "network.io.direction" semantic conventions. It represents the network
-	// IO operation direction.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'transmit'
-	NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
-	// NetworkLocalAddressKey is the attribute Key conforming to the
-	// "network.local.address" semantic conventions. It represents the local
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkLocalAddressKey = attribute.Key("network.local.address")
-
-	// NetworkLocalPortKey is the attribute Key conforming to the
-	// "network.local.port" semantic conventions. It represents the local port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkLocalPortKey = attribute.Key("network.local.port")
-
-	// NetworkPeerAddressKey is the attribute Key conforming to the
-	// "network.peer.address" semantic conventions. It represents the peer
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
-	// NetworkPeerPortKey is the attribute Key conforming to the
-	// "network.peer.port" semantic conventions. It represents the peer port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkPeerPortKey = attribute.Key("network.peer.port")
-
-	// NetworkProtocolNameKey is the attribute Key conforming to the
-	// "network.protocol.name" semantic conventions. It represents the [OSI
-	// application layer](https://osi-model.com/application-layer/) or non-OSI
-	// equivalent.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'amqp', 'http', 'mqtt'
-	// Note: The value SHOULD be normalized to lowercase.
-	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
-	// NetworkProtocolVersionKey is the attribute Key conforming to the
-	// "network.protocol.version" semantic conventions. It represents the
-	// version of the protocol specified in `network.protocol.name`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '3.1.1'
-	// Note: `network.protocol.version` refers to the version of the protocol
-	// used and might be different from the protocol client's version. If the
-	// HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
-	// this attribute should be set to `1.1`.
-	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
-	// NetworkTransportKey is the attribute Key conforming to the
-	// "network.transport" semantic conventions. It represents the [OSI
-	// transport layer](https://osi-model.com/transport-layer/) or
-	// [inter-process communication
-	// method](https://wikipedia.org/wiki/Inter-process_communication).
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'tcp', 'udp'
-	// Note: The value SHOULD be normalized to lowercase.
-	//
-	// Consider always setting the transport when setting a port number, since
-	// a port number is ambiguous without knowing the transport. For example,
-	// different processes could be listening on TCP port 12345 and UDP port
-	// 12345.
-	NetworkTransportKey = attribute.Key("network.transport")
-
-	// NetworkTypeKey is the attribute Key conforming to the "network.type"
-	// semantic conventions. It represents the [OSI network
-	// layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'ipv4', 'ipv6'
-	// Note: The value SHOULD be normalized to lowercase.
-	NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
-	// GPRS
-	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
-	// EDGE
-	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
-	// UMTS
-	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
-	// CDMA
-	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
-	// EVDO Rel. 0
-	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
-	// EVDO Rev. A
-	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
-	// CDMA2000 1XRTT
-	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
-	// HSDPA
-	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
-	// HSUPA
-	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
-	// HSPA
-	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
-	// IDEN
-	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
-	// EVDO Rev. B
-	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
-	// LTE
-	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
-	// EHRPD
-	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
-	// HSPAP
-	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
-	// GSM
-	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
-	// TD-SCDMA
-	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
-	// IWLAN
-	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
-	// 5G NR (New Radio)
-	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
-	// 5G NRNSA (New Radio Non-Standalone)
-	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
-	// LTE CA
-	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
-	// wifi
-	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
-	// wired
-	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
-	// cell
-	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
-	// unavailable
-	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
-	// unknown
-	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
-	// transmit
-	NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
-	// receive
-	NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
-	// TCP
-	NetworkTransportTCP = NetworkTransportKey.String("tcp")
-	// UDP
-	NetworkTransportUDP = NetworkTransportKey.String("udp")
-	// Named or anonymous pipe
-	NetworkTransportPipe = NetworkTransportKey.String("pipe")
-	// Unix domain socket
-	NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
-	// IPv4
-	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
-	// IPv6
-	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
-	return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
-	return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
-	return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
-	return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
-	return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
-	return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
-	return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
-	return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
-	return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the version
-// of the protocol specified in `network.protocol.name`.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
-	return NetworkProtocolVersionKey.String(val)
-}
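-
-// Example (editor's illustration, not part of the generated file): a minimal
-// sketch of how the helpers above might be combined on a span. Per the
-// network.transport note, a port number is ambiguous without the transport,
-// so the two are set together; the values and the `span` variable are
-// hypothetical, and the span API assumed is go.opentelemetry.io/otel/trace.
-//
-//	span.SetAttributes(
-//		NetworkTransportTCP,            // disambiguates the peer port below
-//		NetworkPeerAddress("10.1.2.80"),
-//		NetworkPeerPort(12345),
-//		NetworkProtocolName("http"),    // normalized to lowercase
-//		NetworkProtocolVersion("1.1"),  // protocol version, not the client's version
-//	)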
-
-// Attributes for remote procedure calls.
-const (
-	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
-	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
-	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
-	// Connect request. Error codes are always string values.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
-	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
-	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
-	// status
-	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
-	// the gRPC request.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
-	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-	// `error.code` property of the response, if it is an error response.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: -32700, 100
-	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
-	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-	// `error.message` property of the response, if it is an error response.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Parse error', 'User already exists'
-	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
-	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-	// property of request or response. Since protocol allows id to be int,
-	// string, `null` or missing (for notifications), value is expected to be
-	// cast to string for simplicity. Use empty string in case of `null` value.
-	// Omit entirely if this is a notification.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '10', 'request-7', ''
-	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
-	// RPCJsonrpcVersionKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-	// doesn't specify this, the value can be omitted.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2.0', '1.0'
-	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
-	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
-	// semantic conventions. It represents the name of the (logical) method
-	// being called; it must be equal to the $method part in the span name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'exampleMethod'
-	// Note: This is the logical name of the method from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// method/function. The `code.function` attribute may be used to store the
-	// latter (e.g., method actually executing the call on the server side, RPC
-	// client stub method on the client side).
-	RPCMethodKey = attribute.Key("rpc.method")
-
-	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
-	// semantic conventions. It represents the full (logical) name of the
-	// service being called, including its package name, if applicable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myservice.EchoService'
-	// Note: This is the logical name of the service from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// class. The `code.namespace` attribute may be used to store the latter
-	// (despite the attribute name, it may include a class name; e.g., class
-	// with method actually executing the call on the server side, RPC client
-	// stub class on the client side).
-	RPCServiceKey = attribute.Key("rpc.service")
-
-	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
-	// semantic conventions. It represents a string identifying the remoting
-	// system. See below for a list of well-known identifiers.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
-	// cancelled
-	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
-	// unknown
-	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
-	// invalid_argument
-	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
-	// deadline_exceeded
-	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
-	// not_found
-	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
-	// already_exists
-	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
-	// permission_denied
-	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
-	// resource_exhausted
-	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
-	// failed_precondition
-	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
-	// aborted
-	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
-	// out_of_range
-	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
-	// unimplemented
-	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
-	// internal
-	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
-	// unavailable
-	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
-	// data_loss
-	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
-	// unauthenticated
-	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
-	// OK
-	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
-	// CANCELLED
-	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
-	// UNKNOWN
-	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
-	// INVALID_ARGUMENT
-	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
-	// DEADLINE_EXCEEDED
-	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
-	// NOT_FOUND
-	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
-	// ALREADY_EXISTS
-	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
-	// PERMISSION_DENIED
-	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
-	// RESOURCE_EXHAUSTED
-	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
-	// FAILED_PRECONDITION
-	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
-	// ABORTED
-	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
-	// OUT_OF_RANGE
-	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
-	// UNIMPLEMENTED
-	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
-	// INTERNAL
-	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
-	// UNAVAILABLE
-	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
-	// DATA_LOSS
-	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
-	// UNAUTHENTICATED
-	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
-	// gRPC
-	RPCSystemGRPC = RPCSystemKey.String("grpc")
-	// Java RMI
-	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
-	// .NET WCF
-	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
-	// Apache Dubbo
-	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-	// Connect RPC
-	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of the response, if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
-	return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of the response, if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
-	return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of the request or response. Since the protocol allows the id to be
-// an int, a string, `null`, or missing (for notifications), the value is
-// expected to be cast to a string for simplicity. Use an empty string in case
-// of a `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
-	return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
-	return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called; it must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
-	return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
-	return RPCServiceKey.String(val)
-}
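-
-// Example (editor's illustration, not part of the generated file): a sketch
-// of how an instrumented gRPC client might record these attributes on a
-// span; the service and method values are hypothetical.
-//
-//	span.SetAttributes(
-//		RPCSystemGRPC,
-//		RPCService("myservice.EchoService"), // logical service name
-//		RPCMethod("exampleMethod"),          // matches the $method part of the span name
-//		RPCGRPCStatusCodeOk,                 // numeric gRPC status code (0)
-//	)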
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
-	// ServerAddressKey is the attribute Key conforming to the "server.address"
-	// semantic conventions. It represents the server domain name if available
-	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the client side, and when communicating through
-	// an intermediary, `server.address` SHOULD represent the server address
-	// behind any intermediaries, for example proxies, if it's available.
-	ServerAddressKey = attribute.Key("server.address")
-
-	// ServerPortKey is the attribute Key conforming to the "server.port"
-	// semantic conventions. It represents the server port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 80, 8080, 443
-	// Note: When observed from the client side, and when communicating through
-	// an intermediary, `server.port` SHOULD represent the server port behind
-	// any intermediaries, for example proxies, if it's available.
-	ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
-	return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
-	return ServerPortKey.Int(val)
-}
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
-	// SourceAddressKey is the attribute Key conforming to the "source.address"
-	// semantic conventions. It represents the source address - domain name if
-	// available without reverse DNS lookup; otherwise, IP address or Unix
-	// domain socket name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
-	// Note: When observed from the destination side, and when communicating
-	// through an intermediary, `source.address` SHOULD represent the source
-	// address behind any intermediaries, for example proxies, if it's
-	// available.
-	SourceAddressKey = attribute.Key("source.address")
-
-	// SourcePortKey is the attribute Key conforming to the "source.port"
-	// semantic conventions. It represents the source port number.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3389, 2888
-	SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
-	return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number.
-func SourcePort(val int) attribute.KeyValue {
-	return SourcePortKey.Int(val)
-}
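-
-// Example (editor's illustration, not part of the generated file): a sketch
-// contrasting the two groups above. server.* applies when one side initiates
-// the connection; source.* applies when no client/server relationship is
-// known. The values and the `span` variable are hypothetical.
-//
-//	// TCP client span: the dialed endpoint is the server.
-//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))
-//
-//	// Packet trace: only the sending side is known.
-//	span.SetAttributes(SourceAddress("10.1.2.80"), SourcePort(3389))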
-
-// Semantic convention attributes in the TLS namespace.
-const (
-	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
-	// semantic conventions. It represents the string indicating the
-	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
-	// used during the current connection.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
-	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
-	// Note: The values allowed for `tls.cipher` MUST be one of the
-	// `Descriptions` of the [registered TLS Cipher
-	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
-	TLSCipherKey = attribute.Key("tls.cipher")
-
-	// TLSClientCertificateKey is the attribute Key conforming to the
-	// "tls.client.certificate" semantic conventions. It represents the
-	// PEM-encoded stand-alone certificate offered by the client. This is
-	// usually mutually-exclusive of `client.certificate_chain` since this
-	// value also exists in that list.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...'
-	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
-	// TLSClientCertificateChainKey is the attribute Key conforming to the
-	// "tls.client.certificate_chain" semantic conventions. It represents the
-	// array of PEM-encoded certificates that make up the certificate chain
-	// offered by the client. This is usually mutually-exclusive of
-	// `client.certificate` since that value should be the first certificate in
-	// the chain.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...', 'MI...'
-	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
-	// TLSClientHashMd5Key is the attribute Key conforming to the
-	// "tls.client.hash.md5" semantic conventions. It represents the
-	// certificate fingerprint using the MD5 digest of the DER-encoded version
-	// of the certificate offered by the client. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
-	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
-	// TLSClientHashSha1Key is the attribute Key conforming to the
-	// "tls.client.hash.sha1" semantic conventions. It represents the
-	// certificate fingerprint using the SHA1 digest of the DER-encoded version
-	// of the certificate offered by the client. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
-	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
-	// TLSClientHashSha256Key is the attribute Key conforming to the
-	// "tls.client.hash.sha256" semantic conventions. It represents the
-	// certificate fingerprint using the SHA256 digest of the DER-encoded
-	// version of the certificate offered by the client. For consistency with
-	// other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
-	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
-	// TLSClientIssuerKey is the attribute Key conforming to the
-	// "tls.client.issuer" semantic conventions. It represents the
-	// distinguished name of
-	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
-	// of the issuer of the x.509 certificate presented by the client.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
-	// DC=com'
-	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
-	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
-	// semantic conventions. It represents a hash that identifies clients based
-	// on how they perform an SSL/TLS handshake.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
-	TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
-	// TLSClientNotAfterKey is the attribute Key conforming to the
-	// "tls.client.not_after" semantic conventions. It represents the date/Time
-	// indicating when client certificate is no longer considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021-01-01T00:00:00.000Z'
-	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
-	// TLSClientNotBeforeKey is the attribute Key conforming to the
-	// "tls.client.not_before" semantic conventions. It represents the
-	// date/time indicating when the client certificate is first considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1970-01-01T00:00:00.000Z'
-	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
-	// TLSClientServerNameKey is the attribute Key conforming to the
-	// "tls.client.server_name" semantic conventions. It represents the also
-	// called an SNI, this tells the server which hostname to which the client
-	// is attempting to connect to.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry.io'
-	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
-	// TLSClientSubjectKey is the attribute Key conforming to the
-	// "tls.client.subject" semantic conventions. It represents the
-	// distinguished name of subject of the x.509 certificate presented by the
-	// client.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
-	TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
-	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
-	// "tls.client.supported_ciphers" semantic conventions. It represents the
-	// array of ciphers offered by the client during the client hello.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
-	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
-	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
-	// conventions. It represents the string indicating the curve used for the
-	// given cipher, when applicable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'secp256r1'
-	TLSCurveKey = attribute.Key("tls.curve")
-
-	// TLSEstablishedKey is the attribute Key conforming to the
-	// "tls.established" semantic conventions. It represents the boolean flag
-	// indicating if the TLS negotiation was successful and transitioned to an
-	// encrypted tunnel.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: True
-	TLSEstablishedKey = attribute.Key("tls.established")
-
-	// TLSNextProtocolKey is the attribute Key conforming to the
-	// "tls.next_protocol" semantic conventions. It represents the string
-	// indicating the protocol being tunneled. Per the values in the [IANA
-	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-	// this string should be lower case.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'http/1.1'
-	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
-	// TLSProtocolNameKey is the attribute Key conforming to the
-	// "tls.protocol.name" semantic conventions. It represents the normalized
-	// lowercase protocol name parsed from the original string of the negotiated
-	// [SSL/TLS protocol
-	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
-	// TLSProtocolVersionKey is the attribute Key conforming to the
-	// "tls.protocol.version" semantic conventions. It represents the numeric
-	// part of the version parsed from the original string of the negotiated
-	// [SSL/TLS protocol
-	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.2', '3'
-	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
-	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
-	// semantic conventions. It represents the boolean flag indicating if this
-	// TLS connection was resumed from an existing TLS negotiation.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: True
-	TLSResumedKey = attribute.Key("tls.resumed")
-
-	// TLSServerCertificateKey is the attribute Key conforming to the
-	// "tls.server.certificate" semantic conventions. It represents the
-	// PEM-encoded stand-alone certificate offered by the server. This is
-	// usually mutually-exclusive of `server.certificate_chain` since this
-	// value also exists in that list.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...'
-	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
-	// TLSServerCertificateChainKey is the attribute Key conforming to the
-	// "tls.server.certificate_chain" semantic conventions. It represents the
-	// array of PEM-encoded certificates that make up the certificate chain
-	// offered by the server. This is usually mutually-exclusive of
-	// `server.certificate` since that value should be the first certificate in
-	// the chain.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'MII...', 'MI...'
-	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
-	// TLSServerHashMd5Key is the attribute Key conforming to the
-	// "tls.server.hash.md5" semantic conventions. It represents the
-	// certificate fingerprint using the MD5 digest of the DER-encoded version
-	// of the certificate offered by the server. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
-	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
-	// TLSServerHashSha1Key is the attribute Key conforming to the
-	// "tls.server.hash.sha1" semantic conventions. It represents the
-	// certificate fingerprint using the SHA1 digest of the DER-encoded version
-	// of the certificate offered by the server. For consistency with other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
-	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
-	// TLSServerHashSha256Key is the attribute Key conforming to the
-	// "tls.server.hash.sha256" semantic conventions. It represents the
-	// certificate fingerprint using the SHA256 digest of the DER-encoded
-	// version of the certificate offered by the server. For consistency with
-	// other hash
-	// values, this value should be formatted as an uppercase hash.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
-	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
-	// TLSServerIssuerKey is the attribute Key conforming to the
-	// "tls.server.issuer" semantic conventions. It represents the
-	// distinguished name of
-	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
-	// of the issuer of the x.509 certificate presented by the server.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
-	// DC=com'
-	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
-	// TLSServerJa3sKey is the attribute Key conforming to the
-	// "tls.server.ja3s" semantic conventions. It represents a hash that
-	// identifies servers based on how they perform an SSL/TLS handshake.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
-	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
-	// TLSServerNotAfterKey is the attribute Key conforming to the
-	// "tls.server.not_after" semantic conventions. It represents the date/Time
-	// indicating when server certificate is no longer considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021-01-01T00:00:00.000Z'
-	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
-	// TLSServerNotBeforeKey is the attribute Key conforming to the
-	// "tls.server.not_before" semantic conventions. It represents the
-	// date/time indicating when the server certificate is first considered valid.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1970-01-01T00:00:00.000Z'
-	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
-	// TLSServerSubjectKey is the attribute Key conforming to the
-	// "tls.server.subject" semantic conventions. It represents the
-	// distinguished name of subject of the x.509 certificate presented by the
-	// server.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
-	TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
-	// ssl
-	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
-	// tls
-	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
-	return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
-	return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
-	return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
-	return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
-	return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of the DER-encoded version of the
-// certificate offered by the client. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
-	return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
-	return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
-	return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
-	return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
-	return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
-	return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
-	return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
-	return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable.
-func TLSCurve(val string) attribute.KeyValue {
-	return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
-	return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
-	return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
-	return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
-	return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
-	return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
-	return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
-	return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
-	return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of the DER-encoded version of the
-// certificate offered by the server. For consistency with other hash values,
-// this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
-	return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the server.
-func TLSServerIssuer(val string) attribute.KeyValue {
-	return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
-	return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
-	return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
-	return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
-	return TLSServerSubjectKey.String(val)
-}
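-
-// Example (editor's illustration, not part of the generated file): a sketch
-// of filling some of these attributes from Go's crypto/tls connection state;
-// `span` and `state` (a tls.ConnectionState) are assumed to be in scope.
-//
-//	span.SetAttributes(
-//		TLSEstablished(true),
-//		TLSResumed(state.DidResume),
-//		TLSCipher(tls.CipherSuiteName(state.CipherSuite)), // IANA-style suite name
-//		TLSNextProtocol(state.NegotiatedProtocol),         // ALPN value, e.g. "h2"
-//	)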
-
-// Attributes describing URL.
-const (
-	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
-	// semantic conventions. It represents the [URI
-	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'SemConv'
-	URLFragmentKey = attribute.Key("url.fragment")
-
-	// URLFullKey is the attribute Key conforming to the "url.full" semantic
-	// conventions. It represents the absolute URL describing a network
-	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
-	// '//localhost'
-	// Note: For network calls, URL usually has
-	// `scheme://host[:port][path][?query][#fragment]` format, where the
-	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
-	// included nevertheless.
-	// `url.full` MUST NOT contain credentials passed via URL in form of
-	// `https://username:password@www.example.com/`. In such case username and
-	// password SHOULD be redacted and attribute's value SHOULD be
-	// `https://REDACTED:REDACTED@www.example.com/`.
-	// `url.full` SHOULD capture the absolute URL when it is available (or can
-	// be reconstructed) and SHOULD NOT be validated or modified except for
-	// sanitizing purposes.
-	URLFullKey = attribute.Key("url.full")
-
-	// URLPathKey is the attribute Key conforming to the "url.path" semantic
-	// conventions. It represents the [URI
-	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '/search'
-	URLPathKey = attribute.Key("url.path")
-
-	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
-	// conventions. It represents the [URI
-	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'q=OpenTelemetry'
-	// Note: Sensitive content provided in query string SHOULD be scrubbed when
-	// instrumentations can identify it.
-	URLQueryKey = attribute.Key("url.query")
-
-	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
-	// semantic conventions. It represents the [URI
-	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-	// identifying the used protocol.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'https', 'ftp', 'telnet'
-	URLSchemeKey = attribute.Key("url.scheme")
-)
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
-	return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
-	return URLFullKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
-	return URLPathKey.String(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
-	return URLQueryKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
-	return URLSchemeKey.String(val)
-}
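-
-// Example (editor's illustration, not part of the generated file): a sketch
-// of decomposing a URL with net/url and applying the credential-redaction
-// rule from the url.full note; the URL and the `span` variable are
-// hypothetical.
-//
-//	u, err := url.Parse("https://user:secret@www.foo.bar/search?q=OpenTelemetry#SemConv")
-//	if err == nil {
-//		if u.User != nil {
-//			u.User = url.UserPassword("REDACTED", "REDACTED") // never record credentials
-//		}
-//		span.SetAttributes(
-//			URLFull(u.String()),
-//			URLScheme(u.Scheme),
-//			URLPath(u.Path),
-//			URLQuery(u.RawQuery),
-//			URLFragment(u.Fragment),
-//		)
-//	}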
-
-// Describes user-agent attributes.
-const (
-	// UserAgentOriginalKey is the attribute Key conforming to the
-	// "user_agent.original" semantic conventions. It represents the value of
-	// the [HTTP
-	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-	// header sent by the client.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
-	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
-	// Version/14.1.2 Mobile/15E148 Safari/604.1'
-	UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
-	return UserAgentOriginalKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
-	// SessionIDKey is the attribute Key conforming to the "session.id"
-	// semantic conventions. It represents a unique id to identify a session.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
-	SessionIDKey = attribute.Key("session.id")
-
-	// SessionPreviousIDKey is the attribute Key conforming to the
-	// "session.previous_id" semantic conventions. It represents the previous
-	// `session.id` for this user, when known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
-	SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
-	return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
-	return SessionPreviousIDKey.String(val)
-}
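-
-// Example (editor's illustration, not part of the generated file): when a
-// session times out and a new one begins, instrumentation can link the two
-// as described above; the identifiers here are hypothetical.
-//
-//	span.SetAttributes(
-//		SessionID("00112233-4455-6677-8899-aabbccddeeff"),
-//		SessionPreviousID("ffeeddcc-bbaa-9988-7766-554433221100"),
-//	)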
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
deleted file mode 100644
index 6c019aafc..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This event represents an occurrence of a lifecycle transition on the iOS
-// platform.
-const (
-	// IosStateKey is the attribute Key conforming to the "ios.state" semantic
-	// conventions. It represents the state the
-	// application has transitioned into at the occurrence of the event.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
-	// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
-	// from which the `OS terminology` column values are derived.
-	IosStateKey = attribute.Key("ios.state")
-)
-
-var (
-	// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
-	IosStateActive = IosStateKey.String("active")
-	// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
-	IosStateInactive = IosStateKey.String("inactive")
-	// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
-	IosStateBackground = IosStateKey.String("background")
-	// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
-	IosStateForeground = IosStateKey.String("foreground")
-	// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
-	IosStateTerminate = IosStateKey.String("terminate")
-)
-
-// This event represents an occurrence of a lifecycle transition on the Android
-// platform.
-const (
-	// AndroidStateKey is the attribute Key conforming to the "android.state"
-	// semantic conventions. It represents the state the application has
-	// transitioned into at the occurrence of the event.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Note: The Android lifecycle states are defined in [Activity lifecycle
-	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
-	// from which the `OS identifiers` are derived.
-	AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
-	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
-	AndroidStateCreated = AndroidStateKey.String("created")
-	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
-	AndroidStateBackground = AndroidStateKey.String("background")
-	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
-	AndroidStateForeground = AndroidStateKey.String("foreground")
-)
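A lifecycle transition like the enum values above is typically recorded as a span event. A minimal sketch, not part of this patch: the event name "device.app.lifecycle" follows the semantic-convention event name, but your instrumentation may use a different one.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("lifecycle-example").Start(context.Background(), "app")
	defer span.End()

	// Record the transition as a span event carrying the enum value.
	span.AddEvent("device.app.lifecycle",
		trace.WithAttributes(semconv.IosStateBackground))
}
```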
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
-	// FeatureFlagKeyKey is the attribute Key conforming to the
-	// "feature_flag.key" semantic conventions. It represents the unique
-	// identifier of the feature flag.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'logo-color'
-	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
-	// FeatureFlagProviderNameKey is the attribute Key conforming to the
-	// "feature_flag.provider_name" semantic conventions. It represents the
-	// name of the service provider that performs the flag evaluation.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: experimental
-	// Examples: 'Flag Manager'
-	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
-	// FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It represents the evaluated
-	// variant, which SHOULD be a semantic identifier for a value. If one is
-	// unavailable, a stringified version of the value can be used.
-	//
-	// Type: string
-	// RequirementLevel: Recommended
-	// Stability: experimental
-	// Examples: 'red', 'true', 'on'
-	// Note: A semantic identifier, commonly referred to as a variant, provides
-	// a means for referring to a value without including the value itself.
-	// This can provide additional context for understanding the meaning
-	// behind a value. For example, the variant `red` may be used for the
-	// value `#c05543`.
-	//
-	// A stringified version of the value can be used in situations where a
-	// semantic identifier is unavailable. String representation of the value
-	// should be determined by the implementer.
-	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
-	return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
-	return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the evaluated
-// variant, which SHOULD be a semantic identifier for a value. If one is
-// unavailable, a stringified version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
-	return FeatureFlagVariantKey.String(val)
-}
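A minimal sketch (not part of this patch) of a feature flag evaluation recorded as a span event with these constructors; the flag key, provider name, and variant are the illustrative values from the comments above:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("flag-example").Start(context.Background(), "render")
	defer span.End()

	// The variant "red" stands in for the concrete value #c05543,
	// as the note above describes.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
```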
-
-// RPC received/sent message.
-const (
-	// MessageCompressedSizeKey is the attribute Key conforming to the
-	// "message.compressed_size" semantic conventions. It represents the
-	// compressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
-	// MessageIDKey is the attribute Key conforming to the "message.id"
-	// semantic conventions. It represents the message ID, which MUST be
-	// calculated as two different counters starting from `1`: one for sent
-	// messages and one for received messages.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This way we guarantee that the values will be consistent between
-	// different implementations.
-	MessageIDKey = attribute.Key("message.id")
-
-	// MessageTypeKey is the attribute Key conforming to the "message.type"
-	// semantic conventions. It represents whether this is a received or sent
-	// message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessageTypeKey = attribute.Key("message.type")
-
-	// MessageUncompressedSizeKey is the attribute Key conforming to the
-	// "message.uncompressed_size" semantic conventions. It represents the
-	// uncompressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
-	// sent
-	MessageTypeSent = MessageTypeKey.String("SENT")
-	// received
-	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
-	return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It represents the message ID, which MUST be
-// calculated as two different counters starting from `1`: one for sent
-// messages and one for received messages.
-func MessageID(val int) attribute.KeyValue {
-	return MessageIDKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
-	return MessageUncompressedSizeKey.Int(val)
-}
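A minimal sketch (not part of this patch) of an RPC message event using the attributes above; the span name and sizes are placeholders, and the "message" event name follows the RPC semantic conventions:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("rpc-example").Start(context.Background(), "Service/Method")
	defer span.End()

	// First sent message on this span: the sent counter starts at 1,
	// independently of the received counter.
	span.AddEvent("message", trace.WithAttributes(
		semconv.MessageTypeSent,
		semconv.MessageID(1),
		semconv.MessageCompressedSize(512),
		semconv.MessageUncompressedSize(1024),
	))
}
```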
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
deleted file mode 100644
index d66bbe9c2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
+++ /dev/null
@@ -1,2545 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
-	// CloudAccountIDKey is the attribute Key conforming to the
-	// "cloud.account.id" semantic conventions. It represents the cloud account
-	// ID the resource is assigned to.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '111111111111', 'opentelemetry'
-	CloudAccountIDKey = attribute.Key("cloud.account.id")
-
-	// CloudAvailabilityZoneKey is the attribute Key conforming to the
-	// "cloud.availability_zone" semantic conventions. It represents the zone
-	// where the resource is running. Cloud regions often have multiple,
-	// isolated locations known as zones to increase availability.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'us-east-1c'
-	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
-	// Cloud.
-	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
-	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
-	// semantic conventions. It represents the cloud platform in use.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The prefix of the service SHOULD match the one specified in
-	// `cloud.provider`.
-	CloudPlatformKey = attribute.Key("cloud.platform")
-
-	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
-	// semantic conventions. It represents the name of the cloud provider.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	CloudProviderKey = attribute.Key("cloud.provider")
-
-	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
-	// semantic conventions. It represents the geographical region the resource
-	// is running in.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'us-central1', 'us-east-1'
-	// Note: Refer to your provider's docs to see the available regions, for
-	// example [Alibaba Cloud
-	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
-	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
-	// [Azure
-	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
-	// [Google Cloud regions](https://cloud.google.com/about/locations), or
-	// [Tencent Cloud
-	// regions](https://www.tencentcloud.com/document/product/213/6091).
-	CloudRegionKey = attribute.Key("cloud.region")
-
-	// CloudResourceIDKey is the attribute Key conforming to the
-	// "cloud.resource_id" semantic conventions. It represents the cloud
-	// provider-specific native identifier of the monitored cloud resource
-	// (e.g. an
-	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-	// on AWS, a [fully qualified resource
-	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
-	// on Azure, a [full resource
-	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-	// on GCP)
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
-	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
-	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
-	// Note: On some cloud providers, it may not be possible to determine the
-	// full ID at startup,
-	// so it may be necessary to set `cloud.resource_id` as a span attribute
-	// instead.
-	//
-	// The exact value to use for `cloud.resource_id` depends on the cloud
-	// provider.
-	// The following well-known definitions MUST be used if you set this
-	// attribute and they apply:
-	//
-	// * **AWS Lambda:** The function
-	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
-	//   Take care not to use the "invoked ARN" directly but replace any
-	//   [alias
-	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
-	//   with the resolved function version, as the same runtime instance may
-	// be invokable with
-	//   multiple different aliases.
-	// * **GCP:** The [URI of the
-	// resource](https://cloud.google.com/iam/docs/full-resource-names)
-	// * **Azure:** The [Fully Qualified Resource
-	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
-	// of the invoked function,
-	//   *not* the function app, having the form
-	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
-	//   This means that a span attribute MUST be used, as an Azure function
-	// app can host multiple functions that would usually share
-	//   a TracerProvider.
-	CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
-	// Alibaba Cloud Elastic Compute Service
-	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
-	// Alibaba Cloud Function Compute
-	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
-	// Red Hat OpenShift on Alibaba Cloud
-	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
-	// AWS Elastic Compute Cloud
-	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
-	// AWS Elastic Container Service
-	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
-	// AWS Elastic Kubernetes Service
-	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
-	// AWS Lambda
-	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
-	// AWS Elastic Beanstalk
-	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
-	// AWS App Runner
-	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
-	// Red Hat OpenShift on AWS (ROSA)
-	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
-	// Azure Virtual Machines
-	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
-	// Azure Container Instances
-	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
-	// Azure Kubernetes Service
-	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
-	// Azure Functions
-	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
-	// Azure App Service
-	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
-	// Azure Red Hat OpenShift
-	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
-	// Google Bare Metal Solution (BMS)
-	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
-	// Google Cloud Compute Engine (GCE)
-	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
-	// Google Cloud Run
-	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
-	// Google Cloud Kubernetes Engine (GKE)
-	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
-	// Google Cloud Functions (GCF)
-	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
-	// Google Cloud App Engine (GAE)
-	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
-	// Red Hat OpenShift on Google Cloud
-	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
-	// Red Hat OpenShift on IBM Cloud
-	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
-	// Tencent Cloud Cloud Virtual Machine (CVM)
-	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
-	// Tencent Cloud Elastic Kubernetes Service (EKS)
-	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
-	// Tencent Cloud Serverless Cloud Function (SCF)
-	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
-	// Alibaba Cloud
-	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
-	// Amazon Web Services
-	CloudProviderAWS = CloudProviderKey.String("aws")
-	// Microsoft Azure
-	CloudProviderAzure = CloudProviderKey.String("azure")
-	// Google Cloud Platform
-	CloudProviderGCP = CloudProviderKey.String("gcp")
-	// Heroku Platform as a Service
-	CloudProviderHeroku = CloudProviderKey.String("heroku")
-	// IBM Cloud
-	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
-	// Tencent Cloud
-	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
-	return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the zone
-// where the resource is running. Cloud regions often have multiple, isolated
-// locations known as zones to increase availability.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
-	return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running in.
-func CloudRegion(val string) attribute.KeyValue {
-	return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
-	return CloudResourceIDKey.String(val)
-}
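A minimal sketch (not part of this patch) of the cloud attributes above assembled into an SDK resource; the provider, platform, region, zone, and account ID are illustrative values taken from the examples in the comments:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Describe where the workload runs; values are illustrative.
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
		semconv.CloudAccountID("111111111111"),
	)
	fmt.Println(res)
}
```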
-
-// A container instance.
-const (
-	// ContainerCommandKey is the attribute Key conforming to the
-	// "container.command" semantic conventions. It represents the command used
-	// to run the container (i.e. the command name).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcontribcol'
-	// Note: If using embedded credentials or sensitive data, it is recommended
-	// to remove them to prevent potential leakage.
-	ContainerCommandKey = attribute.Key("container.command")
-
-	// ContainerCommandArgsKey is the attribute Key conforming to the
-	// "container.command_args" semantic conventions. It represents all the
-	// command arguments (including the command/executable itself) run by the
-	// container. [2]
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcontribcol, --config, config.yaml'
-	ContainerCommandArgsKey = attribute.Key("container.command_args")
-
-	// ContainerCommandLineKey is the attribute Key conforming to the
-	// "container.command_line" semantic conventions. It represents the full
-	// command run by the container as a single string. [2]
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcontribcol --config config.yaml'
-	ContainerCommandLineKey = attribute.Key("container.command_line")
-
-	// ContainerIDKey is the attribute Key conforming to the "container.id"
-	// semantic conventions. It represents the container ID. Usually a UUID, as
-	// for example used to [identify Docker
-	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-	// The UUID might be abbreviated.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'a3bf90e006b2'
-	ContainerIDKey = attribute.Key("container.id")
-
-	// ContainerImageIDKey is the attribute Key conforming to the
-	// "container.image.id" semantic conventions. It represents the runtime
-	// specific image identifier. Usually a hash algorithm followed by a UUID.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
-	// Note: Docker defines a sha256 of the image id; `container.image.id`
-	// corresponds to the `Image` field from the Docker container inspect
-	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
-	// endpoint.
-	// K8S defines a link to the container registry repository with digest
-	// `"imageID": "registry.azurecr.io
-	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
-	// The ID is assigned by the container runtime and can vary in different
-	// environments. Consider using `oci.manifest.digest` if it is important to
-	// identify the same image in different environments/runtimes.
-	ContainerImageIDKey = attribute.Key("container.image.id")
-
-	// ContainerImageNameKey is the attribute Key conforming to the
-	// "container.image.name" semantic conventions. It represents the name of
-	// the image the container was built on.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'gcr.io/opentelemetry/operator'
-	ContainerImageNameKey = attribute.Key("container.image.name")
-
-	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
-	// "container.image.repo_digests" semantic conventions. It represents the
-	// repo digests of the container image as provided by the container
-	// runtime.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
-	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
-	// Note:
-	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
-	// and
-	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
-	// report those under the `RepoDigests` field.
-	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
-	// ContainerImageTagsKey is the attribute Key conforming to the
-	// "container.image.tags" semantic conventions. It represents the container
-	// image tags. An example can be found in [Docker Image
-	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-	// Should be only the `<tag>` section of the full name for example from
-	// `registry.example.com/my-org/my-image:<tag>`.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'v1.27.1', '3.5.7-0'
-	ContainerImageTagsKey = attribute.Key("container.image.tags")
-
-	// ContainerNameKey is the attribute Key conforming to the "container.name"
-	// semantic conventions. It represents the container name used by container
-	// runtime.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-autoconf'
-	ContainerNameKey = attribute.Key("container.name")
-
-	// ContainerRuntimeKey is the attribute Key conforming to the
-	// "container.runtime" semantic conventions. It represents the container
-	// runtime managing this container.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'docker', 'containerd', 'rkt'
-	ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
-	return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
-	return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
-	return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
-	return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
-	return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
-	return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
-	return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
-	return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
-	return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
-	return ContainerRuntimeKey.String(val)
-}
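A minimal sketch (not part of this patch) of a container-instance resource built from the constructors above; the name, ID, image, tag, and runtime are the illustrative values from the comments:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"), // only the <tag> portion
		semconv.ContainerRuntime("containerd"),
	)
	fmt.Println(res)
}
```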
-
-// Describes device attributes.
-const (
-	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
-	// conventions. It represents a unique identifier representing the device
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
-	// Note: The device identifier MUST only be defined using the values
-	// outlined below. This value is not an advertising identifier and MUST NOT
-	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
-	// to the [vendor
-	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
-	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
-	// Installation ID or a globally unique UUID which is persisted across
-	// sessions in your application. More information can be found
-	// [here](https://developer.android.com/training/articles/user-data-ids) on
-	// best practices and exact implementation details. Caution should be taken
-	// when storing personal data or anything which can identify a user. GDPR
-	// and data protection laws may apply, ensure you do your own due
-	// diligence.
-	DeviceIDKey = attribute.Key("device.id")
-
-	// DeviceManufacturerKey is the attribute Key conforming to the
-	// "device.manufacturer" semantic conventions. It represents the name of
-	// the device manufacturer
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Apple', 'Samsung'
-	// Note: The Android OS provides this field via
-	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
-	// iOS apps SHOULD hardcode the value `Apple`.
-	DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
-	// DeviceModelIdentifierKey is the attribute Key conforming to the
-	// "device.model.identifier" semantic conventions. It represents the model
-	// identifier for the device
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'iPhone3,4', 'SM-G920F'
-	// Note: It's recommended this value represents a machine-readable version
-	// of the model identifier rather than the market or consumer-friendly name
-	// of the device.
-	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
-	// DeviceModelNameKey is the attribute Key conforming to the
-	// "device.model.name" semantic conventions. It represents the marketing
-	// name for the device model
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
-	// Note: It's recommended this value represents a human-readable version of
-	// the device model rather than a machine-readable alternative.
-	DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
-	return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
-	return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
-	return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
-	return DeviceModelNameKey.String(val)
-}
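A minimal sketch (not part of this patch) of the device attributes in a resource; values are the examples from the comments above:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		// Vendor identifier on iOS, not an advertising ID (see note above).
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		semconv.DeviceManufacturer("Apple"),
		semconv.DeviceModelIdentifier("iPhone3,4"), // machine-readable
		semconv.DeviceModelName("iPhone 6s Plus"),  // marketing name
	)
	fmt.Println(res)
}
```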
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
-	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
-	// conventions. It represents the CPU architecture the host system is
-	// running on.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	HostArchKey = attribute.Key("host.arch")
-
-	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
-	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
-	// of level 2 memory cache available to the processor (in Bytes).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12288000
-	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
-	// HostCPUFamilyKey is the attribute Key conforming to the
-	// "host.cpu.family" semantic conventions. It represents the family or
-	// generation of the CPU.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', 'PA-RISC 1.1e'
-	HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
-	// HostCPUModelIDKey is the attribute Key conforming to the
-	// "host.cpu.model.id" semantic conventions. It represents the model
-	// identifier. It provides more granular information about the CPU,
-	// distinguishing it from other CPUs within the same family.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', '9000/778/B180L'
-	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
-	// HostCPUModelNameKey is the attribute Key conforming to the
-	// "host.cpu.model.name" semantic conventions. It represents the model
-	// designation of the processor.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
-	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
-	// HostCPUSteppingKey is the attribute Key conforming to the
-	// "host.cpu.stepping" semantic conventions. It represents the stepping or
-	// core revisions.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1
-	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
-	// HostCPUVendorIDKey is the attribute Key conforming to the
-	// "host.cpu.vendor.id" semantic conventions. It represents the processor
-	// manufacturer identifier. A maximum 12-character string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'GenuineIntel'
-	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
-	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
-	// order results in a 12-character string.
-	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
-	// HostIDKey is the attribute Key conforming to the "host.id" semantic
-	// conventions. It represents the unique host ID. For Cloud, this must be
-	// the instance_id assigned by the cloud provider. For non-containerized
-	// systems, this should be the `machine-id`. See the table below for the
-	// sources to use to determine the `machine-id` based on operating system.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
-	HostIDKey = attribute.Key("host.id")
-
-	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
-	// semantic conventions. It represents the VM image ID or host OS image ID.
-	// For Cloud, this value is from the provider.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ami-07b06b442921831e5'
-	HostImageIDKey = attribute.Key("host.image.id")
-
-	// HostImageNameKey is the attribute Key conforming to the
-	// "host.image.name" semantic conventions. It represents the name of the VM
-	// image or OS install the host was instantiated from.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
-	HostImageNameKey = attribute.Key("host.image.name")
-
-	// HostImageVersionKey is the attribute Key conforming to the
-	// "host.image.version" semantic conventions. It represents the version
-	// string of the VM image or host OS as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0.1'
-	HostImageVersionKey = attribute.Key("host.image.version")
-
-	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
-	// conventions. It represents the available IP addresses of the host,
-	// excluding loopback interfaces.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
-	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
-	// addresses MUST be specified in the [RFC
-	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
-	HostIPKey = attribute.Key("host.ip")
-
-	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
-	// conventions. It represents the available MAC addresses of the host,
-	// excluding loopback interfaces.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
-	// Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
-	// form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
-	// as hyphen-separated octets in uppercase hexadecimal form from most to
-	// least significant.
-	HostMacKey = attribute.Key("host.mac")
-
-	// HostNameKey is the attribute Key conforming to the "host.name" semantic
-	// conventions. It represents the name of the host. On Unix systems, it may
-	// contain what the hostname command returns, or the fully qualified
-	// hostname, or another name specified by the user.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-test'
-	HostNameKey = attribute.Key("host.name")
-
-	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
-	// conventions. It represents the type of host. For Cloud, this must be the
-	// machine type.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'n1-standard-1'
-	HostTypeKey = attribute.Key("host.type")
-)
-
-var (
-	// AMD64
-	HostArchAMD64 = HostArchKey.String("amd64")
-	// ARM32
-	HostArchARM32 = HostArchKey.String("arm32")
-	// ARM64
-	HostArchARM64 = HostArchKey.String("arm64")
-	// Itanium
-	HostArchIA64 = HostArchKey.String("ia64")
-	// 32-bit PowerPC
-	HostArchPPC32 = HostArchKey.String("ppc32")
-	// 64-bit PowerPC
-	HostArchPPC64 = HostArchKey.String("ppc64")
-	// IBM z/Architecture
-	HostArchS390x = HostArchKey.String("s390x")
-	// 32-bit x86
-	HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
-	return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
-	return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
-	return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
-	return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val int) attribute.KeyValue {
-	return HostCPUSteppingKey.Int(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
-	return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
-	return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
-	return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
-	return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
-	return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
-	return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
-	return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
-	return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
-	return HostTypeKey.String(val)
-}
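A minimal sketch (not part of this patch) of the host attributes in a resource; the host name, ID, type, and addresses are the illustrative values from the comments above:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.HostName("opentelemetry-test"),
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"), // machine-id
		semconv.HostArchAMD64,
		semconv.HostType("n1-standard-1"),
		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
	)
	fmt.Println(res)
}
```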
-
-// Kubernetes resource attributes.
-const (
-	// K8SClusterNameKey is the attribute Key conforming to the
-	// "k8s.cluster.name" semantic conventions. It represents the name of the
-	// cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-cluster'
-	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
-	// K8SClusterUIDKey is the attribute Key conforming to the
-	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
-	// the cluster, set to the UID of the `kube-system` namespace.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
-	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
-	// ever
-	// added, we will recommend collecting the `k8s.cluster.uid` through the
-	// official APIs. In the meantime, we are able to use the `uid` of the
-	// `kube-system` namespace as a proxy for cluster ID. Read on for the
-	// rationale.
-	//
-	// Every object created in a K8S cluster is assigned a distinct UID. The
-	// `kube-system` namespace is used by Kubernetes itself and will exist
-	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
-	// namespace is a reasonable proxy for the K8S ClusterID as it will only
-	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
-	// UUIDs as standardized by
-	// [ISO/IEC 9834-8 and ITU-T
-	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
-	// Which states:
-	//
-	// > If generated according to one of the mechanisms defined in Rec.
-	//   ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
-	//   different from all other UUIDs generated before 3603 A.D., or is
-	//   extremely likely to be different (depending on the mechanism chosen).
-	//
-	// Therefore, UIDs between clusters should be extremely unlikely to
-	// conflict.
-	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
-	// K8SContainerNameKey is the attribute Key conforming to the
-	// "k8s.container.name" semantic conventions. It represents the name of the
-	// Container from the Pod specification; it must be unique within a Pod.
-	// The container runtime usually uses a different, globally unique name
-	// (`container.name`).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'redis'
-	K8SContainerNameKey = attribute.Key("k8s.container.name")
-
-	// K8SContainerRestartCountKey is the attribute Key conforming to the
-	// "k8s.container.restart_count" semantic conventions. It represents the
-	// number of times the container was restarted. This attribute can be used
-	// to identify a particular container (running or stopped) within a
-	// container spec.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 2
-	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
-	// K8SCronJobNameKey is the attribute Key conforming to the
-	// "k8s.cronjob.name" semantic conventions. It represents the name of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
-	// K8SCronJobUIDKey is the attribute Key conforming to the
-	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
-	// K8SDaemonSetNameKey is the attribute Key conforming to the
-	// "k8s.daemonset.name" semantic conventions. It represents the name of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
-	// K8SDaemonSetUIDKey is the attribute Key conforming to the
-	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
-	// K8SDeploymentNameKey is the attribute Key conforming to the
-	// "k8s.deployment.name" semantic conventions. It represents the name of
-	// the Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
-	// K8SDeploymentUIDKey is the attribute Key conforming to the
-	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-	// Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
-	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
-	// semantic conventions. It represents the name of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SJobNameKey = attribute.Key("k8s.job.name")
-
-	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
-	// semantic conventions. It represents the UID of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
-	// K8SNamespaceNameKey is the attribute Key conforming to the
-	// "k8s.namespace.name" semantic conventions. It represents the name of the
-	// namespace that the pod is running in.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'default'
-	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
-	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
-	// semantic conventions. It represents the name of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'node-1'
-	K8SNodeNameKey = attribute.Key("k8s.node.name")
-
-	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
-	// semantic conventions. It represents the UID of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
-	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
-	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
-	// semantic conventions. It represents the name of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-pod-autoconf'
-	K8SPodNameKey = attribute.Key("k8s.pod.name")
-
-	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
-	// semantic conventions. It represents the UID of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
-	// K8SReplicaSetNameKey is the attribute Key conforming to the
-	// "k8s.replicaset.name" semantic conventions. It represents the name of
-	// the ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
-	// K8SReplicaSetUIDKey is the attribute Key conforming to the
-	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-	// ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
-	// K8SStatefulSetNameKey is the attribute Key conforming to the
-	// "k8s.statefulset.name" semantic conventions. It represents the name of
-	// the StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
-	// K8SStatefulSetUIDKey is the attribute Key conforming to the
-	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-	// StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
-	return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
-	return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name
-// (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
-	return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
-	return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
-	return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
-	return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
-	return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
-	return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
-	return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
-	return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
-	return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
-	return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
-	return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
-	return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
-	return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
-	return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
-	return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
-	return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
-	return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
-	return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
-	return K8SStatefulSetUIDKey.String(val)
-}
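A minimal sketch (not part of this patch) of Kubernetes attributes assembled into a resource; cluster, namespace, pod, and container names are the illustrative values from the comments above:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
		// Distinguishes the current container from earlier restarts.
		semconv.K8SContainerRestartCount(2),
	)
	fmt.Println(res)
}
```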
-
-// An OCI image manifest.
-const (
-	// OciManifestDigestKey is the attribute Key conforming to the
-	// "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, this is the
-	// digest by which the container image is known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
-	// Note: Follows [OCI Image Manifest
-	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
-	// and specifically the [Digest
-	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
-	// An example can be found in [Example Image
-	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
-	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
-	return OciManifestDigestKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
-	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
-	// semantic conventions. It represents the unique identifier for a
-	// particular build or compilation of the operating system.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
-	OSBuildIDKey = attribute.Key("os.build_id")
-
-	// OSDescriptionKey is the attribute Key conforming to the "os.description"
-	// semantic conventions. It represents the human readable (not intended to
-	// be parsed) OS version information, as reported by, for example, the
-	// `ver` or `lsb_release -a` commands.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
-	// LTS'
-	OSDescriptionKey = attribute.Key("os.description")
-
-	// OSNameKey is the attribute Key conforming to the "os.name" semantic
-	// conventions. It represents the human readable operating system name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'iOS', 'Android', 'Ubuntu'
-	OSNameKey = attribute.Key("os.name")
-
-	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
-	// conventions. It represents the operating system type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	OSTypeKey = attribute.Key("os.type")
-
-	// OSVersionKey is the attribute Key conforming to the "os.version"
-	// semantic conventions. It represents the version string of the operating
-	// system as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '14.2.1', '18.04.1'
-	OSVersionKey = attribute.Key("os.version")
-)
-
-var (
-	// Microsoft Windows
-	OSTypeWindows = OSTypeKey.String("windows")
-	// Linux
-	OSTypeLinux = OSTypeKey.String("linux")
-	// Apple Darwin
-	OSTypeDarwin = OSTypeKey.String("darwin")
-	// FreeBSD
-	OSTypeFreeBSD = OSTypeKey.String("freebsd")
-	// NetBSD
-	OSTypeNetBSD = OSTypeKey.String("netbsd")
-	// OpenBSD
-	OSTypeOpenBSD = OSTypeKey.String("openbsd")
-	// DragonFly BSD
-	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
-	// HP-UX (Hewlett Packard Unix)
-	OSTypeHPUX = OSTypeKey.String("hpux")
-	// AIX (Advanced Interactive eXecutive)
-	OSTypeAIX = OSTypeKey.String("aix")
-	// SunOS, Oracle Solaris
-	OSTypeSolaris = OSTypeKey.String("solaris")
-	// IBM z/OS
-	OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
-	return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by, for example,
-// the `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
-	return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
-	return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
-	return OSVersionKey.String(val)
-}
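-
-// A minimal usage sketch (not part of the generated conventions): the
-// `os.type` enum variables are ready-made KeyValues, while the remaining
-// attributes use the constructor helpers. Values mirror the documented
-// examples.
-//
-//	import "go.opentelemetry.io/otel/attribute"
-//
-//	func osAttributes() []attribute.KeyValue {
-//		return []attribute.KeyValue{
-//			OSTypeLinux,
-//			OSName("Ubuntu"),
-//			OSVersion("18.04.1"),
-//			OSDescription("Ubuntu 18.04.1 LTS"),
-//		}
-//	}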
-
-// An operating system process.
-const (
-	// ProcessCommandKey is the attribute Key conforming to the
-	// "process.command" semantic conventions. It represents the command used
-	// to launch the process (i.e. the command name). On Linux based systems,
-	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
-	// be set to the first parameter extracted from `GetCommandLineW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol'
-	ProcessCommandKey = attribute.Key("process.command")
-
-	// ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
-	// command arguments (including the command/executable itself) as received
-	// by the process. On Linux-based systems (and some other Unixoid systems
-	// supporting procfs), can be set according to the list of null-delimited
-	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-	// this would be the full argv vector passed to `main`.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol', '--config=config.yaml'
-	ProcessCommandArgsKey = attribute.Key("process.command_args")
-
-	// ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process, represented as a single string. On
-	// Windows, can be set to the result of `GetCommandLineW`.
-	// Do not set this if you have to assemble it just for monitoring; use
-	// `process.command_args` instead.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
-	ProcessCommandLineKey = attribute.Key("process.command_line")
-
-	// ProcessExecutableNameKey is the attribute Key conforming to the
-	// "process.executable.name" semantic conventions. It represents the name
-	// of the process executable. On Linux based systems, can be set to the
-	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
-	// of `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcol'
-	ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
-	// ProcessExecutablePathKey is the attribute Key conforming to the
-	// "process.executable.path" semantic conventions. It represents the full
-	// path to the process executable. On Linux based systems, can be set to
-	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
-	// `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/usr/bin/cmd/otelcol'
-	ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
-	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
-	// semantic conventions. It represents the username of the user that owns
-	// the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessOwnerKey = attribute.Key("process.owner")
-
-	// ProcessParentPIDKey is the attribute Key conforming to the
-	// "process.parent_pid" semantic conventions. It represents the parent
-	// Process identifier (PPID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 111
-	ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
-	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
-	// semantic conventions. It represents the process identifier (PID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1234
-	ProcessPIDKey = attribute.Key("process.pid")
-
-	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
-	// "process.runtime.description" semantic conventions. It represents an
-	// additional description of the runtime of the process, for example a
-	// specific vendor customization of the runtime environment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
-	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
-	// ProcessRuntimeNameKey is the attribute Key conforming to the
-	// "process.runtime.name" semantic conventions. It represents the name of
-	// the runtime of this process. For compiled native binaries, this SHOULD
-	// be the name of the compiler.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'OpenJDK Runtime Environment'
-	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
-	// ProcessRuntimeVersionKey is the attribute Key conforming to the
-	// "process.runtime.version" semantic conventions. It represents the
-	// version of the runtime of this process, as returned by the runtime
-	// without modification.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '14.0.2'
-	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
-	return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
-	return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process, represented as a single string.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
-	return ProcessCommandLineKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
-	return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
-	return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
-	return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
-	return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
-	return ProcessPIDKey.Int(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
-	return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
-	return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
-	return ProcessRuntimeVersionKey.String(val)
-}
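-
-// A minimal usage sketch (not part of the generated conventions): describing
-// the current process with the typed helpers; note the int-valued and
-// variadic slice-valued constructors alongside the string-valued ones.
-//
-//	import (
-//		"os"
-//		"runtime"
-//
-//		"go.opentelemetry.io/otel/attribute"
-//	)
-//
-//	func processAttributes() []attribute.KeyValue {
-//		return []attribute.KeyValue{
-//			ProcessPID(os.Getpid()),
-//			ProcessParentPID(os.Getppid()),
-//			ProcessCommandArgs(os.Args...),
-//			ProcessRuntimeName("go"),
-//			ProcessRuntimeVersion(runtime.Version()),
-//		}
-//	}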
-
-// The Android platform on which the Android application is running.
-const (
-	// AndroidOSAPILevelKey is the attribute Key conforming to the
-	// "android.os.api_level" semantic conventions. It uniquely identifies the
-	// framework API revision offered by a version (`os.version`) of the
-	// Android operating system. More information can be
-	// found
-	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '33', '32'
-	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It uniquely identifies the
-// framework API revision offered by a version (`os.version`) of the Android
-// operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
-	return AndroidOSAPILevelKey.String(val)
-}
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
-	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
-	// semantic conventions. It represents the array of brand name and version
-	// separated by a space
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.brands`).
-	BrowserBrandsKey = attribute.Key("browser.brands")
-
-	// BrowserLanguageKey is the attribute Key conforming to the
-	// "browser.language" semantic conventions. It represents the preferred
-	// language of the user using the browser
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
-	// Note: This value is intended to be taken from the Navigator API
-	// `navigator.language`.
-	BrowserLanguageKey = attribute.Key("browser.language")
-
-	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
-	// semantic conventions. It represents a boolean that is true if the
-	// browser is running on a mobile device
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
-	// SHOULD be left unset.
-	BrowserMobileKey = attribute.Key("browser.mobile")
-
-	// BrowserPlatformKey is the attribute Key conforming to the
-	// "browser.platform" semantic conventions. It represents the platform on
-	// which the browser is running
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Windows', 'macOS', 'Android'
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.platform`). If unavailable, the legacy
-	// `navigator.platform` API SHOULD NOT be used instead and this attribute
-	// SHOULD be left unset in order for the values to be consistent.
-	// The list of possible values is defined in the [W3C User-Agent Client
-	// Hints
-	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
-	// Note that some (but not all) of these values can overlap with values in
-	// the [`os.type` and `os.name` attributes](./os.md). However, for
-	// consistency, the values in the `browser.platform` attribute should
-	// capture the exact value that the user agent provides.
-	BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
-	return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
-	return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
-	return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
-	return BrowserPlatformKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
-	// AWSECSClusterARNKey is the attribute Key conforming to the
-	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
-	// [ECS
-	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
-	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
-	// AWSECSContainerARNKey is the attribute Key conforming to the
-	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-	// Resource Name (ARN) of an [ECS container
-	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
-	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
-	// AWSECSLaunchtypeKey is the attribute Key conforming to the
-	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
-	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
-	// for an ECS task.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
-	// AWSECSTaskARNKey is the attribute Key conforming to the
-	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
-	// [ECS task
-	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
-	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
-	// AWSECSTaskFamilyKey is the attribute Key conforming to the
-	// "aws.ecs.task.family" semantic conventions. It represents the task
-	// definition family this task definition is a member of.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-family'
-	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
-	// AWSECSTaskRevisionKey is the attribute Key conforming to the
-	// "aws.ecs.task.revision" semantic conventions. It represents the revision
-	// for this task definition.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '8', '26'
-	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
-	// ec2
-	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
-	// fargate
-	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
-	return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
-	return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
-	return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
-	return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
-	return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
-	// AWSEKSClusterARNKey is the attribute Key conforming to the
-	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
-	// EKS cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
-	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
-	return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
-	// AWSLogGroupARNsKey is the attribute Key conforming to the
-	// "aws.log.group.arns" semantic conventions. It represents the Amazon
-	// Resource Name(s) (ARN) of the AWS log group(s).
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
-	// Note: See the [log group ARN format
-	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
-	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
-	// AWSLogGroupNamesKey is the attribute Key conforming to the
-	// "aws.log.group.names" semantic conventions. It represents the name(s) of
-	// the AWS log group(s) an application is writing to.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
-	// Note: Multiple log groups must be supported for cases like
-	// multi-container applications, where a single application has sidecar
-	// containers, and each writes to its own log group.
-	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
-	// AWSLogStreamARNsKey is the attribute Key conforming to the
-	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
-	// the AWS log stream(s).
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
-	// Note: See the [log stream ARN format
-	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
-	// One log group can contain several log streams, so these ARNs necessarily
-	// identify both a log group and a log stream.
-	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
-	// AWSLogStreamNamesKey is the attribute Key conforming to the
-	// "aws.log.stream.names" semantic conventions. It represents the name(s)
-	// of the AWS log stream(s) an application is writing to.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
-	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
-	return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
-	return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
-	return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
-	return AWSLogStreamNamesKey.StringSlice(val)
-}
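-
-// A minimal usage sketch (not part of the generated conventions): the log
-// group and log stream helpers are variadic and yield string-slice
-// attributes, so several groups or streams fit in one KeyValue. The values
-// mirror the documented examples.
-//
-//	import "go.opentelemetry.io/otel/attribute"
-//
-//	func awsLogAttributes() []attribute.KeyValue {
-//		return []attribute.KeyValue{
-//			AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
-//			AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
-//		}
-//	}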
-
-// Resource used by Google Cloud Run.
-const (
-	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
-	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
-	// name of the Cloud Run
-	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
-	// being run for the Job, as set by the
-	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-	// environment variable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
-	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
-	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
-	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
-	// index for a task within an execution as provided by the
-	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-	// environment variable.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 1
-	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
-	return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
-	return GCPCloudRunJobTaskIndexKey.Int(val)
-}
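-
-// A minimal usage sketch (not part of the generated conventions): the
-// documented CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment
-// variables supply these values directly; the task index only needs an
-// integer conversion.
-//
-//	import (
-//		"os"
-//		"strconv"
-//
-//		"go.opentelemetry.io/otel/attribute"
-//	)
-//
-//	func cloudRunJobAttributes() []attribute.KeyValue {
-//		attrs := []attribute.KeyValue{
-//			GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
-//		}
-//		if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
-//			attrs = append(attrs, GCPCloudRunJobTaskIndex(idx))
-//		}
-//		return attrs
-//	}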
-
-// Resources used by Google Compute Engine (GCE).
-const (
-	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
-	// "gcp.gce.instance.hostname" semantic conventions. It represents the
-	// hostname of a GCE instance. This is the full value of the default or
-	// [custom
-	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-host1234.example.com',
-	// 'sample-vm.us-west1-b.c.my-project.internal'
-	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
-	// GCPGceInstanceNameKey is the attribute Key conforming to the
-	// "gcp.gce.instance.name" semantic conventions. It represents the instance
-	// name of a GCE instance. This is the value provided by `host.name`, the
-	// visible name of the instance in the Cloud Console UI, and the prefix for
-	// the default hostname of the instance as defined by the [default internal
-	// DNS
-	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-1', 'my-vm-name'
-	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
-	return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
-	return GCPGceInstanceNameKey.String(val)
-}
-
-// Heroku dyno metadata
-const (
-	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
-	// semantic conventions. It represents the unique identifier for the
-	// application
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
-	HerokuAppIDKey = attribute.Key("heroku.app.id")
-
-	// HerokuReleaseCommitKey is the attribute Key conforming to the
-	// "heroku.release.commit" semantic conventions. It represents the commit
-	// hash for the current release
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
-	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
-	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
-	// "heroku.release.creation_timestamp" semantic conventions. It represents
-	// the time and date the release was created
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2022-10-23T18:00:42Z'
-	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
-	return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
-	return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
-	return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// The software deployment.
-const (
-	// DeploymentEnvironmentKey is the attribute Key conforming to the
-	// "deployment.environment" semantic conventions. It represents the name of
-	// the [deployment
-	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
-	// deployment tier).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'staging', 'production'
-	// Note: `deployment.environment` does not affect the uniqueness
-	// constraints defined through
-	// the `service.namespace`, `service.name` and `service.instance.id`
-	// resource attributes.
-	// This implies that resources carrying the following attribute
-	// combinations MUST be
-	// considered to be identifying the same service:
-	//
-	// * `service.name=frontend`, `deployment.environment=production`
-	// * `service.name=frontend`, `deployment.environment=staging`.
-	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
-	return DeploymentEnvironmentKey.String(val)
-}
-
-// A serverless instance.
-const (
-	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
-	// semantic conventions. It represents the execution environment ID as a
-	// string, which will potentially be reused for other invocations to the
-	// same function/function version.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
-	// Note: * **AWS Lambda:** Use the (full) log stream name.
-	FaaSInstanceKey = attribute.Key("faas.instance")
-
-	// FaaSMaxMemoryKey is the attribute Key conforming to the
-	// "faas.max_memory" semantic conventions. It represents the amount of
-	// memory available to the serverless function converted to Bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 134217728
-	// Note: It's recommended to set this attribute since e.g. too little
-	// memory can easily stop a Java AWS Lambda function from working
-	// correctly. On AWS Lambda, the environment variable
-	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
-	// be multiplied by 1,048,576).
-	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
-	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
-	// conventions. It represents the name of the single function that this
-	// runtime instance executes.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
-	// Note: This is the name of the function as configured/deployed on the
-	// FaaS
-	// platform and is usually different from the name of the callback
-	// function (which may be stored in the
-	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
-	// span attributes).
-	//
-	// For some cloud providers, the above definition is ambiguous. The
-	// following
-	// definition of function name MUST be used for this attribute
-	// (and consequently the span name) for the listed cloud
-	// providers/products:
-	//
-	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
-	//   followed by a forward slash followed by the function name (this form
-	//   can also be seen in the resource JSON for the function).
-	//   This means that a span attribute MUST be used, as an Azure function
-	//   app can host multiple functions that would usually share
-	//   a TracerProvider (see also the `cloud.resource_id` attribute).
-	FaaSNameKey = attribute.Key("faas.name")
-
-	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
-	// semantic conventions. It represents the immutable version of the
-	// function being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '26', 'pinkfroid-00002'
-	// Note: Depending on the cloud provider and platform, use:
-	//
-	// * **AWS Lambda:** The [function
-	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
-	//   (an integer represented as a decimal string).
-	// * **Google Cloud Run (Services):** The
-	// [revision](https://cloud.google.com/run/docs/managing/revisions)
-	//   (i.e., the function name plus the revision suffix).
-	// * **Google Cloud Functions:** The value of the
-	//   [`K_REVISION` environment
-	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
-	// * **Azure Functions:** Not applicable. Do not set this attribute.
-	FaaSVersionKey = attribute.Key("faas.version")
-)
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, which will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
-	return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
-	return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
-	return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
-	return FaaSVersionKey.String(val)
-}
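-
-// A minimal sketch (not part of the generated conventions) of the documented
-// `faas.max_memory` conversion on AWS Lambda: AWS_LAMBDA_FUNCTION_MEMORY_SIZE
-// reports MiB and is multiplied by 1,048,576 to obtain bytes.
-//
-//	import (
-//		"os"
-//		"strconv"
-//
-//		"go.opentelemetry.io/otel/attribute"
-//	)
-//
-//	func lambdaMaxMemory() (attribute.KeyValue, bool) {
-//		mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
-//		if err != nil {
-//			return attribute.KeyValue{}, false
-//		}
-//		return FaaSMaxMemory(mib * 1048576), true
-//	}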
-
-// A service instance.
-const (
-	// ServiceNameKey is the attribute Key conforming to the "service.name"
-	// semantic conventions. It represents the logical name of the service.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'shoppingcart'
-	// Note: MUST be the same for all instances of horizontally scaled
-	// services. If the value was not specified, SDKs MUST fallback to
-	// `unknown_service:` concatenated with
-	// [`process.executable.name`](process.md#process), e.g.
-	// `unknown_service:bash`. If `process.executable.name` is not available,
-	// the value MUST be set to `unknown_service`.
-	ServiceNameKey = attribute.Key("service.name")
-
-	// ServiceVersionKey is the attribute Key conforming to the
-	// "service.version" semantic conventions. It represents the version string
-	// of the service API or implementation. The format is not defined by these
-	// conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2.0.0', 'a01dbef8a'
-	ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
-	return ServiceNameKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
-	return ServiceVersionKey.String(val)
-}
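-
-// A minimal sketch (not part of the generated conventions) of the documented
-// `service.name` fallback: `unknown_service:` concatenated with the
-// executable name, or plain `unknown_service` when the executable name is
-// unavailable.
-//
-//	import (
-//		"os"
-//		"path/filepath"
-//
-//		"go.opentelemetry.io/otel/attribute"
-//	)
-//
-//	func serviceNameOrFallback(configured string) attribute.KeyValue {
-//		if configured != "" {
-//			return ServiceName(configured)
-//		}
-//		if exe, err := os.Executable(); err == nil {
-//			return ServiceName("unknown_service:" + filepath.Base(exe))
-//		}
-//		return ServiceName("unknown_service")
-//	}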
-
-// A service instance.
-const (
-	// ServiceInstanceIDKey is the attribute Key conforming to the
-	// "service.instance.id" semantic conventions. It represents the string ID
-	// of the service instance.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-k8s-pod-deployment-1',
-	// '627cc493-f310-47de-96bd-71410b7dec09'
-	// Note: MUST be unique for each instance of the same
-	// `service.namespace,service.name` pair (in other words
-	// `service.namespace,service.name,service.instance.id` triplet MUST be
-	// globally unique). The ID helps to distinguish instances of the same
-	// service that exist at the same time (e.g. instances of a horizontally
-	// scaled service). It is preferable for the ID to be persistent and stay
-	// the same for the lifetime of the service instance, however it is
-	// acceptable that the ID is ephemeral and changes during important
-	// lifetime events for the service (e.g. service restarts). If the service
-	// has no inherent unique ID that can be used as the value of this
-	// attribute it is recommended to generate a random Version 1 or Version 4
-	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
-	// Version 5, see RFC 4122 for more recommendations).
-	ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
-	// ServiceNamespaceKey is the attribute Key conforming to the
-	// "service.namespace" semantic conventions. It represents a namespace for
-	// `service.name`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Shop'
-	// Note: A string value having a meaning that helps to distinguish a group
-	// of services, for example the team name that owns a group of services.
-	// `service.name` is expected to be unique within the same namespace. If
-	// `service.namespace` is not specified in the Resource then `service.name`
-	// is expected to be unique for all services that have no explicit
-	// namespace defined (so the empty/unspecified namespace is simply one more
-	// valid namespace). Zero-length namespace string is assumed equal to
-	// unspecified namespace.
-	ServiceNamespaceKey = attribute.Key("service.namespace")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
-	return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
-	return ServiceNamespaceKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
-	// TelemetrySDKLanguageKey is the attribute Key conforming to the
-	// "telemetry.sdk.language" semantic conventions. It represents the
-	// language of the telemetry SDK.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
-	// TelemetrySDKNameKey is the attribute Key conforming to the
-	// "telemetry.sdk.name" semantic conventions. It represents the name of the
-	// telemetry SDK as defined above.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
-	// to `opentelemetry`.
-	// If another SDK, like a fork or a vendor-provided implementation, is
-	// used, this SDK MUST set the
-	// `telemetry.sdk.name` attribute to the fully-qualified class or module
-	// name of this SDK's main entry point
-	// or another suitable identifier depending on the language.
-	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
-	// case.
-	// All custom identifiers SHOULD be stable across different versions of an
-	// implementation.
-	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
-	// TelemetrySDKVersionKey is the attribute Key conforming to the
-	// "telemetry.sdk.version" semantic conventions. It represents the version
-	// string of the telemetry SDK.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: '1.2.3'
-	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
-	// cpp
-	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
-	// dotnet
-	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
-	// erlang
-	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
-	// go
-	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
-	// java
-	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
-	// nodejs
-	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
-	// php
-	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
-	// python
-	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
-	// ruby
-	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
-	// rust
-	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
-	// swift
-	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-	// webjs
-	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
-	return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
-	return TelemetrySDKVersionKey.String(val)
-}
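-
-// A minimal usage sketch (not part of the generated conventions): the
-// language is an enum value, while name and version use the constructor
-// helpers. Per the note above, only the OpenTelemetry SDK itself may use the
-// name `opentelemetry`.
-//
-//	import "go.opentelemetry.io/otel/attribute"
-//
-//	func sdkAttributes() []attribute.KeyValue {
-//		return []attribute.KeyValue{
-//			TelemetrySDKLanguageGo,
-//			TelemetrySDKName("opentelemetry"),
-//			TelemetrySDKVersion("1.2.3"),
-//		}
-//	}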
-
-// The auto instrumentation agent or distribution, if any, used alongside the
-// telemetry SDK to capture data recorded by the instrumentation libraries.
-const (
-	// TelemetryDistroNameKey is the attribute Key conforming to the
-	// "telemetry.distro.name" semantic conventions. It represents the name of
-	// the auto instrumentation agent or distribution, if used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'parts-unlimited-java'
-	// Note: Official auto instrumentation agents and distributions SHOULD set
-	// the `telemetry.distro.name` attribute to
-	// a string starting with `opentelemetry-`, e.g.
-	// `opentelemetry-java-instrumentation`.
-	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
-	// TelemetryDistroVersionKey is the attribute Key conforming to the
-	// "telemetry.distro.version" semantic conventions. It represents the
-	// version string of the auto instrumentation agent or distribution, if
-	// used.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.2.3'
-	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
-	return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
-	return TelemetryDistroVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
-	// WebEngineDescriptionKey is the attribute Key conforming to the
-	// "webengine.description" semantic conventions. It represents the
-	// additional description of the web engine (e.g. detailed version and
-	// edition information).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
-	// 2.2.2.Final'
-	WebEngineDescriptionKey = attribute.Key("webengine.description")
-
-	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
-	// semantic conventions. It represents the name of the web engine.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'WildFly'
-	WebEngineNameKey = attribute.Key("webengine.name")
-
-	// WebEngineVersionKey is the attribute Key conforming to the
-	// "webengine.version" semantic conventions. It represents the version of
-	// the web engine.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '21.0.0'
-	WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
-	return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
-	return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
-	return WebEngineVersionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
-	// OTelScopeNameKey is the attribute Key conforming to the
-	// "otel.scope.name" semantic conventions. It represents the name of the
-	// instrumentation scope (`InstrumentationScope.Name` in OTLP).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'io.opentelemetry.contrib.mongodb'
-	OTelScopeNameKey = attribute.Key("otel.scope.name")
-
-	// OTelScopeVersionKey is the attribute Key conforming to the
-	// "otel.scope.version" semantic conventions. It represents the version of
-	// the instrumentation scope (`InstrumentationScope.Version` in OTLP).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.0.0'
-	OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
-	return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
-	return OTelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
-	// OTelLibraryNameKey is the attribute Key conforming to the
-	// "otel.library.name" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: 'io.opentelemetry.contrib.mongodb'
-	// Deprecated: use the `otel.scope.name` attribute.
-	OTelLibraryNameKey = attribute.Key("otel.library.name")
-
-	// OTelLibraryVersionKey is the attribute Key conforming to the
-	// "otel.library.version" semantic conventions.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: deprecated
-	// Examples: '1.0.0'
-	// Deprecated: use the `otel.scope.version` attribute.
-	OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions.
-//
-// Deprecated: use the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
-	return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions.
-//
-// Deprecated: use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
-	return OTelLibraryVersionKey.String(val)
-}
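-
-// A minimal migration sketch (not part of the generated conventions):
-// replacing the deprecated library attributes with their scope equivalents is
-// a one-to-one substitution.
-//
-//	// Deprecated:
-//	//   OTelLibraryName("io.opentelemetry.contrib.mongodb")
-//	//   OTelLibraryVersion("1.0.0")
-//	// Preferred:
-//	attrs := []attribute.KeyValue{
-//		OTelScopeName("io.opentelemetry.contrib.mongodb"),
-//		OTelScopeVersion("1.0.0"),
-//	}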
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
deleted file mode 100644
index c1718234e..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Operations that access some remote service.
-const (
-	// PeerServiceKey is the attribute Key conforming to the "peer.service"
-	// semantic conventions. It represents the
-	// [`service.name`](/docs/resource/README.md#service) of the remote
-	// service. SHOULD be equal to the actual `service.name` resource attribute
-	// of the remote service if any.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AuthTokenCache'
-	PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
-	return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
-	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
-	// semantic conventions. It represents the username or client_id extracted
-	// from the access token or
-	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
-	// in the inbound request from outside the system.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'username'
-	EnduserIDKey = attribute.Key("enduser.id")
-
-	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
-	// semantic conventions. It represents the actual/assumed role the client
-	// is making the request under, extracted from the token or application
-	// security context.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'admin'
-	EnduserRoleKey = attribute.Key("enduser.role")
-
-	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
-	// semantic conventions. It represents the scopes or granted authorities
-	// the client currently possesses, extracted from the token or application
-	// security context. The value would come from the scope associated with an
-	// [OAuth 2.0 Access
-	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-	// value in a [SAML 2.0
-	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'read:message, write:files'
-	EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
-	return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from the token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
-	return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
-	return EnduserScopeKey.String(val)
-}
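A short sketch of applying the enduser helpers to a span; it assumes the global tracer provider (a no-op unless one is installed), an illustrative tracer name, and placeholder values standing in for a real access token.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// otel.Tracer uses the global provider, a no-op unless one is installed.
	_, span := otel.Tracer("example").Start(context.Background(), "handle-request")
	defer span.End()

	// Placeholder values; real ones come from the validated access token.
	span.SetAttributes(
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message write:files"),
	)
}
```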
-
-// These attributes allow reporting on this unit of code and therefore provide
-// more context about the span.
-const (
-	// CodeColumnKey is the attribute Key conforming to the "code.column"
-	// semantic conventions. It represents the column number in `code.filepath`
-	// best representing the operation. It SHOULD point within the code unit
-	// named in `code.function`.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 16
-	CodeColumnKey = attribute.Key("code.column")
-
-	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
-	// semantic conventions. It represents the source code file name that
-	// identifies the code unit as uniquely as possible (preferably an absolute
-	// file path).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
-	CodeFilepathKey = attribute.Key("code.filepath")
-
-	// CodeFunctionKey is the attribute Key conforming to the "code.function"
-	// semantic conventions. It represents the method or function name, or
-	// equivalent (usually rightmost part of the code unit's name).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'serveRequest'
-	CodeFunctionKey = attribute.Key("code.function")
-
-	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
-	// semantic conventions. It represents the line number in `code.filepath`
-	// best representing the operation. It SHOULD point within the code unit
-	// named in `code.function`.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	CodeLineNumberKey = attribute.Key("code.lineno")
-
-	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
-	// semantic conventions. It represents the "namespace" within which
-	// `code.function` is defined. Usually the qualified class or module name,
-	// such that `code.namespace` + some separator + `code.function` form a
-	// unique identifier for the code unit.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'com.example.MyHTTPService'
-	CodeNamespaceKey = attribute.Key("code.namespace")
-
-	// CodeStacktraceKey is the attribute Key conforming to the
-	// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-	// string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
-	//  at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
-	//  at com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
-	return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
-	return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
-	return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
-	return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
-	return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
-	return CodeStacktraceKey.String(val)
-}
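The code.* helpers pair naturally with runtime.Caller. A hedged sketch follows; splitting the qualified function name on its last dot to derive code.namespace and code.function is an assumption that holds for top-level Go functions but not for every method.

```go
package main

import (
	"runtime"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// codeLocation captures the caller's position as code.* attributes.
// The namespace/function split below is a heuristic, not part of the
// semantic conventions themselves.
func codeLocation() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return nil
	}
	qualified := runtime.FuncForPC(pc).Name() // e.g. "main.main"
	namespace, function := qualified, qualified
	if i := strings.LastIndex(qualified, "."); i >= 0 {
		namespace, function = qualified[:i], qualified[i+1:]
	}
	return []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
		semconv.CodeNamespace(namespace),
		semconv.CodeFunction(function),
	}
}

func main() { _ = codeLocation() }
```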
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
-	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
-	// conventions. It represents the current "managed" thread ID (as opposed
-	// to OS thread ID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 42
-	ThreadIDKey = attribute.Key("thread.id")
-
-	// ThreadNameKey is the attribute Key conforming to the "thread.name"
-	// semantic conventions. It represents the current thread name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'main'
-	ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
-	return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
-	return ThreadNameKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
-	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
-	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-	// invoked ARN as provided on the `Context` passed to the function
-	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-	// `/runtime/invocation/next` response, where applicable).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
-	// Note: This may be different from `cloud.resource_id` if an alias is
-	// involved.
-	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` response, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
-	return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
-	// CloudeventsEventIDKey is the attribute Key conforming to the
-	// "cloudevents.event_id" semantic conventions. It represents the
-	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-	// that uniquely identifies the event.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
-	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
-	// CloudeventsEventSourceKey is the attribute Key conforming to the
-	// "cloudevents.event_source" semantic conventions. It represents the
-	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-	// that identifies the context in which an event happened.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'https://github.com/cloudevents',
-	// '/cloudevents/spec/pull/123', 'my-service'
-	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
-	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
-	// "cloudevents.event_spec_version" semantic conventions. It represents the
-	// [version of the CloudEvents
-	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-	// which the event uses.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1.0'
-	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
-	// CloudeventsEventSubjectKey is the attribute Key conforming to the
-	// "cloudevents.event_subject" semantic conventions. It represents the
-	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-	// of the event in the context of the event producer (identified by
-	// source).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mynewfile.jpg'
-	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
-	// CloudeventsEventTypeKey is the attribute Key conforming to the
-	// "cloudevents.event_type" semantic conventions. It represents the
-	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-	// that contains a value describing the type of event related to the
-	// originating occurrence.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'com.github.pull_request.opened',
-	// 'com.example.object.deleted.v2'
-	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// that uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
-	return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// that identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
-	return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
-	return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
-	return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// that contains a value describing the type of event related to the
-// originating occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
-	return CloudeventsEventTypeKey.String(val)
-}
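A minimal sketch of attaching the CloudEvents attributes when producing an event; the tracer name and event values are illustrative (taken from the documentation's own examples), and only event_id and event_source are Required by the convention.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "cloudevents.send")
	defer span.End()

	// event_id and event_source are Required; the rest are Optional.
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}
```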
-
-// Semantic conventions for the OpenTracing Shim
-const (
-	// OpentracingRefTypeKey is the attribute Key conforming to the
-	// "opentracing.ref_type" semantic conventions. It represents the
-	// parent-child Reference type
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The causal relationship between a child Span and a parent Span.
-	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
-	// The parent Span depends on the child Span in some capacity
-	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
-	// The parent Span doesn't depend in any way on the result of the child Span
-	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
-	// OTelStatusCodeKey is the attribute Key conforming to the
-	// "otel.status_code" semantic conventions. It represents the name of the
-	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
-	// UNSET.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	OTelStatusCodeKey = attribute.Key("otel.status_code")
-
-	// OTelStatusDescriptionKey is the attribute Key conforming to the
-	// "otel.status_description" semantic conventions. It represents the
-	// description of the Status if it has a value, otherwise not set.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'resource not found'
-	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
-	// The operation has been validated by an Application developer or Operator to have completed successfully
-	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
-	// The operation contains an error
-	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
-	return OTelStatusDescriptionKey.String(val)
-}
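For a non-OTLP exporter, span status is flattened into these two attributes. A small sketch under the convention's rule that nothing is emitted for an UNSET status; the description value is illustrative.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// How a non-OTLP exporter might flatten span status into attributes;
	// for an UNSET status neither attribute would be emitted.
	attrs := []attribute.KeyValue{
		semconv.OTelStatusCodeError,
		semconv.OTelStatusDescription("resource not found"),
	}
	fmt.Println(attrs)
}
```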
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
-	// FaaSInvocationIDKey is the attribute Key conforming to the
-	// "faas.invocation_id" semantic conventions. It represents the invocation
-	// ID of the current function invocation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
-	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
-	return FaaSInvocationIDKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
-	// FaaSDocumentCollectionKey is the attribute Key conforming to the
-	// "faas.document.collection" semantic conventions. It represents the name
-	// of the source on which the triggering operation was performed. For
-	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
-	// Cosmos DB to the database name.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'myBucketName', 'myDBName'
-	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
-	// FaaSDocumentNameKey is the attribute Key conforming to the
-	// "faas.document.name" semantic conventions. It represents the document
-	// name/table subjected to the operation. For example, in Cloud Storage or
-	// S3 is the name of the file, and in Cosmos DB the table name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myFile.txt', 'myTableName'
-	FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
-	// FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It represents the
-	// describes the type of the operation that was performed on the data.
-	//
-	// Type: Enum
-	// RequirementLevel: Required
-	// Stability: experimental
-	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
-	// FaaSDocumentTimeKey is the attribute Key conforming to the
-	// "faas.document.time" semantic conventions. It represents a string
-	// containing the time when the data was accessed in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-)
-
-var (
-	// When a new object is created
-	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
-	// When an object is modified
-	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
-	// When an object is deleted
-	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
-	return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
-	return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
-	return FaaSDocumentTimeKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
-	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
-	// conventions. It represents a string containing the schedule period as
-	// [Cron
-	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0/5 * * * ? *'
-	FaaSCronKey = attribute.Key("faas.cron")
-
-	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
-	// conventions. It represents a string containing the function invocation
-	// time in the [ISO
-	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2020-01-23T13:47:06Z'
-	FaaSTimeKey = attribute.Key("faas.time")
-)
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
-	return FaaSCronKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
-	return FaaSTimeKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
-	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
-	// semantic conventions. It represents a boolean that is true if the
-	// serverless function is executed for the first time (aka cold-start).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
-	return FaaSColdstartKey.Bool(val)
-}
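A sketch combining the timer and cold-start attributes on an incoming FaaS span; the RFC 3339 formatting for faas.time is an assumption consistent with the ISO 8601/UTC examples above, and the tracer name is illustrative.

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "scheduled-run")
	defer span.End()

	span.SetAttributes(
		semconv.FaaSColdstart(true),
		semconv.FaaSCron("0/5 * * * ? *"),
		// faas.time is a string in ISO 8601 (RFC 3339) UTC form.
		semconv.FaaSTime(time.Now().UTC().Format(time.RFC3339)),
	)
}
```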
-
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// tracing users and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
-	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
-	// semantic conventions. It represents the AWS request ID as returned in
-	// the response headers `x-amz-request-id` or `x-amz-requestid`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
-	AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
-	return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
-	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
-	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-	// value of the `AttributesToGet` request parameter.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'lives', 'id'
-	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
-	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
-	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
-	// value of the `ConsistentRead` request parameter.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
-	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
-	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-	// JSON-serialized value of each item in the `ConsumedCapacity` response
-	// field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
-	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
-	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
-	// { "CapacityUnits": number, "ReadCapacityUnits": number,
-	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
-	// { "CapacityUnits": number, "ReadCapacityUnits": number,
-	// "WriteCapacityUnits": number }, "TableName": "string",
-	// "WriteCapacityUnits": number }'
-	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
-	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
-	// "aws.dynamodb.index_name" semantic conventions. It represents the value
-	// of the `IndexName` request parameter.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'name_to_group'
-	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
-	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
-	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-	// represents the JSON-serialized value of the `ItemCollectionMetrics`
-	// response field.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
-	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
-	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
-	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
-	// "SizeEstimateRangeGB": [ number ] } ] }'
-	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
-	// AWSDynamoDBLimitKey is the attribute Key conforming to the
-	// "aws.dynamodb.limit" semantic conventions. It represents the value of
-	// the `Limit` request parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 10
-	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
-	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
-	// "aws.dynamodb.projection" semantic conventions. It represents the value
-	// of the `ProjectionExpression` request parameter.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
-	// RelatedItems, ProductReviews'
-	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
-	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
-	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
-	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
-	// request parameter.
-	//
-	// Type: double
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1.0, 2.0
-	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
-	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
-	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
-	// It represents the value of the
-	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-	//
-	// Type: double
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1.0, 2.0
-	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
-	// AWSDynamoDBSelectKey is the attribute Key conforming to the
-	// "aws.dynamodb.select" semantic conventions. It represents the value of
-	// the `Select` request parameter.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
-	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
-	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
-	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
-	// in the `RequestItems` object field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Users', 'Cats'
-	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-)
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
-	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
-	return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
-	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
-	return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
-	return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
-	return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
-	return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
-	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
-	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
-	return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
-	return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
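A sketch of mirroring DynamoDB request parameters onto a client span with the helpers above; the span name and parameter values are illustrative, drawn from the documentation's examples.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "DynamoDB.Query")
	defer span.End()

	// Mirror the request parameters onto the span.
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
	)
}
```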
-
-// DynamoDB.CreateTable
-const (
-	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
-	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
-	// represents the JSON-serialized value of each item of the
-	// `GlobalSecondaryIndexes` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
-	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
-	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
-	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
-	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
-	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
-	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-	// represents the JSON-serialized value of each item of the
-	// `LocalSecondaryIndexes` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "IndexARN": "string", "IndexName": "string",
-	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
-	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
-	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
-	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
-	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
-	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
-	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
-	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
-	// the value of the `ExclusiveStartTableName` request parameter.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Users', 'CatsTable'
-	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
-	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
-	// "aws.dynamodb.table_count" semantic conventions. It represents the the
-	// number of items in the `TableNames` response parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 20
-	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
-	return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
-	return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
-	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
-	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
-	// value of the `ScanIndexForward` request parameter.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
-	return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
-	// AWSDynamoDBCountKey is the attribute Key conforming to the
-	// "aws.dynamodb.count" semantic conventions. It represents the value of
-	// the `Count` response parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 10
-	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
-	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
-	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
-	// value of the `ScannedCount` response parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 50
-	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
-	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
-	// "aws.dynamodb.segment" semantic conventions. It represents the value of
-	// the `Segment` request parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 10
-	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
-	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
-	// "aws.dynamodb.total_segments" semantic conventions. It represents the
-	// value of the `TotalSegments` request parameter.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 100
-	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
-	return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
-	return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
-	return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
-	return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
-	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
-	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
-	// represents the JSON-serialized value of each item in the
-	// `AttributeDefinitions` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
-	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
-	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
-	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-	// conventions. It represents the JSON-serialized value of each item in the
-	// `GlobalSecondaryIndexUpdates` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
-	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
-	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
-	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
-	// "WriteCapacityUnits": number } }'
-	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
-	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
-	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
-	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
-	// semantic conventions. It represents the S3 bucket name the request
-	// refers to. Corresponds to the `--bucket` parameter of the [S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-	// operations.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'some-bucket-name'
-	// Note: The `bucket` attribute is applicable to all S3 operations that
-	// reference a bucket, i.e. that require the bucket name as a mandatory
-	// parameter.
-	// This applies to almost all S3 operations except `list-buckets`.
-	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
-	// AWSS3CopySourceKey is the attribute Key conforming to the
-	// "aws.s3.copy_source" semantic conventions. It represents the source
-	// object (in the form `bucket`/`key`) for the copy operation.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'someFile.yml'
-	// Note: The `copy_source` attribute applies to S3 copy operations and
-	// corresponds to the `--copy-source` parameter
-	// of the [copy-object operation within the S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
-	// This applies in particular to the following operations:
-	//
-	// -
-	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
-	// -
-	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
-	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
-	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
-	// semantic conventions. It represents the delete request container that
-	// specifies the objects to be deleted.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
-	// Note: The `delete` attribute is only applicable to the
-	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
-	// operation.
-	// The `delete` attribute corresponds to the `--delete` parameter of the
-	// [delete-objects operation within the S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
-	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
-	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
-	// conventions. It represents the S3 object key the request refers to.
-	// Corresponds to the `--key` parameter of the [S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-	// operations.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'someFile.yml'
-	// Note: The `key` attribute is applicable to all object-related S3
-	// operations, i.e. that require the object key as a mandatory parameter.
-	// This applies in particular to the following operations:
-	//
-	// -
-	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
-	// -
-	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
-	// -
-	// [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
-	// -
-	// [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
-	// -
-	// [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
-	// -
-	// [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
-	// -
-	// [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
-	// -
-	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
-	// -
-	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
-	// -
-	// [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
-	// -
-	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
-	// -
-	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
-	// -
-	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
-	AWSS3KeyKey = attribute.Key("aws.s3.key")
-
-	// AWSS3PartNumberKey is the attribute Key conforming to the
-	// "aws.s3.part_number" semantic conventions. It represents the part number
-	// of the part being uploaded in a multipart-upload operation. This is a
-	// positive integer between 1 and 10,000.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3456
-	// Note: The `part_number` attribute is only applicable to the
-	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
-	// and
-	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
-	// operations.
-	// The `part_number` attribute corresponds to the `--part-number` parameter
-	// of the
-	// [upload-part operation within the S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
-	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
-	// AWSS3UploadIDKey is the attribute Key conforming to the
-	// "aws.s3.upload_id" semantic conventions. It represents the upload ID
-	// that identifies the multipart upload.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
-	// Note: The `upload_id` attribute applies to S3 multipart-upload
-	// operations and corresponds to the `--upload-id` parameter
-	// of the [S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-	// multipart operations.
-	// This applies in particular to the following operations:
-	//
-	// -
-	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
-	// -
-	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
-	// -
-	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
-	// -
-	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
-	// -
-	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
-	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
-	return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
-	return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
-	return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
-	return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
-	return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
-	return AWSS3UploadIDKey.String(val)
-}
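A sketch of annotating a multipart-upload span with the S3 helpers above, using the documentation's example values; the span and tracer names are assumptions.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "S3.UploadPart")
	defer span.End()

	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}
```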
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
-	// GraphqlDocumentKey is the attribute Key conforming to the
-	// "graphql.document" semantic conventions. It represents the GraphQL
-	// document being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
-	// Note: The value may be sanitized to exclude sensitive information.
-	GraphqlDocumentKey = attribute.Key("graphql.document")
-
-	// GraphqlOperationNameKey is the attribute Key conforming to the
-	// "graphql.operation.name" semantic conventions. It represents the name of
-	// the operation being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'findBookByID'
-	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
-	// GraphqlOperationTypeKey is the attribute Key conforming to the
-	// "graphql.operation.type" semantic conventions. It represents the type of
-	// the operation being executed.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'query', 'mutation', 'subscription'
-	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
-	// GraphQL query
-	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
-	// GraphQL mutation
-	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
-	// GraphQL subscription
-	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
-	return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
-	return GraphqlOperationNameKey.String(val)
-}
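Before moving on to the replacement package, a sketch of the GraphQL conventions in use; the operation and document are the documentation's own examples, and the document value may need sanitizing as noted above.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "query findBookByID")
	defer span.End()

	span.SetAttributes(
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlOperationName("findBookByID"),
		// The document may be sanitized to exclude sensitive values.
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	)
}
```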
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000..2de1fc3c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000..d8dc822b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It represents the uniquely
+	// identifies the framework API revision offered by a version
+	// (`os.version`) of the android operating system. More information can be
+	// found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
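+
+// Usage sketch (editor's illustrative note, not generated from the
+// specification): the constructors in this package return
+// attribute.KeyValue values that are typically attached to a span via
+// SetAttributes. The tracer name "example" and the span name "startup"
+// are assumptions, and ctx is assumed to be in scope.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "startup")
+//	defer span.End()
+//	span.SetAttributes(AndroidOSAPILevel("33"))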
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result: whether the lease was acquired or, if not,
+	// the rejection reason.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: stable
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+	// to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+	// It represents the ASP.NET Core exception middleware handling result.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'handled', 'unhandled'
+	AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating whether the request was handled by the application
+	// pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+	// AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+	// "aspnetcore.routing.match_status" semantic conventions. It represents
+	// the match result (success or failure).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'success', 'failure'
+	AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+	// Lease was acquired
+	AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+	// Lease request was rejected by the endpoint limiter
+	AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+	// Lease request was rejected by the global limiter
+	AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+	// Lease request was canceled
+	AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+	// Exception was handled by the exception handling middleware
+	AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+	// Exception was not handled by the exception handling middleware
+	AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+	// Exception handling was skipped because the response had started
+	AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+	// Exception handling didn't run because the request was aborted
+	AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+	// Match succeeded
+	AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+	// Match failed
+	AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+	return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+	return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating whether the request was handled by the application
+// pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+	return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+	return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
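+
+// Illustrative sketch (editor's note, not part of the generated
+// specification): the enum variables above are pre-built KeyValues and
+// can be passed to SetAttributes directly, alongside the constructor
+// helpers. The span variable is assumed to be in scope.
+//
+//	span.SetAttributes(
+//		AspnetcoreRateLimitingResultAcquired,
+//		AspnetcoreRateLimitingPolicy("fixed"),
+//	)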
+
+// Generic attributes for AWS services.
+const (
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in
+	// the response headers `x-amz-request-id` or `x-amz-requestid`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// Attributes for AWS DynamoDB.
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
+	// value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number }, "TableName": "string",
+	// "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+	// "aws.dynamodb.index_name" semantic conventions. It represents the value
+	// of the `IndexName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+	// represents the JSON-serialized value of the `ItemCollectionMetrics`
+	// response field.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+	// "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+	// AWSDynamoDBLimitKey is the attribute Key conforming to the
+	// "aws.dynamodb.limit" semantic conventions. It represents the value of
+	// the `Limit` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
+	// "aws.dynamodb.projection" semantic conventions. It represents the value
+	// of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+	// RelatedItems, ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+	// request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+	// It represents the value of the
+	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBSelectKey is the attribute Key conforming to the
+	// "aws.dynamodb.select" semantic conventions. It represents the value of
+	// the `Select` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
+	// in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+	return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+	return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+	return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+	return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+	return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+	return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+	return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
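+
+// Usage sketch (editor's illustrative note, not generated from the
+// specification): a DynamoDB client-call span would typically combine
+// request and response attributes from the group above. The tracer and
+// span names are assumptions; the values reuse the documented examples.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
+//	defer span.End()
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//		AWSDynamoDBScannedCount(50),
+//	)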
+
+// Attributes for AWS Elastic Container Service (ECS).
+const (
+	// AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+	// semantic conventions. It represents the ID of a running ECS task. The ID
+	// MUST be extracted from `task.arn`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
+	// populated.)
+	// Stability: experimental
+	// Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
+	// '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+	AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
+	// running [ECS
+	// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
+	// 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the family
+	// name of the [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+	// used to create the ECS task.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for the task definition used to create the ECS task.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the
+// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
+// ECS task. The ID MUST be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+	return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+	return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+	return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS
+// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// used to create the ECS task.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
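+
+// Illustrative sketch (editor's note, not part of the generated
+// specification): ECS attributes describe the runtime environment rather
+// than a single operation, so they are usually set on the SDK resource,
+// e.g. via go.opentelemetry.io/otel/sdk/resource. SchemaURL refers to the
+// schema constant exported elsewhere in this package.
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		AWSECSLaunchtypeFargate,
+//		AWSECSTaskFamily("opentelemetry-family"),
+//		AWSECSTaskRevision("8"),
+//	)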
+
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
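+
+// Illustrative sketch (editor's note, not part of the generated
+// specification): the log group and stream attributes are string slices
+// because one application may write to several destinations, for example
+// sidecar containers; the values reuse the documented examples.
+//
+//	attrs := []attribute.KeyValue{
+//		AWSLogGroupNames("/aws/lambda/my-function"),
+//		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}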
+
+// Attributes for AWS Lambda.
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
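+
+// Usage sketch (editor's illustrative note, not generated from the
+// specification): in a Lambda handler the invoked ARN comes from the
+// invocation context; invokedARN below is an assumed string in scope,
+// and span is assumed to be the active span.
+//
+//	span.SetAttributes(AWSLambdaInvokedARN(invokedARN))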
+
+// Attributes for AWS S3.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+	// conventions. It represents the S3 object key the request refers to.
+	// Corresponds to the `--key` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `key` attribute is applicable to all object-related S3
+	// operations, i.e. that require the object key as a mandatory parameter.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// -
+	// [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+	// -
+	// [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+	// -
+	// [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+	// -
+	// [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+	// -
+	// [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+	// AWSS3PartNumberKey is the attribute Key conforming to the
+	// "aws.s3.part_number" semantic conventions. It represents the part number
+	// of the part being uploaded in a multipart-upload operation. This is a
+	// positive integer between 1 and 10,000.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3456
+	// Note: The `part_number` attribute is only applicable to the
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// and
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	// operations.
+	// The `part_number` attribute corresponds to the `--part-number` parameter
+	// of the
+	// [upload-part operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+	// AWSS3UploadIDKey is the attribute Key conforming to the
+	// "aws.s3.upload_id" semantic conventions. It represents the upload ID
+	// that identifies the multipart upload.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+	// Note: The `upload_id` attribute applies to S3 multipart-upload
+	// operations and corresponds to the `--upload-id` parameter
+	// of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// multipart operations.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
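+
+// Illustrative sketch (editor's note, not part of the generated
+// specification): an upload-part span would combine several of the S3
+// attributes above; the tracer and span names are assumptions and the
+// values reuse the documented examples.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "S3.UploadPart")
+//	defer span.End()
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3PartNumber(3456),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//	)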
+
+// The web browser attributes
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and
+	// version pairs, each separated by a space.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the browser's user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+
+	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+	// semantic conventions. It represents a boolean that is true if the
+	// browser is running on a mobile device
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
+	// SHOULD be left unset.
+	BrowserMobileKey = attribute.Key("browser.mobile")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client
+	// Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in
+	// the [`os.type` and `os.name` attributes](./os.md). However, for
+	// consistency, the values in the `browser.platform` attribute should
+	// capture the exact value that the user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the browser's user.
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
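+
+// Illustrative sketch (editor's note, not part of the generated
+// specification): browser attributes mirror the UA client hints API; a
+// web-telemetry pipeline might record them as below, reusing the
+// documented example values. The span variable is assumed to be in scope.
+//
+//	span.SetAttributes(
+//		BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+//		BrowserMobile(false),
+//	)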
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
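+
+// Usage sketch (editor's illustrative note, not generated from the
+// specification): on the server side of a connection the peer is recorded
+// with the client attributes; the values reuse the documented examples
+// and span is assumed to be in scope.
+//
+//	span.SetAttributes(
+//		ClientAddress("client.example.com"),
+//		ClientPort(65123),
+//	)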
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the cloud
+	// regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region in which
+	// the resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	//   with the resolved function version, as the same runtime instance may
+	// be invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	//   *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	//   a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Apps
+	CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone in which the resource is running. Cloud regions often
+// have multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
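+
+// exampleCloudResourceAttrs is an illustrative sketch rather than part of the
+// semconv API: it shows how the enum values and helper constructors above
+// compose into a set of attributes describing a hypothetical AWS Lambda
+// environment. The function name and all values are assumptions for the
+// example only.
+func exampleCloudResourceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudProviderAWS,       // predefined enum value, used directly
+		CloudPlatformAWSLambda, // prefix SHOULD match cloud.provider
+		CloudRegion("us-east-1"),
+		CloudAvailabilityZone("us-east-1c"),
+		CloudAccountID("123456789012"),
+	}
+}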
+
+// Attributes for CloudEvents.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
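+
+// exampleCloudeventsSpanAttrs is an illustrative sketch rather than part of
+// the semconv API: attributes an instrumentation might attach to a span that
+// processes a single CloudEvent. The function name and all values are
+// hypothetical.
+func exampleCloudeventsSpanAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+		CloudeventsEventSource("https://github.com/cloudevents"),
+		CloudeventsEventSpecVersion("1.0"),
+		CloudeventsEventType("com.example.object.deleted.v2"),
+		CloudeventsEventSubject("mynewfile.jpg"),
+	}
+}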
+
+// These attributes allow reporting the unit of code that produced a span and
+// therefore provide more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeStacktraceKey is the attribute Key conforming to the
+	// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+	// string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+	// at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+	// at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
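+
+// exampleCodeAttrs is an illustrative sketch rather than part of the semconv
+// API: code-location attributes for a span produced by a hypothetical
+// handler. In Go instrumentation these values would typically be derived via
+// runtime.Caller / runtime.FuncForPC rather than hardcoded.
+func exampleCodeAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CodeNamespace("com.example.MyHTTPService"),
+		CodeFunction("serveRequest"),
+		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+		CodeLineNumber(42),
+		CodeColumn(16),
+	}
+}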
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents the all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
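+
+// exampleContainerAttrs is an illustrative sketch rather than part of the
+// semconv API. Note that the slice-valued helpers (ContainerCommandArgs,
+// ContainerImageTags, ContainerImageRepoDigests) are variadic, while enum
+// values such as ContainerCPUStateUser are ready-made KeyValues. All values
+// here are hypothetical.
+func exampleContainerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerName("opentelemetry-autoconf"),
+		ContainerID("a3bf90e006b2"),
+		ContainerRuntime("containerd"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTags("v1.27.1"),
+		ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
+	}
+}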
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.
+	// Semantic conventions for individual database systems SHOULD document
+	// what `db.namespace` means in the context of that system.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBNamespaceKey = attribute.Key("db.namespace")
+
+	// DBOperationNameKey is the attribute Key conforming to the
+	// "db.operation.name" semantic conventions. It represents the name of the
+	// operation or command being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: It is RECOMMENDED to capture the value as provided by the
+	// application without attempting to do any case normalization.
+	DBOperationNameKey = attribute.Key("db.operation.name")
+
+	// DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+	// semantic conventions. It represents the database query being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
+	// "WuValue"'
+	DBQueryTextKey = attribute.Key("db.query.text")
+
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents the database management system (DBMS) product
+	// as identified by the client instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The actual DBMS may differ from the one identified by the client.
+	// For example, when using PostgreSQL client libraries to connect to a
+	// CockroachDB, the `db.system` is set to `postgresql` based on the
+	// instrumentation's best knowledge.
+	DBSystemKey = attribute.Key("db.system")
+)
+
+var (
+	// idle
+	DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
+	// used
+	DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
+)
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// Microsoft SQL Server Compact
+	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+	return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+	return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+	return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+	return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+	return DBQueryTextKey.String(val)
+}
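+
+// exampleDBClientAttrs is an illustrative sketch rather than part of the
+// semconv API: attributes for a hypothetical client span executing a query
+// against a PostgreSQL server, combining a db.system enum value with the
+// string helpers above. The query text is captured as provided, as the
+// conventions recommend.
+func exampleDBClientAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemPostgreSQL,
+		DBNamespace("customers"),
+		DBCollectionName("public.users"),
+		DBOperationName("SELECT"),
+		DBQueryText("SELECT * FROM wuser_table where username = ?"),
+	}
+}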
+
+// This group defines attributes for Cassandra.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents the
+	// whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
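+
+// exampleCassandraAttrs is an illustrative sketch rather than part of the
+// semconv API: Cassandra-specific attributes layered on top of the generic
+// db.* attributes for a single query. All values are hypothetical.
+func exampleCassandraAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemCassandra,
+		DBCassandraConsistencyLevelLocalQuorum,
+		DBCassandraCoordinatorDC("us-west-2"),
+		DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+		DBCassandraIdempotence(true),
+		DBCassandraPageSize(5000),
+	}
+}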
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// cosmosDB Operation Type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+	// consumed for that operation
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
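+
+// exampleCosmosDBAttrs is an illustrative sketch rather than part of the
+// semconv API: response metadata a Cosmos DB instrumentation might record
+// after a hypothetical point read completes through the gateway.
+func exampleCosmosDBAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemCosmosDB,
+		DBCosmosDBConnectionModeGateway,
+		DBCosmosDBOperationTypeRead,
+		DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
+		DBCosmosDBRequestCharge(46.18),
+		DBCosmosDBStatusCode(200),
+	}
+}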
+
+// This group defines attributes for Elasticsearch.
+const (
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a request was
+	// routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represents an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It is deprecated; use the `device.app.lifecycle`
+	// event definition, including `android.state` as a payload field,
+	// instead.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// from which the `OS identifiers` are derived.
+	AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+	AndroidStateCreated = AndroidStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+	AndroidStateBackground = AndroidStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+	AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the
+	// destination address - domain name if available without reverse DNS
+	// lookup; otherwise, IP address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the source side, and when communicating through
+	// an intermediary, `destination.address` SHOULD represent the destination
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the destination
+	// port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// Describes device attributes.
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier representing the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values
+	// outlined below. This value is not an advertising identifier and MUST NOT
+	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on
+	// best practices and exact implementation details. Caution should be taken
+	// when storing personal data or anything which can identify a user. GDPR
+	// and data protection laws may apply, ensure you do your own due
+	// diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine-readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human-readable version of
+	// the device model rather than a machine-readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
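+
+// A minimal usage sketch, not part of the generated conventions: the
+// helpers above return attribute.KeyValue values that can be attached to a
+// span or resource. The span variable and the values below are assumptions
+// for illustration only.
+//
+//	span.SetAttributes(
+//		DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
+//		DeviceManufacturer("Samsung"),
+//		DeviceModelIdentifier("SM-G920F"),
+//		DeviceModelName("Samsung Galaxy S6"),
+//	)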
+
+// These attributes may be used for any disk related operation.
+const (
+	// DiskIoDirectionKey is the attribute Key conforming to the
+	// "disk.io.direction" semantic conventions. It represents the disk IO
+	// operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read'
+	DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+	// read
+	DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+	// write
+	DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report a DNS query.
+const (
+	// DNSQuestionNameKey is the attribute Key conforming to the
+	// "dns.question.name" semantic conventions. It represents the name being
+	// queried.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'www.example.com', 'opentelemetry.io'
+	// Note: If the name field contains non-printable characters (below 32 or
+	// above 126), those characters should be represented as escaped base 10
+	// integers (\DDD). Backslashes and quotes should be escaped. Tabs,
+	// carriage returns, and line feeds should be converted to \t, \r, and \n
+	// respectively.
+	DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+	return DNSQuestionNameKey.String(val)
+}
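+
+// A hedged sketch of the escaping rules described in the note on
+// "dns.question.name": non-printable bytes become \DDD, backslashes and
+// quotes are escaped, and tab/CR/LF become \t, \r, and \n. This helper is
+// illustrative only (assumes imports "fmt" and "strings").
+//
+//	func escapeDNSName(name string) string {
+//		var b strings.Builder
+//		for _, c := range []byte(name) {
+//			switch {
+//			case c == '\t':
+//				b.WriteString(`\t`)
+//			case c == '\r':
+//				b.WriteString(`\r`)
+//			case c == '\n':
+//				b.WriteString(`\n`)
+//			case c == '\\' || c == '"':
+//				b.WriteByte('\\')
+//				b.WriteByte(c)
+//			case c < 32 || c > 126:
+//				fmt.Fprintf(&b, `\%03d`, c) // e.g. byte 7 -> \007
+//			default:
+//				b.WriteByte(c)
+//			}
+//		}
+//		return b.String()
+//	}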
+
+// Attributes for operations with an authenticated and/or authorized enduser.
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
+	// semantic conventions. It represents the username or client_id extracted
+	// from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+	// in the inbound request from outside the system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+	// semantic conventions. It represents the actual/assumed role the client
+	// is making the request under extracted from token or application security
+	// context.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+
+	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+	// semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses extracted from token or application
+	// security context. The value would come from the scope associated with an
+	// [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+	// value in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
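+
+// An illustrative sketch, assuming a span in scope and token claims already
+// parsed elsewhere (the claims variable is hypothetical):
+//
+//	span.SetAttributes(
+//		EnduserID(claims.Subject),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message write:files"),
+//	)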
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the operation
+	// ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be
+	// used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
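+
+// A minimal sketch of the guidance above, assuming a span and err in scope:
+// use a predictable, low-cardinality value when the error class is known,
+// and fall back to ErrorTypeOther otherwise.
+//
+//	if err != nil {
+//		switch {
+//		case errors.Is(err, context.DeadlineExceeded):
+//			span.SetAttributes(ErrorTypeKey.String("timeout"))
+//		default:
+//			span.SetAttributes(ErrorTypeOther)
+//		}
+//	}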
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// The exception may actually still be "in flight" in some languages
+	// (e.g. if the exception is passed to a context manager's `__exit__`
+	// method in Python) but will usually be caught at the point of
+	// recording the exception in most languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the
+// exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
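+
+// A hedged sketch tying the four exception attributes together (span and
+// err are assumed in scope; capturing the stack via runtime/debug is one
+// possible choice, not mandated by the conventions):
+//
+//	span.SetAttributes(
+//		ExceptionType(fmt.Sprintf("%T", err)),
+//		ExceptionMessage(err.Error()),
+//		ExceptionStacktrace(string(debug.Stack())),
+//		// Recorded just before span.End(), so the escape is known.
+//		ExceptionEscaped(true),
+//	)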
+
+// FaaS attributes
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name,
+	// and in Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 this is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the type
+	// of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string that will potentially be reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+// to the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string that will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
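+
+// A worked sketch of the note above, assuming an AWS Lambda environment and
+// a span in scope: the environment variable reports MiB, so multiply by
+// 1,048,576 to convert to bytes (error handling elided).
+//
+//	if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		span.SetAttributes(FaaSMaxMemory(mib * 1_048_576)) // 128 MiB -> 134217728
+//	}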
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version
+	// of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of
+// the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
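+
+// An illustrative sketch, assuming a span in scope; the flag and values
+// mirror the examples above. The variant carries the semantic identifier,
+// falling back to a stringified value when none exists.
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"), // variant for the underlying value "#c05543"
+//	)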
+
+// Describes file attributes.
+const (
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is
+	// located. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/user', 'C:\\Program Files\\MyApp'
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the
+	// leading dot.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'png', 'gz'
+	// Note: When the file name has multiple extensions (example.tar.gz), only
+	// the last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileNameKey is the attribute Key conforming to the "file.name" semantic
+	// conventions. It represents the name of the file including the extension,
+	// without the directory.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.png'
+	FileNameKey = attribute.Key("file.name")
+
+	// FilePathKey is the attribute Key conforming to the "file.path" semantic
+	// conventions. It represents the full path to the file, including the file
+	// name. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/alice/example.png', 'C:\\Program
+	// Files\\MyApp\\myapp.exe'
+	FilePathKey = attribute.Key("file.path")
+
+	// FileSizeKey is the attribute Key conforming to the "file.size" semantic
+	// conventions. It represents the file size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FileSizeKey = attribute.Key("file.size")
+)
+
+// FileDirectory returns an attribute KeyValue conforming to the
+// "file.directory" semantic conventions. It represents the directory where the
+// file is located. It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+	return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the
+// "file.extension" semantic conventions. It represents the file extension,
+// excluding the leading dot.
+func FileExtension(val string) attribute.KeyValue {
+	return FileExtensionKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name"
+// semantic conventions. It represents the name of the file including the
+// extension, without the directory.
+func FileName(val string) attribute.KeyValue {
+	return FileNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path"
+// semantic conventions. It represents the full path to the file, including the
+// file name. It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+	return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size"
+// semantic conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+	return FileSizeKey.Int(val)
+}
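+
+// A hedged sketch deriving these attributes with the standard library
+// (hypothetical path; assumes imports "os", "path/filepath", "strings").
+// Note how the extension drops the leading dot and keeps only the last
+// extension, per the note on "file.extension".
+//
+//	const p = "/home/alice/example.tar.gz"
+//	info, _ := os.Stat(p) // error handling elided
+//	span.SetAttributes(
+//		FilePath(p),
+//		FileDirectory(filepath.Dir(p)), // "/home/alice"
+//		FileName(filepath.Base(p)),     // "example.tar.gz"
+//		FileExtension(strings.TrimPrefix(filepath.Ext(p), ".")), // "gz", not "tar.gz"
+//		FileSize(int(info.Size())),
+//	)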
+
+// Attributes for Google Cloud Run.
+const (
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
+	// name of the Cloud Run
+	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
+	// being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+	// index for a task within an execution as provided by the
+	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
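+
+// A minimal sketch reading the two environment variables named in the notes
+// above (error handling elided; attaching the result to a resource is one
+// possible use, not the only one).
+//
+//	exec := os.Getenv("CLOUD_RUN_EXECUTION")
+//	idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX"))
+//	attrs := []attribute.KeyValue{
+//		GCPCloudRunJobExecution(exec),
+//		GCPCloudRunJobTaskIndex(idx),
+//	}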
+
+// Attributes for Google Compute Engine (GCE).
+const (
+	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the
+	// hostname of a GCE instance. This is the full value of the default or
+	// [custom
+	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-host1234.example.com',
+	// 'sample-vm.us-west1-b.c.my-project.internal'
+	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+	// GCPGceInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance
+	// name of a GCE instance. This is the value provided by `host.name`, the
+	// visible name of the instance in the Cloud Console UI, and the prefix for
+	// the default hostname of the instance as defined by the [default internal
+	// DNS
+	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-1', 'my-vm-name'
+	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+	return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+	return GCPGceInstanceNameKey.String(val)
+}
+
+// The attributes used to describe telemetry in the context of LLM (Large
+// Language Models) requests and responses.
+const (
+	// GenAiCompletionKey is the attribute Key conforming to the
+	// "gen_ai.completion" semantic conventions. It represents the full
+	// response received from the LLM.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: "[{'role': 'assistant', 'content': 'The capital of France is
+	// Paris.'}]"
+	// Note: It's RECOMMENDED to format completions as a JSON string matching
+	// [OpenAI messages
+	// format](https://platform.openai.com/docs/guides/text-generation)
+	GenAiCompletionKey = attribute.Key("gen_ai.completion")
+
+	// GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
+	// semantic conventions. It represents the full prompt sent to an LLM.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: "[{'role': 'user', 'content': 'What is the capital of
+	// France?'}]"
+	// Note: It's RECOMMENDED to format prompts as a JSON string matching
+	// [OpenAI
+	// messages
+	// format](https://platform.openai.com/docs/guides/text-generation)
+	GenAiPromptKey = attribute.Key("gen_ai.prompt")
+
+	// GenAiRequestMaxTokensKey is the attribute Key conforming to the
+	// "gen_ai.request.max_tokens" semantic conventions. It represents the
+	// maximum number of tokens the LLM generates for a request.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+	// GenAiRequestModelKey is the attribute Key conforming to the
+	// "gen_ai.request.model" semantic conventions. It represents the name of
+	// the LLM a request is being made to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gpt-4'
+	GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
+
+	// GenAiRequestTemperatureKey is the attribute Key conforming to the
+	// "gen_ai.request.temperature" semantic conventions. It represents the
+	// temperature setting for the LLM request.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0.0
+	GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+	// GenAiRequestTopPKey is the attribute Key conforming to the
+	// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+	// sampling setting for the LLM request.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0
+	GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+	// GenAiResponseFinishReasonsKey is the attribute Key conforming to the
+	// "gen_ai.response.finish_reasons" semantic conventions. It represents the
+	// array of reasons the model stopped generating tokens, corresponding to
+	// each generation received.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'stop'
+	GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+	// GenAiResponseIDKey is the attribute Key conforming to the
+	// "gen_ai.response.id" semantic conventions. It represents the unique
+	// identifier for the completion.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'chatcmpl-123'
+	GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
+
+	// GenAiResponseModelKey is the attribute Key conforming to the
+	// "gen_ai.response.model" semantic conventions. It represents the name of
+	// the LLM a response was generated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gpt-4-0613'
+	GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
+
+	// GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
+	// semantic conventions. It represents the Generative AI product as
+	// identified by the client instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'openai'
+	// Note: The actual GenAI product may differ from the one identified by the
+	// client. For example, when using OpenAI client libraries to communicate
+	// with Mistral, the `gen_ai.system` is set to `openai` based on the
+	// instrumentation's best knowledge.
+	GenAiSystemKey = attribute.Key("gen_ai.system")
+
+	// GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+	// "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+	// number of tokens used in the LLM response (completion).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 180
+	GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+	// GenAiUsagePromptTokensKey is the attribute Key conforming to the
+	// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+	// number of tokens used in the LLM prompt.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+	// OpenAI
+	GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+	return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+	return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+	return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+	return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+	return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+	return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+	return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+	return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+	return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+	return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+	return GenAiUsagePromptTokensKey.Int(val)
+}
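+
+// An illustrative sketch annotating an LLM call span (span assumed in
+// scope; the values mirror the examples above).
+//
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiResponseModel("gpt-4-0613"),
+//		GenAiResponseID("chatcmpl-123"),
+//		GenAiResponseFinishReasons("stop"),
+//		GenAiUsagePromptTokens(100),
+//		GenAiUsageCompletionTokens(180),
+//	)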
+
+// Attributes for GraphQL.
+const (
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
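+
+// A hedged sketch, assuming a span in scope; the document may be sanitized,
+// as the note on "graphql.document" allows. The operation type is set via
+// the enum variables above.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)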
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions. It represents the unique identifier for the
+	// application
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
+	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+	// of level 2 memory cache available to the processor (in Bytes).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12288000
+	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+	// HostCPUFamilyKey is the attribute Key conforming to the
+	// "host.cpu.family" semantic conventions. It represents the family or
+	// generation of the CPU.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', 'PA-RISC 1.1e'
+	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+	// HostCPUModelIDKey is the attribute Key conforming to the
+	// "host.cpu.model.id" semantic conventions. It represents the model
+	// identifier. It provides more granular information about the CPU,
+	// distinguishing it from other CPUs within the same family.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', '9000/778/B180L'
+	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+	// HostCPUModelNameKey is the attribute Key conforming to the
+	// "host.cpu.model.name" semantic conventions. It represents the model
+	// designation of the processor.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+	// HostCPUSteppingKey is the attribute Key conforming to the
+	// "host.cpu.stepping" semantic conventions. It represents the stepping or
+	// core revisions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1', 'r1p1'
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
+	// order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+	// addresses MUST be specified in the [RFC
+	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+	HostIPKey = attribute.Key("host.ip")
+
+	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
+	// conventions. It represents the available MAC addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+	// Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+	// form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+	// as hyphen-separated octets in uppercase hexadecimal form from most to
+	// least significant.
+	HostMacKey = attribute.Key("host.mac")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+	return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+	return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+	return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+	return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+	return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+	return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
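+
+// A minimal, illustrative sketch (not part of the generated API): these
+// helpers are plain attribute constructors, so host attributes are typically
+// attached to a resource via go.opentelemetry.io/otel/sdk/resource. The
+// values below reuse the documented examples; note the required IP and MAC
+// formats from the notes above.
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			HostName("opentelemetry-test"),
+//			HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//			HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"), // RFC 5952 form
+//			HostMac("AC-DE-48-23-45-67"),                         // IEEE RA hyphenated form
+//			HostArchAMD64,
+//		),
+//	)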
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPConnectionStateKey is the attribute Key conforming to the
+	// "http.connection.state" semantic conventions. It represents the state of
+	// the HTTP connection in the HTTP connection pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'active', 'idle'
+	HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the hTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and the `http.request.method`
+	// attribute value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case-insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of the request resending attempt (for any reason,
+	// including redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+	// HTTPRequestSizeKey is the attribute Key conforming to the
+	// "http.request.size" semantic conventions. It represents the total size
+	// of the request in bytes. This should be the total number of bytes sent
+	// over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+	// and HTTP/3), headers, and request body if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseSizeKey is the attribute Key conforming to the
+	// "http.response.size" semantic conventions. It represents the total size
+	// of the response in bytes. This should be the total number of bytes sent
+	// over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+	// HTTP/3), headers, and response body and trailers if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route, that is, the path
+	// template in the format used by the respective server framework.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+	// active state
+	HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+	// idle state
+	HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
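+
+// A hedged sketch of the normalization the `http.request.method` note above
+// prescribes, assuming a `strings` import and a caller-defined set of known
+// methods: unknown methods map to `_OTHER`, and any canonicalization also
+// records the original value in `http.request.method_original`.
+//
+//	func requestMethodAttrs(known map[string]struct{}, m string) []attribute.KeyValue {
+//		u := strings.ToUpper(m)
+//		if _, ok := known[u]; !ok {
+//			return []attribute.KeyValue{HTTPRequestMethodOther, HTTPRequestMethodOriginal(m)}
+//		}
+//		if u != m { // case-insensitive framework: populate the canonical equivalent
+//			return []attribute.KeyValue{HTTPRequestMethodKey.String(u), HTTPRequestMethodOriginal(m)}
+//		}
+//		return []attribute.KeyValue{HTTPRequestMethodKey.String(m)}
+//	}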
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+	return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+	return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+	return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
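+
+// An illustrative use of the HTTP attribute helpers on a server span,
+// assuming a `span` obtained from the go.opentelemetry.io/otel/trace API;
+// the route and size are placeholder values, not prescribed ones.
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID"),
+//		HTTPResponseStatusCode(200),
+//		HTTPResponseBodySize(3495),
+//	)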
+
+// Java Virtual Machine related attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+	// JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+	// semantic conventions. It represents the name of the garbage collector
+	// action.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'end of minor GC', 'end of major GC'
+	// Note: Garbage collector action is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+	JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+	// JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+	// semantic conventions. It represents the name of the garbage collector.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Young Generation', 'G1 Old Generation'
+	// Note: Garbage collector name is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+	JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+	// JvmThreadDaemonKey is the attribute Key conforming to the
+	// "jvm.thread.daemon" semantic conventions. It represents the whether the
+	// thread is daemon or not.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+	// JvmThreadStateKey is the attribute Key conforming to the
+	// "jvm.thread.state" semantic conventions. It represents the state of the
+	// thread.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'runnable', 'blocked'
+	JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+	// A thread that has not yet started is in this state
+	JvmThreadStateNew = JvmThreadStateKey.String("new")
+	// A thread executing in the Java virtual machine is in this state
+	JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+	// A thread that is blocked waiting for a monitor lock is in this state
+	JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+	// A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+	JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+	// A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+	JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+	// A thread that has exited is in this state
+	JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+	return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+	return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents the whether the
+// thread is daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+	return JvmThreadDaemonKey.Bool(val)
+}
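+
+// A sketch of how the JVM enum attributes combine with the metric API,
+// assuming a `meter` from go.opentelemetry.io/otel/metric; the instrument
+// name follows the JVM memory semantic conventions but is an assumption here.
+//
+//	memUsed, _ := meter.Int64UpDownCounter("jvm.memory.used")
+//	memUsed.Add(ctx, usedBytes, metric.WithAttributes(
+//		JvmMemoryTypeHeap,
+//		JvmMemoryPoolName("G1 Old Gen"),
+//	))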
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
+	// ever
+	// added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+	// which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	//   ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	//   different from all other UUIDs generated before 3603 A.D., or is
+	//   extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from Pod specification, must be unique within a Pod. Container
+	// runtime usually uses different globally unique name (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+	// K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+	// conforming to the "k8s.container.status.last_terminated_reason" semantic
+	// conventions. It represents the last terminated reason of the Container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Evicted', 'Error'
+	K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
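+
+// Following the `k8s.cluster.uid` note above, a hedged sketch of deriving the
+// pseudo cluster ID from the `kube-system` namespace UID, assuming a
+// client-go `clientset` (*kubernetes.Clientset):
+//
+//	ns, err := clientset.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+//	if err == nil {
+//		attrs = append(attrs, K8SClusterUID(string(ns.UID)))
+//	}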
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+	return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
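+
+// An illustrative way to populate pod-scoped attributes from the Kubernetes
+// downward API; the environment variable names are hypothetical and must be
+// wired up in the pod spec by the deployer.
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
+//		K8SPodName(os.Getenv("POD_NAME")),
+//		K8SNodeName(os.Getenv("NODE_NAME")),
+//	}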
+
+// Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full
+	// path to the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/lib/docker/uuid.log'
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
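+
+// A sketch of deriving the four log-file attributes from a single path,
+// assuming `path/filepath` is imported; symlinks are resolved with
+// filepath.EvalSymlinks, falling back to the raw path on error.
+//
+//	resolved, err := filepath.EvalSymlinks(path)
+//	if err != nil {
+//		resolved = path // assumption: keep the unresolved path on failure
+//	}
+//	attrs := []attribute.KeyValue{
+//		LogFilePath(path),
+//		LogFileName(filepath.Base(path)),
+//		LogFilePathResolved(resolved),
+//		LogFileNameResolved(filepath.Base(resolved)),
+//	}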
+
+// The generic attributes that may be used in any Log Record.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
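+
+// One possible generator for `log.record.uid`, per the ULID suggestion in the
+// note above; github.com/oklog/ulid/v2 is an assumed dependency, and any
+// other unique identifier (e.g. a UUID) works as well.
+//
+//	uid := LogRecordUID(ulid.Make().String()) // e.g. "01ARZ3NDEKTSV4RRFFQ69G5FAV"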
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client.id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker doesn't have such notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationPartitionIDKey is the attribute Key conforming to
+	// the "messaging.destination.partition.id" semantic conventions. It
+	// represents the identifier of the partition messages are sent to or
+	// received from, unique within the `messaging.destination.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1'
+	MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low-cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+	// to the "messaging.destination_publish.anonymous" semantic conventions.
+	// It represents a boolean that is true if the publish message destination
+	// is anonymous (could be unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+	// MessagingDestinationPublishNameKey is the attribute Key conforming to
+	// the "messaging.destination_publish.name" semantic conventions. It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker doesn't have such notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or the uncompressed body
+	// size. If both sizes are known, the uncompressed body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the conversation ID identifying the conversation to which the message
+	// belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or the uncompressed size.
+	// If both sizes are known, the uncompressed size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationNameKey is the attribute Key conforming to the
+	// "messaging.operation.name" semantic conventions. It represents the
+	// system-specific name of the messaging operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack', 'nack', 'send'
+	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+	// MessagingOperationTypeKey is the attribute Key conforming to the
+	// "messaging.operation.type" semantic conventions. It represents a string
+	// identifying the type of the messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents the messaging
+	// system as identified by the client instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The actual messaging system may differ from the one known by the
+	// client. For example, when using Kafka client libraries to communicate
+	// with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+	// the instrumentation's best knowledge.
+	MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+	// One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+	MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+	// A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+	MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+	// One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+	MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+	// One or more messages are delivered to or processed by a consumer
+	MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+	// One or more messages are settled
+	MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+	// Apache ActiveMQ
+	MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+	// Amazon Simple Queue Service (SQS)
+	MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+	// Azure Event Grid
+	MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+	// Azure Event Hubs
+	MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+	// Azure Service Bus
+	MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+	// Google Cloud Pub/Sub
+	MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+	// Java Message Service
+	MessagingSystemJms = MessagingSystemKey.String("jms")
+	// Apache Kafka
+	MessagingSystemKafka = MessagingSystemKey.String("kafka")
+	// RabbitMQ
+	MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+	// Apache RocketMQ
+	MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
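+
+// An illustrative producer-side span, combining the system and operation
+// enums with the destination helpers; per the batching note above,
+// `messaging.batch.message_count` would only be added for batch operations.
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationTypePublish,
+//		MessagingOperationName("send"),
+//		MessagingDestinationName("MyTopic"),
+//	)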
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming
+// to the "messaging.destination.partition.id" semantic conventions. It
+// represents the identifier of the partition messages are sent to or received
+// from, unique within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+	return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low-cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+	return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka are used for grouping alike messages to ensure
+	// they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
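+// A minimal usage sketch (not itself part of the conventions): the helpers
+// above return plain attribute.KeyValue values, so, assuming a trace.Span
+// named span from an already configured tracer, they can be attached directly:
+//
+//	span.SetAttributes(
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaMessageKey("myKey"),
+//		MessagingKafkaMessageOffset(42),
+//		MessagingKafkaMessageTombstone(false),
+//	)
+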
+// This group describes attributes specific to RabbitMQ.
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+	// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+	// It represents the RabbitMQ message delivery tag.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 123
+	MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message, which
+	// determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the it is essential for FIFO message. Messages that belong to the same
+	// message group are always processed one by one within the same consumer
+	// group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark a message besides its
+	// message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
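+// A usage sketch for the enum-valued attributes: the consumption model and
+// message type are exposed as the predeclared KeyValue variables above rather
+// than as constructor functions, so (again assuming a trace.Span named span)
+// they are attached as-is:
+//
+//	span.SetAttributes(
+//		MessagingRocketmqConsumptionModelClustering,
+//		MessagingRocketmqMessageTypeFifo,
+//	)
+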
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides its message
+// ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+	// MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+	// It represents the ack deadline in seconds set for the modify ack
+	// deadline request.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+	// represents the ack id for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack_id'
+	MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+	// conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+	// semantic conventions. It represents the delivery attempt for a given
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic that messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It represents the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic that messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+	return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+	// MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+	// the "messaging.eventhubs.consumer.group" semantic conventions. It
+	// represents the name of the consumer group the event consumer is
+	// associated with.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'indexer'
+	MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+	// MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+	// to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+	// It represents the UTC epoch seconds at which the message has been
+	// accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+	return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It represents the
+	// this describes more details regarding the connection.type. It may be the
+	// type of cell technology connection, but it could be used for describing
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// actual version of the protocol used for network communication.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.1', '2'
+	// Note: If protocol version is subject to negotiation (for example using
+	// [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
+	// SHOULD be set to the negotiated version. If the actual protocol version
+	// is not known, this attribute SHOULD NOT be set.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+	// NetworkTransportKey is the attribute Key conforming to the
+	// "network.transport" semantic conventions. It represents the [OSI
+	// transport layer](https://osi-model.com/transport-layer/) or
+	// [inter-process communication
+	// method](https://wikipedia.org/wiki/Inter-process_communication).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tcp', 'udp'
+	// Note: The value SHOULD be normalized to lowercase.
+	//
+	// Consider always setting the transport when setting a port number, since
+	// a port number is ambiguous without knowing the transport. For example
+	// different processes could be listening on TCP port 12345 and UDP port
+	// 12345.
+	NetworkTransportKey = attribute.Key("network.transport")
+
+	// NetworkTypeKey is the attribute Key conforming to the "network.type"
+	// semantic conventions. It represents the [OSI network
+	// layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+	// GPRS
+	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+	// transmit
+	NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+	// receive
+	NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
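+// A usage sketch tying the pieces above together: per the note on
+// "network.transport", a port number is ambiguous without the transport, so
+// the two are typically recorded side by side (span is an assumed trace.Span):
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//	)
+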
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, for example as reported by the `ver`
+	// or `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, for example as reported by
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
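+// A usage sketch: the os.* keys are resource attributes, so they would
+// normally be bundled into a resource rather than set on individual spans.
+// Assuming the SDK's resource.NewSchemaless constructor
+// (go.opentelemetry.io/otel/sdk/resource), this might look like:
+//
+//	res := resource.NewSchemaless(
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//	)
+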
+// Attributes reserved for OpenTelemetry
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
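+// A usage sketch for non-OTLP exporters that map span status onto attributes:
+// the status code is one of the two predeclared enum values, and the
+// description is only attached alongside an ERROR code (span is an assumed
+// trace.Span):
+//
+//	span.SetAttributes(
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	)
+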
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents the all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string. On Windows, can
+	// be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It represents the
+	// specifies whether the context switches for this data point were
+	// voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents the whether
+	// the process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions. It represents the type
+	// of page fault for this data point. Type `major` is for major/hard page
+	// faults, and `minor` is for minor/soft page faults.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PPID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessRealUserIDKey is the attribute Key conforming to the
+	// "process.real_user.id" semantic conventions. It represents the real user
+	// ID (RUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000
+	ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+	// ProcessRealUserNameKey is the attribute Key conforming to the
+	// "process.real_user.name" semantic conventions. It represents the
+	// username of the real user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessSavedUserIDKey is the attribute Key conforming to the
+	// "process.saved_user.id" semantic conventions. It represents the saved
+	// user ID (SUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1002
+	ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+	// ProcessSavedUserNameKey is the attribute Key conforming to the
+	// "process.saved_user.name" semantic conventions. It represents the
+	// username of the saved user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string. On Windows, can be set to the
+// result of `GetCommandLineW`. Do not set this if you have to assemble it just
+// for monitoring; use `process.command_args` instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
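+// A usage sketch: per the guidance above, prefer recording the received argv
+// via "process.command_args" over assembling a "process.command_line" string
+// just for monitoring; with the current process's os.Args this might look
+// like:
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessCommand(os.Args[0]),
+//		ProcessCommandArgs(os.Args...),
+//	}
+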
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux-based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux-based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+	return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+	return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+	return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+	return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+	return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+	return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+	return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+	return ProcessVpidKey.Int(val)
+}
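+
+// Taken together, these constructors are typically used to attach a coherent
+// set of process attributes to a span or resource. A hedged sketch (the
+// `span` variable and the `os` and `path/filepath` imports are assumed):
+//
+//	span.SetAttributes(
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessExecutableName(filepath.Base(os.Args[0])),
+//		ProcessCommandArgs(os.Args...),
+//	)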
+
+// Attributes for process CPU
+const (
+	// ProcessCPUStateKey is the attribute Key conforming to the
+	// "process.cpu.state" semantic conventions. It represents the CPU state of
+	// the process.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+	// system
+	ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+	// user
+	ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+	// wait
+	ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Attributes for remote procedure calls.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since the protocol allows the id to be
+	// an int, a string, `null`, or missing (for notifications), the value is
+	// expected to be cast to a string for simplicity. Use an empty string in
+	// case of a `null` value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMessageCompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It represents the message ID, which MUST be
+	// calculated as two different counters starting from `1`: one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the
+	// "rpc.message.type" semantic conventions. It represents the whether this
+	// is a received or sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
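+
+// These values mirror google.golang.org/grpc/codes, so the attribute is
+// commonly derived from an RPC error; a sketch assuming the grpc `status`
+// package and a `span` variable:
+//
+//	code := status.Code(err) // codes.OK (0) when err is nil
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(code)))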
+
+var (
+	// sent
+	RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+	// received
+	RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be an
+// int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+	return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the mUST be calculated
+// as two different counters starting from `1` one for sent messages and one
+// for received message.
+func RPCMessageID(val int) attribute.KeyValue {
+	return RPCMessageIDKey.Int(val)
+}
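+
+// The two-counter rule above can be satisfied with one counter per
+// direction, incremented before use so numbering starts at `1`; a sketch
+// assuming a single goroutine per stream (use atomics when shared):
+//
+//	var sentID, recvID int
+//	// for each message sent:
+//	sentID++
+//	sent := []attribute.KeyValue{RPCMessageTypeSent, RPCMessageID(sentID)}
+//	// for each message received:
+//	recvID++
+//	recv := []attribute.KeyValue{RPCMessageTypeReceived, RPCMessageID(recvID)}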
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+	return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
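+
+// For a single logical call, rpc.system, rpc.service, and rpc.method are
+// usually recorded together; a sketch with the documented example values
+// (the `span` variable is assumed):
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	)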
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the server domain name if available
+	// without reverse DNS lookup; otherwise, IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.address` SHOULD represent the server address
+	// behind any intermediaries, for example proxies, if it's available.
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the server port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.port` SHOULD represent the server port behind
+	// any intermediaries, for example proxies, if it's available.
+	ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+	return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+	return ServerPortKey.Int(val)
+}
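+
+// When only a combined "host:port" endpoint is at hand, both attributes can
+// be derived from it; a sketch using the standard library (the `net` and
+// `strconv` imports are assumed and errors are elided):
+//
+//	host, portStr, _ := net.SplitHostPort("example.com:8080")
+//	port, _ := strconv.Atoi(portStr)
+//	attrs := []attribute.KeyValue{ServerAddress(host), ServerPort(port)}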
+
+// A service instance.
+const (
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to
+	// distinguish instances of the same service that exist at the same time
+	// (e.g. instances of a horizontally scaled
+	// service).
+	//
+	// Implementations, such as SDKs, are recommended to generate a random
+	// Version 1 or Version 4 [RFC
+	// 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+	// inherent unique ID as the source of
+	// this value if stability is desirable. In that case, the ID SHOULD be
+	// used as the source of a UUID Version 5 and
+	// SHOULD use the following UUID as the namespace:
+	// `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+	//
+	// UUIDs are typically recommended, as only an opaque value for the
+	// purposes of identifying a service instance is
+	// needed. Similar to what can be seen in the man page for the
+	// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+	// file, the underlying
+	// data, such as pod name and namespace, should be treated as confidential;
+	// it is the user's choice to expose it
+	// or not via another resource attribute.
+	//
+	// For applications running behind an application server (like unicorn), we
+	// do not recommend using one identifier
+	// for all processes participating in the application. Instead, it's
+	// recommended that each division (e.g. a worker
+	// thread in unicorn) have its own instance.id.
+	//
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the
+	// service instance that is generating that telemetry. For instance,
+	// creating a UUID based on `pod.name` will
+	// likely be wrong, as the Collector might not know from which container
+	// within that pod the telemetry originated.
+	// However, Collectors can set the `service.instance.id` if they can
+	// unambiguously determine the service instance
+	// for that telemetry. This is typically the case for scraping receivers,
+	// as they know the target address and
+	// port.
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fallback to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+	// `process.executable.name` is not available, the value MUST be set to
+	// `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
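+
+// The note above suggests deriving a stable instance ID as a Version 5 UUID
+// in the reserved namespace. A sketch using github.com/google/uuid (reading
+// /etc/machine-id as the inherent ID is an assumption; any stable source
+// works):
+//
+//	ns := uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
+//	raw, _ := os.ReadFile("/etc/machine-id")
+//	id := uuid.NewSHA1(ns, bytes.TrimSpace(raw)) // NewSHA1 yields a v5 UUID
+//	attr := ServiceInstanceID(id.String())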
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
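+
+// These service attributes are most often supplied when building the SDK
+// resource; a sketch with the documented example values (it assumes the
+// go.opentelemetry.io/otel/sdk/resource package and this package's
+// SchemaURL constant):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceName("shoppingcart"),
+//		ServiceNamespace("Shop"),
+//		ServiceVersion("2.0.0"),
+//	)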
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
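+
+// When a session times out and a new one starts, instrumentation can link
+// the two as described above; a sketch (the uuid package and the `oldID`
+// variable are assumptions):
+//
+//	newID := uuid.NewString()
+//	attrs := []attribute.KeyValue{
+//		SessionID(newID),
+//		SessionPreviousID(oldID), // omit when no prior session is known
+//	}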
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions. It represents the [SignalR
+	// transport
+	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'web_sockets', 'long_polling'
+	SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+	// The connection was closed normally
+	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+	// The connection was closed due to a timeout
+	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+	// The connection was closed because the app is shutting down
+	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+	// ServerSentEvents protocol
+	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+	// LongPolling protocol
+	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+	// WebSockets protocol
+	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// Describes System attributes
+const (
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '(identifier)'
+	SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It represents the
+	// logical CPU number [0..n-1]
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemCPUStateKey is the attribute Key conforming to the
+	// "system.cpu.state" semantic conventions. It represents the state of the
+	// CPU
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle', 'interrupt'
+	SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+	// user
+	SystemCPUStateUser = SystemCPUStateKey.String("user")
+	// system
+	SystemCPUStateSystem = SystemCPUStateKey.String("system")
+	// nice
+	SystemCPUStateNice = SystemCPUStateKey.String("nice")
+	// idle
+	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+	// iowait
+	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+	// interrupt
+	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+	// steal
+	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
+
+// Describes System Memory attributes
+const (
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory
+	// state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free', 'cached'
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+	// used
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// shared
+	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+	// buffers
+	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+	// cached
+	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'in'
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory
+	// paging state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free'
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+
+	// SystemPagingTypeKey is the attribute Key conforming to the
+	// "system.paging.type" semantic conventions. It represents the memory
+	// paging type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'minor'
+	SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+	// in
+	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+	// out
+	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+	// used
+	SystemPagingStateUsed = SystemPagingStateKey.String("used")
+	// free
+	SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+	// major
+	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+	// minor
+	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem attributes
+const (
+	// SystemFilesystemModeKey is the attribute Key conforming to the
+	// "system.filesystem.mode" semantic conventions. It represents the
+	// filesystem mode
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'rw, ro'
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/mnt/data'
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the
+	// filesystem state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'used'
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the
+	// filesystem type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ext4'
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+	// used
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+	// fat32
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents a stateless
+	// protocol MUST NOT set this attribute
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+	// SystemProcessStatusKey is the attribute Key conforming to the
+	// "system.process.status" semantic conventions. It represents the process
+	// state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+	// running
+	SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+	// sleeping
+	SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+	// stopped
+	SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+	// defunct
+	SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+	// If another SDK, like a fork or a vendor-provided implementation, is
+	// used, this SDK MUST set the
+	// `telemetry.sdk.name` attribute to the fully-qualified class or module
+	// name of this SDK's main entry point
+	// or another suitable identifier depending on the language.
+	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
+	// case.
+	// All custom identifiers SHOULD be stable across different versions of an
+	// implementation.
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+	// TelemetryDistroNameKey is the attribute Key conforming to the
+	// "telemetry.distro.name" semantic conventions. It represents the name of
+	// the auto instrumentation agent or distribution, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'parts-unlimited-java'
+	// Note: Official auto instrumentation agents and distributions SHOULD set
+	// the `telemetry.distro.name` attribute to
+	// a string starting with `opentelemetry-`, e.g.
+	// `opentelemetry-java-instrumentation`.
+	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+	// TelemetryDistroVersionKey is the attribute Key conforming to the
+	// "telemetry.distro.version" semantic conventions. It represents the
+	// version string of the auto instrumentation agent or distribution, if
+	// used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// rust
+	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+	return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+	return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
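+
+// Go exposes no managed thread ID; when a goroutine identifier is wanted
+// for thread.id anyway, one well-known (and officially discouraged) trick
+// parses runtime.Stack output. Strictly a sketch:
+//
+//	buf := make([]byte, 64)
+//	buf = buf[:runtime.Stack(buf, false)]
+//	// buf begins with "goroutine <id> [", so the second field is the ID
+//	id, _ := strconv.Atoi(strings.Fields(string(buf))[1])
+//	attr := ThreadID(id)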
+
+// Semantic convention attributes in the TLS namespace.
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+	// semantic conventions. It represents the string indicating the
+	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+	// used during the current connection.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+	// Note: The values allowed for `tls.cipher` MUST be one of the
+	// `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+	// usually mutually-exclusive of `client.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the client. This is usually mutually-exclusive of
+	// `client.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded version
+	// of the certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded version
+	// of the certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+	// TLSClientHashSha256Key is the attribute Key conforming to the
+	// "tls.client.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// version of the certificate offered by the client. For consistency with
+	// other hash values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
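+
+	// A fingerprint in this form can be computed from a parsed
+	// *x509.Certificate; a sketch (the `cert` variable and the
+	// crypto/sha256, encoding/hex, and strings imports are assumed):
+	//
+	//	sum := sha256.Sum256(cert.Raw) // cert.Raw is the DER encoding
+	//	fp := strings.ToUpper(hex.EncodeToString(sum[:]))
+	//	attr := TLSClientHashSha256Key.String(fp)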
+
+	// TLSClientIssuerKey is the attribute Key conforming to the
+	// "tls.client.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+	// semantic conventions. It represents a hash that identifies clients based
+	// on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+	// TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the date/Time
+	// indicating when client certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+	// TLSClientNotBeforeKey is the attribute Key conforming to the
+	// "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when the client certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+	// TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the also
+	// called an SNI, this tells the server which hostname to which the client
+	// is attempting to connect to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry.io'
+	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+	// TLSClientSubjectKey is the attribute Key conforming to the
+	// "tls.client.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+	TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
+	// "tls.client.supported_ciphers" semantic conventions. It represents the
+	// array of ciphers offered by the client during the client hello.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+	// conventions. It represents the string indicating the curve used for the
+	// given cipher, when applicable
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'secp256r1'
+	TLSCurveKey = attribute.Key("tls.curve")
+
+	// TLSEstablishedKey is the attribute Key conforming to the
+	// "tls.established" semantic conventions. It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from the original string of the
+	// negotiated [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions. It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually exclusive with `server.certificate_chain`, since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually exclusive with
+	// `server.certificate`, since that value should be the first certificate
+	// in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the X.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/time
+	// indicating when the server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered
+	// valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of the subject of the X.509 certificate presented by
+	// the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
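+// Illustrative sketch (not part of the generated conventions): the enum
+// values above are ordinary attribute.KeyValue constants, so they can be
+// attached to a span directly, assuming `span` is an active trace.Span:
+//
+//	span.SetAttributes(TLSProtocolNameTLS, TLSProtocolVersion("1.3"))
+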
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually exclusive with `client.certificate_chain`, since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually exclusive with `client.certificate`,
+// since that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded certificate offered by
+// the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded certificate offered by
+// the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded certificate offered
+// by the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the X.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name the client is requesting (also called SNI), which tells the server
+// which hostname the client is attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the X.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually exclusive with `server.certificate_chain`, since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually exclusive with `server.certificate`,
+// since that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded certificate offered by
+// the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded certificate offered by
+// the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the X.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the X.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
+
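+// Illustrative sketch (not part of the generated conventions): a minimal way
+// to populate several of the TLS attributes above from a
+// crypto/tls.ConnectionState, assuming `state` holds the handshake result and
+// `span` is an active trace.Span:
+//
+//	span.SetAttributes(
+//		TLSEstablished(state.HandshakeComplete),
+//		TLSResumed(state.DidResume),
+//		TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
+//		TLSNextProtocol(state.NegotiatedProtocol),
+//		TLSClientServerName(state.ServerName),
+//	)
+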
+// Attributes describing URLs.
+const (
+	// URLDomainKey is the attribute Key conforming to the "url.domain"
+	// semantic conventions. It represents the domain extracted from the
+	// `url.full`, such as "opentelemetry.io".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+	// '[1080:0:0:0:8:800:200C:417A]'
+	// Note: In some cases a URL may refer to an IP and/or port directly,
+	// without a domain name. In this case, the IP address would go to the
+	// domain field. If the URL contains a [literal IPv6
+	// address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+	// `[` and `]`, the `[` and `]` characters should also be captured in the
+	// domain field.
+	URLDomainKey = attribute.Key("url.domain")
+
+	// URLExtensionKey is the attribute Key conforming to the "url.extension"
+	// semantic conventions. It represents the file extension extracted from
+	// the `url.full`, excluding the leading dot.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'png', 'gz'
+	// Note: The file extension is only set if it exists, as not every URL has
+	// a file extension. When the file name has multiple extensions, as in
+	// `example.tar.gz`, only the last one should be captured (`gz`, not
+	// `tar.gz`).
+	URLExtensionKey = attribute.Key("url.extension")
+
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, the URL usually has the
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via the URL in the form
+	// `https://username:password@www.example.com/`. In such a case the
+	// username and password SHOULD be redacted and the attribute's value
+	// SHOULD be `https://REDACTED:REDACTED@www.example.com/` (see the
+	// redaction sketch after this const block).
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed). Sensitive content provided in `url.full` SHOULD be
+	// scrubbed when instrumentations can identify it.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLOriginalKey is the attribute Key conforming to the "url.original"
+	// semantic conventions. It represents the unmodified original URL as seen
+	// in the event source.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// 'search?q=OpenTelemetry'
+	// Note: In network monitoring, the observed URL may be a full URL, whereas
+	// in access logs, the URL is often just represented as a path. This field
+	// is meant to represent the URL as it was observed, complete or not.
+	// `url.original` might contain credentials passed via the URL in the form
+	// `https://username:password@www.example.com/`. In such a case the
+	// password and username SHOULD NOT be redacted and the attribute's value
+	// SHOULD remain the same.
+	URLOriginalKey = attribute.Key("url.original")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	// Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLPathKey = attribute.Key("url.path")
+
+	// URLPortKey is the attribute Key conforming to the "url.port" semantic
+	// conventions. It represents the port extracted from the `url.full`
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered URL domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.com', 'foo.co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org). For example, the registered domain for
+	// `foo.example.com` is `example.com`. Trying to approximate this by simply
+	// taking the last two labels will not work well for TLDs such as `co.uk`.
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, i.e., all of the names except the host name
+	// under the registered_domain. In a partially qualified domain, or if the
+	// qualification level of the full name cannot be determined, subdomain
+	// contains all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'east', 'sub2.sub1'
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+	// the domain has multiple levels of subdomain, such as
+	// `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+	// with no trailing period.
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template"
+	// semantic conventions. It represents the low-cardinality template of an
+	// [absolute path
+	// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective
+	// top level domain (eTLD), also known as the domain suffix: the last part
+	// of the domain name. For example, the top level domain for example.com
+	// is `com`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com', 'co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org).
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
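+// Illustrative sketch (not part of the generated conventions): the credential
+// redaction that the `url.full` note above calls for, using the standard
+// net/url package; `raw` is the observed URL string and `span` is an active
+// trace.Span:
+//
+//	if u, err := url.Parse(raw); err == nil {
+//		if u.User != nil {
+//			// Replace any userinfo with the literal REDACTED:REDACTED pair.
+//			u.User = url.UserPassword("REDACTED", "REDACTED")
+//		}
+//		span.SetAttributes(URLFull(u.String()))
+//	}
+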
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, i.e., all of the names except the host name
+// under the registered_domain. In a partially qualified domain, or if the
+// qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix: the last part of the
+// domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
+
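+// Illustrative sketch (not part of the generated conventions): decomposing a
+// parsed URL into the component attributes defined above, using net/url and
+// strconv from the standard library:
+//
+//	u, _ := url.Parse("https://www.foo.bar:8080/search?q=OpenTelemetry#SemConv")
+//	attrs := []attribute.KeyValue{
+//		URLScheme(u.Scheme),
+//		URLDomain(u.Hostname()),
+//		URLPath(u.Path),
+//		URLQuery(u.RawQuery),
+//		URLFragment(u.Fragment),
+//	}
+//	if p, err := strconv.Atoi(u.Port()); err == nil {
+//		attrs = append(attrs, URLPort(p))
+//	}
+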
+// Describes user-agent attributes.
+const (
+	// UserAgentNameKey is the attribute Key conforming to the
+	// "user_agent.name" semantic conventions. It represents the name of the
+	// user-agent extracted from the original string. Usually refers to the
+	// browser's name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Safari', 'YourApp'
+	// Note: [Example](https://www.whatsmyua.info) of extracting the browser's
+	// name from the original string. In the case of using a user-agent for
+	// non-browser products, such as microservices with multiple names/versions
+	// inside the `user_agent.original`, the most significant name SHOULD be
+	// selected. In such a scenario it should align with `user_agent.version`.
+	UserAgentNameKey = attribute.Key("user_agent.name")
+
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of
+	// the user-agent extracted from the original string. Usually refers to the
+	// browser's version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.1.2', '1.0.0'
+	// Note: [Example](https://www.whatsmyua.info) of extracting the browser's
+	// version from the original string. In the case of using a user-agent for
+	// non-browser products, such as microservices with multiple names/versions
+	// inside the `user_agent.original`, the most significant version SHOULD be
+	// selected. In such a scenario it should align with `user_agent.name`.
+	UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from the original string. Usually refers to the
+// browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+	return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from the original string. Usually refers to the
+// browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+	return UserAgentVersionKey.String(val)
+}
+
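+// Illustrative sketch (not part of the generated conventions): recording the
+// raw User-Agent header from an incoming request, assuming `r` is a
+// *net/http.Request and `span` is an active trace.Span:
+//
+//	span.SetAttributes(UserAgentOriginal(r.UserAgent()))
+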
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
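+
+// Illustrative sketch (not part of the generated conventions): webengine
+// attributes describe the process as a whole, so they typically belong on a
+// resource rather than on individual spans; assuming the otel SDK's
+// go.opentelemetry.io/otel/sdk/resource package is imported and `ctx` is a
+// context.Context:
+//
+//	res, err := resource.New(ctx, resource.WithAttributes(
+//		WebEngineName("WildFly"),
+//		WebEngineVersion("21.0.0"),
+//	))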
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
similarity index 96%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
index d27e8a8f8..d031bbea7 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
@@ -4,6 +4,6 @@
 // Package semconv implements OpenTelemetry semantic conventions.
 //
 // OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.24.0
+// patterns for OpenTelemetry things. This package represents the v1.26.0
 // version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
similarity index 98%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
index 7235bb51d..bfaee0d56 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
@@ -1,7 +1,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
 
 const (
 	// ExceptionEventName is the name of the Span event representing an exception.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
similarity index 77%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
index a6b953f62..fcdb9f485 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
@@ -3,109 +3,259 @@
 
 // Code generated from semantic convention specification. DO NOT EDIT.
 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
 
 const (
 
-	// DBClientConnectionsUsage is the metric conforming to the
-	// "db.client.connections.usage" semantic conventions. It represents the number
+	// ContainerCPUTime is the metric conforming to the "container.cpu.time"
+	// semantic conventions. It represents the total CPU time consumed.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	ContainerCPUTimeName        = "container.cpu.time"
+	ContainerCPUTimeUnit        = "s"
+	ContainerCPUTimeDescription = "Total CPU time consumed"
+
+	// ContainerMemoryUsage is the metric conforming to the
+	// "container.memory.usage" semantic conventions. It represents the memory
+	// usage of the container.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	ContainerMemoryUsageName        = "container.memory.usage"
+	ContainerMemoryUsageUnit        = "By"
+	ContainerMemoryUsageDescription = "Memory usage of the container."
+
+	// ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
+	// conventions. It represents the disk bytes for the container.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	ContainerDiskIoName        = "container.disk.io"
+	ContainerDiskIoUnit        = "By"
+	ContainerDiskIoDescription = "Disk bytes for the container."
+
+	// ContainerNetworkIo is the metric conforming to the "container.network.io"
+	// semantic conventions. It represents the network bytes for the container.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	ContainerNetworkIoName        = "container.network.io"
+	ContainerNetworkIoUnit        = "By"
+	ContainerNetworkIoDescription = "Network bytes for the container."
+
+	// DBClientOperationDuration is the metric conforming to the
+	// "db.client.operation.duration" semantic conventions. It represents the
+	// duration of database client operations.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientOperationDurationName        = "db.client.operation.duration"
+	DBClientOperationDurationUnit        = "s"
+	DBClientOperationDurationDescription = "Duration of database client operations."
+
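+	// Illustrative sketch (not part of the generated conventions): the
+	// Name/Unit/Description triples in this file are intended to be passed
+	// to a metric.Meter when creating the instrument, e.g., assuming `meter`
+	// is a metric.Meter:
+	//
+	//	duration, err := meter.Float64Histogram(
+	//		DBClientOperationDurationName,
+	//		metric.WithUnit(DBClientOperationDurationUnit),
+	//		metric.WithDescription(DBClientOperationDurationDescription),
+	//	)
+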
+	// DBClientConnectionCount is the metric conforming to the
+	// "db.client.connection.count" semantic conventions. It represents the number
 	// of connections that are currently in state described by the `state`
 	// attribute.
 	// Instrument: updowncounter
 	// Unit: {connection}
 	// Stability: Experimental
+	DBClientConnectionCountName        = "db.client.connection.count"
+	DBClientConnectionCountUnit        = "{connection}"
+	DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+	// DBClientConnectionIdleMax is the metric conforming to the
+	// "db.client.connection.idle.max" semantic conventions. It represents the
+	// maximum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionIdleMaxName        = "db.client.connection.idle.max"
+	DBClientConnectionIdleMaxUnit        = "{connection}"
+	DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+	// DBClientConnectionIdleMin is the metric conforming to the
+	// "db.client.connection.idle.min" semantic conventions. It represents the
+	// minimum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionIdleMinName        = "db.client.connection.idle.min"
+	DBClientConnectionIdleMinUnit        = "{connection}"
+	DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
+
+	// DBClientConnectionMax is the metric conforming to the
+	// "db.client.connection.max" semantic conventions. It represents the maximum
+	// number of open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionMaxName        = "db.client.connection.max"
+	DBClientConnectionMaxUnit        = "{connection}"
+	DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
+
+	// DBClientConnectionPendingRequests is the metric conforming to the
+	// "db.client.connection.pending_requests" semantic conventions. It represents
+	// the number of pending requests for an open connection, cumulative for the
+	// entire pool.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	DBClientConnectionPendingRequestsName        = "db.client.connection.pending_requests"
+	DBClientConnectionPendingRequestsUnit        = "{request}"
+	DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+	// DBClientConnectionTimeouts is the metric conforming to the
+	// "db.client.connection.timeouts" semantic conventions. It represents the
+	// number of connection timeouts that have occurred trying to obtain a
+	// connection from the pool.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	DBClientConnectionTimeoutsName        = "db.client.connection.timeouts"
+	DBClientConnectionTimeoutsUnit        = "{timeout}"
+	DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+	// DBClientConnectionCreateTime is the metric conforming to the
+	// "db.client.connection.create_time" semantic conventions. It represents the
+	// time it took to create a new connection.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionCreateTimeName        = "db.client.connection.create_time"
+	DBClientConnectionCreateTimeUnit        = "s"
+	DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+	// DBClientConnectionWaitTime is the metric conforming to the
+	// "db.client.connection.wait_time" semantic conventions. It represents the
+	// time it took to obtain an open connection from the pool.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionWaitTimeName        = "db.client.connection.wait_time"
+	DBClientConnectionWaitTimeUnit        = "s"
+	DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+	// DBClientConnectionUseTime is the metric conforming to the
+	// "db.client.connection.use_time" semantic conventions. It represents the time
+	// between borrowing a connection and returning it to the pool.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DBClientConnectionUseTimeName        = "db.client.connection.use_time"
+	DBClientConnectionUseTimeUnit        = "s"
+	DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+	// DBClientConnectionsUsage is the metric conforming to the
+	// "db.client.connections.usage" semantic conventions. It represents the
+	// deprecated `db.client.connections.usage` metric; use
+	// `db.client.connection.count` instead.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
 	DBClientConnectionsUsageName        = "db.client.connections.usage"
 	DBClientConnectionsUsageUnit        = "{connection}"
-	DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+	DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
 
 	// DBClientConnectionsIdleMax is the metric conforming to the
 	// "db.client.connections.idle.max" semantic conventions. It represents the
-	// maximum number of idle open connections allowed.
+	// deprecated `db.client.connections.idle.max` metric; use
+	// `db.client.connection.idle.max` instead.
 	// Instrument: updowncounter
 	// Unit: {connection}
 	// Stability: Experimental
 	DBClientConnectionsIdleMaxName        = "db.client.connections.idle.max"
 	DBClientConnectionsIdleMaxUnit        = "{connection}"
-	DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+	DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
 
 	// DBClientConnectionsIdleMin is the metric conforming to the
 	// "db.client.connections.idle.min" semantic conventions. It represents the
-	// minimum number of idle open connections allowed.
+	// deprecated `db.client.connections.idle.min` metric; use
+	// `db.client.connection.idle.min` instead.
 	// Instrument: updowncounter
 	// Unit: {connection}
 	// Stability: Experimental
 	DBClientConnectionsIdleMinName        = "db.client.connections.idle.min"
 	DBClientConnectionsIdleMinUnit        = "{connection}"
-	DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+	DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
 
 	// DBClientConnectionsMax is the metric conforming to the
-	// "db.client.connections.max" semantic conventions. It represents the maximum
-	// number of open connections allowed.
+	// "db.client.connections.max" semantic conventions. It represents the
+	// deprecated `db.client.connections.max` metric; use
+	// `db.client.connection.max` instead.
 	// Instrument: updowncounter
 	// Unit: {connection}
 	// Stability: Experimental
 	DBClientConnectionsMaxName        = "db.client.connections.max"
 	DBClientConnectionsMaxUnit        = "{connection}"
-	DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+	DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
 
 	// DBClientConnectionsPendingRequests is the metric conforming to the
 	// "db.client.connections.pending_requests" semantic conventions. It represents
-	// the number of pending requests for an open connection, cumulative for the
-	// entire pool.
+	// the deprecated `db.client.connections.pending_requests` metric; use
+	// `db.client.connection.pending_requests` instead.
 	// Instrument: updowncounter
 	// Unit: {request}
 	// Stability: Experimental
 	DBClientConnectionsPendingRequestsName        = "db.client.connections.pending_requests"
 	DBClientConnectionsPendingRequestsUnit        = "{request}"
-	DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+	DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
 
 	// DBClientConnectionsTimeouts is the metric conforming to the
 	// "db.client.connections.timeouts" semantic conventions. It represents the
-	// number of connection timeouts that have occurred trying to obtain a
-	// connection from the pool.
+	// deprecated `db.client.connections.timeouts` metric; use
+	// `db.client.connection.timeouts` instead.
 	// Instrument: counter
 	// Unit: {timeout}
 	// Stability: Experimental
 	DBClientConnectionsTimeoutsName        = "db.client.connections.timeouts"
 	DBClientConnectionsTimeoutsUnit        = "{timeout}"
-	DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+	DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
 
 	// DBClientConnectionsCreateTime is the metric conforming to the
 	// "db.client.connections.create_time" semantic conventions. It represents the
-	// time it took to create a new connection.
+	// deprecated `db.client.connections.create_time` metric; use
+	// `db.client.connection.create_time` instead. Note: the unit also changed
+	// from `ms` to `s`.
 	// Instrument: histogram
 	// Unit: ms
 	// Stability: Experimental
 	DBClientConnectionsCreateTimeName        = "db.client.connections.create_time"
 	DBClientConnectionsCreateTimeUnit        = "ms"
-	DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+	DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
 
 	// DBClientConnectionsWaitTime is the metric conforming to the
 	// "db.client.connections.wait_time" semantic conventions. It represents the
-	// time it took to obtain an open connection from the pool.
+	// deprecated `db.client.connections.wait_time` metric; use
+	// `db.client.connection.wait_time` instead. Note: the unit also changed
+	// from `ms` to `s`.
 	// Instrument: histogram
 	// Unit: ms
 	// Stability: Experimental
 	DBClientConnectionsWaitTimeName        = "db.client.connections.wait_time"
 	DBClientConnectionsWaitTimeUnit        = "ms"
-	DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+	DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
 
 	// DBClientConnectionsUseTime is the metric conforming to the
 	// "db.client.connections.use_time" semantic conventions. It represents the
-	// time between borrowing a connection and returning it to the pool.
+	// deprecated `db.client.connections.use_time` metric; use
+	// `db.client.connection.use_time` instead. Note: the unit also changed
+	// from `ms` to `s`.
 	// Instrument: histogram
 	// Unit: ms
 	// Stability: Experimental
 	DBClientConnectionsUseTimeName        = "db.client.connections.use_time"
 	DBClientConnectionsUseTimeUnit        = "ms"
-	DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+	DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+	// DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+	// semantic conventions. It measures the time taken to perform a DNS
+	// lookup.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DNSLookupDurationName        = "dns.lookup.duration"
+	DNSLookupDurationUnit        = "s"
+	DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
 
 	// AspnetcoreRoutingMatchAttempts is the metric conforming to the
 	// "aspnetcore.routing.match_attempts" semantic conventions. It represents the
 	// number of requests that were attempted to be matched to an endpoint.
 	// Instrument: counter
 	// Unit: {match_attempt}
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRoutingMatchAttemptsName        = "aspnetcore.routing.match_attempts"
 	AspnetcoreRoutingMatchAttemptsUnit        = "{match_attempt}"
 	AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
@@ -115,7 +265,7 @@ const (
 	// number of exceptions caught by exception handling middleware.
 	// Instrument: counter
 	// Unit: {exception}
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreDiagnosticsExceptionsName        = "aspnetcore.diagnostics.exceptions"
 	AspnetcoreDiagnosticsExceptionsUnit        = "{exception}"
 	AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
@@ -126,7 +276,7 @@ const (
 	// that hold a rate limiting lease.
 	// Instrument: updowncounter
 	// Unit: {request}
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRateLimitingActiveRequestLeasesName        = "aspnetcore.rate_limiting.active_request_leases"
 	AspnetcoreRateLimitingActiveRequestLeasesUnit        = "{request}"
 	AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
@@ -137,7 +287,7 @@ const (
 	// server.
 	// Instrument: histogram
 	// Unit: s
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRateLimitingRequestLeaseDurationName        = "aspnetcore.rate_limiting.request_lease.duration"
 	AspnetcoreRateLimitingRequestLeaseDurationUnit        = "s"
 	AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
@@ -148,7 +298,7 @@ const (
 	// limiting lease.
 	// Instrument: histogram
 	// Unit: s
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRateLimitingRequestTimeInQueueName        = "aspnetcore.rate_limiting.request.time_in_queue"
 	AspnetcoreRateLimitingRequestTimeInQueueUnit        = "s"
 	AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
@@ -159,7 +309,7 @@ const (
 	// acquire a rate limiting lease.
 	// Instrument: updowncounter
 	// Unit: {request}
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRateLimitingQueuedRequestsName        = "aspnetcore.rate_limiting.queued_requests"
 	AspnetcoreRateLimitingQueuedRequestsUnit        = "{request}"
 	AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
@@ -169,69 +319,17 @@ const (
 	// number of requests that tried to acquire a rate limiting lease.
 	// Instrument: counter
 	// Unit: {request}
-	// Stability: Experimental
+	// Stability: Stable
 	AspnetcoreRateLimitingRequestsName        = "aspnetcore.rate_limiting.requests"
 	AspnetcoreRateLimitingRequestsUnit        = "{request}"
 	AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
 
-	// DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
-	// semantic conventions. It represents the measures the time taken to perform a
-	// DNS lookup.
-	// Instrument: histogram
-	// Unit: s
-	// Stability: Experimental
-	DNSLookupDurationName        = "dns.lookup.duration"
-	DNSLookupDurationUnit        = "s"
-	DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
-	// HTTPClientOpenConnections is the metric conforming to the
-	// "http.client.open_connections" semantic conventions. It represents the
-	// number of outbound HTTP connections that are currently active or idle on the
-	// client.
-	// Instrument: updowncounter
-	// Unit: {connection}
-	// Stability: Experimental
-	HTTPClientOpenConnectionsName        = "http.client.open_connections"
-	HTTPClientOpenConnectionsUnit        = "{connection}"
-	HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
-	// HTTPClientConnectionDuration is the metric conforming to the
-	// "http.client.connection.duration" semantic conventions. It represents the
-	// duration of the successfully established outbound HTTP connections.
-	// Instrument: histogram
-	// Unit: s
-	// Stability: Experimental
-	HTTPClientConnectionDurationName        = "http.client.connection.duration"
-	HTTPClientConnectionDurationUnit        = "s"
-	HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
-	// HTTPClientActiveRequests is the metric conforming to the
-	// "http.client.active_requests" semantic conventions. It represents the number
-	// of active HTTP requests.
-	// Instrument: updowncounter
-	// Unit: {request}
-	// Stability: Experimental
-	HTTPClientActiveRequestsName        = "http.client.active_requests"
-	HTTPClientActiveRequestsUnit        = "{request}"
-	HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
-	// HTTPClientRequestTimeInQueue is the metric conforming to the
-	// "http.client.request.time_in_queue" semantic conventions. It represents the
-	// amount of time requests spent on a queue waiting for an available
-	// connection.
-	// Instrument: histogram
-	// Unit: s
-	// Stability: Experimental
-	HTTPClientRequestTimeInQueueName        = "http.client.request.time_in_queue"
-	HTTPClientRequestTimeInQueueUnit        = "s"
-	HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
-
 	// KestrelActiveConnections is the metric conforming to the
 	// "kestrel.active_connections" semantic conventions. It represents the number
 	// of connections that are currently active on the server.
 	// Instrument: updowncounter
 	// Unit: {connection}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelActiveConnectionsName        = "kestrel.active_connections"
 	KestrelActiveConnectionsUnit        = "{connection}"
 	KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
@@ -241,7 +339,7 @@ const (
 	// duration of connections on the server.
 	// Instrument: histogram
 	// Unit: s
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelConnectionDurationName        = "kestrel.connection.duration"
 	KestrelConnectionDurationUnit        = "s"
 	KestrelConnectionDurationDescription = "The duration of connections on the server."
@@ -251,7 +349,7 @@ const (
 	// number of connections rejected by the server.
 	// Instrument: counter
 	// Unit: {connection}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelRejectedConnectionsName        = "kestrel.rejected_connections"
 	KestrelRejectedConnectionsUnit        = "{connection}"
 	KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
@@ -261,7 +359,7 @@ const (
 	// of connections that are currently queued and are waiting to start.
 	// Instrument: updowncounter
 	// Unit: {connection}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelQueuedConnectionsName        = "kestrel.queued_connections"
 	KestrelQueuedConnectionsUnit        = "{connection}"
 	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
@@ -272,7 +370,7 @@ const (
 	// currently queued and are waiting to start.
 	// Instrument: updowncounter
 	// Unit: {request}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelQueuedRequestsName        = "kestrel.queued_requests"
 	KestrelQueuedRequestsUnit        = "{request}"
 	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
@@ -282,7 +380,7 @@ const (
 	// number of connections that are currently upgraded (WebSockets). .
 	// Instrument: updowncounter
 	// Unit: {connection}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelUpgradedConnectionsName        = "kestrel.upgraded_connections"
 	KestrelUpgradedConnectionsUnit        = "{connection}"
 	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
@@ -292,7 +390,7 @@ const (
 	// duration of TLS handshakes on the server.
 	// Instrument: histogram
 	// Unit: s
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelTLSHandshakeDurationName        = "kestrel.tls_handshake.duration"
 	KestrelTLSHandshakeDurationUnit        = "s"
 	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
@@ -302,7 +400,7 @@ const (
 	// number of TLS handshakes that are currently in progress on the server.
 	// Instrument: updowncounter
 	// Unit: {handshake}
-	// Stability: Experimental
+	// Stability: Stable
 	KestrelActiveTLSHandshakesName        = "kestrel.active_tls_handshakes"
 	KestrelActiveTLSHandshakesUnit        = "{handshake}"
 	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
@@ -312,7 +410,7 @@ const (
 	// duration of connections on the server.
 	// Instrument: histogram
 	// Unit: s
-	// Stability: Experimental
+	// Stability: Stable
 	SignalrServerConnectionDurationName        = "signalr.server.connection.duration"
 	SignalrServerConnectionDurationUnit        = "s"
 	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
@@ -322,7 +420,7 @@ const (
 	// number of connections that are currently active on the server.
 	// Instrument: updowncounter
 	// Unit: {connection}
-	// Stability: Experimental
+	// Stability: Stable
 	SignalrServerActiveConnectionsName        = "signalr.server.active_connections"
 	SignalrServerActiveConnectionsUnit        = "{connection}"
 	SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
@@ -481,6 +579,37 @@ const (
 	HTTPClientResponseBodySizeUnit        = "By"
 	HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
 
+	// HTTPClientOpenConnections is the metric conforming to the
+	// "http.client.open_connections" semantic conventions. It represents the
+	// number of outbound HTTP connections that are currently active or idle on the
+	// client.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	HTTPClientOpenConnectionsName        = "http.client.open_connections"
+	HTTPClientOpenConnectionsUnit        = "{connection}"
+	HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+	// HTTPClientConnectionDuration is the metric conforming to the
+	// "http.client.connection.duration" semantic conventions. It represents the
+	// duration of the successfully established outbound HTTP connections.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientConnectionDurationName        = "http.client.connection.duration"
+	HTTPClientConnectionDurationUnit        = "s"
+	HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+	// HTTPClientActiveRequests is the metric conforming to the
+	// "http.client.active_requests" semantic conventions. It represents the number
+	// of active HTTP requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPClientActiveRequestsName        = "http.client.active_requests"
+	HTTPClientActiveRequestsUnit        = "{request}"
+	HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
 	// JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
 	// conventions. It represents the measure of initial memory requested.
 	// Instrument: updowncounter
@@ -673,15 +802,15 @@ const (
 	MessagingReceiveDurationUnit        = "s"
 	MessagingReceiveDurationDescription = "Measures the duration of receive operation."
 
-	// MessagingDeliverDuration is the metric conforming to the
-	// "messaging.deliver.duration" semantic conventions. It represents the
-	// measures the duration of deliver operation.
+	// MessagingProcessDuration is the metric conforming to the
+	// "messaging.process.duration" semantic conventions. It represents the
+	// duration of the process operation.
 	// Instrument: histogram
 	// Unit: s
 	// Stability: Experimental
-	MessagingDeliverDurationName        = "messaging.deliver.duration"
-	MessagingDeliverDurationUnit        = "s"
-	MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+	MessagingProcessDurationName        = "messaging.process.duration"
+	MessagingProcessDurationUnit        = "s"
+	MessagingProcessDurationDescription = "Measures the duration of process operation."
 
 	// MessagingPublishMessages is the metric conforming to the
 	// "messaging.publish.messages" semantic conventions. It represents the
@@ -703,15 +832,112 @@ const (
 	MessagingReceiveMessagesUnit        = "{message}"
 	MessagingReceiveMessagesDescription = "Measures the number of received messages."
 
-	// MessagingDeliverMessages is the metric conforming to the
-	// "messaging.deliver.messages" semantic conventions. It represents the
-	// measures the number of delivered messages.
+	// MessagingProcessMessages is the metric conforming to the
+	// "messaging.process.messages" semantic conventions. It represents the
+	// number of processed messages.
 	// Instrument: counter
 	// Unit: {message}
 	// Stability: Experimental
-	MessagingDeliverMessagesName        = "messaging.deliver.messages"
-	MessagingDeliverMessagesUnit        = "{message}"
-	MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+	MessagingProcessMessagesName        = "messaging.process.messages"
+	MessagingProcessMessagesUnit        = "{message}"
+	MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+	// ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+	// conventions. It represents the total CPU seconds broken down by different
+	// states.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Experimental
+	ProcessCPUTimeName        = "process.cpu.time"
+	ProcessCPUTimeUnit        = "s"
+	ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+	// ProcessCPUUtilization is the metric conforming to the
+	// "process.cpu.utilization" semantic conventions. It represents the difference
+	// in process.cpu.time since the last measurement, divided by the elapsed time
+	// and number of CPUs available to the process.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	ProcessCPUUtilizationName        = "process.cpu.utilization"
+	ProcessCPUUtilizationUnit        = "1"
+	ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
+
+	// ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
+	// semantic conventions. It represents the amount of physical memory in use.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	ProcessMemoryUsageName        = "process.memory.usage"
+	ProcessMemoryUsageUnit        = "By"
+	ProcessMemoryUsageDescription = "The amount of physical memory in use."
+
+	// ProcessMemoryVirtual is the metric conforming to the
+	// "process.memory.virtual" semantic conventions. It represents the amount of
+	// committed virtual memory.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	ProcessMemoryVirtualName        = "process.memory.virtual"
+	ProcessMemoryVirtualUnit        = "By"
+	ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
+
+	// ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
+	// conventions. It represents the disk bytes transferred.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	ProcessDiskIoName        = "process.disk.io"
+	ProcessDiskIoUnit        = "By"
+	ProcessDiskIoDescription = "Disk bytes transferred."
+
+	// ProcessNetworkIo is the metric conforming to the "process.network.io"
+	// semantic conventions. It represents the network bytes transferred.
+	// Instrument: counter
+	// Unit: By
+	// Stability: Experimental
+	ProcessNetworkIoName        = "process.network.io"
+	ProcessNetworkIoUnit        = "By"
+	ProcessNetworkIoDescription = "Network bytes transferred."
+
+	// ProcessThreadCount is the metric conforming to the "process.thread.count"
+	// semantic conventions. It represents the process threads count.
+	// Instrument: updowncounter
+	// Unit: {thread}
+	// Stability: Experimental
+	ProcessThreadCountName        = "process.thread.count"
+	ProcessThreadCountUnit        = "{thread}"
+	ProcessThreadCountDescription = "Process threads count."
+
+	// ProcessOpenFileDescriptorCount is the metric conforming to the
+	// "process.open_file_descriptor.count" semantic conventions. It represents the
+	// number of file descriptors in use by the process.
+	// Instrument: updowncounter
+	// Unit: {count}
+	// Stability: Experimental
+	ProcessOpenFileDescriptorCountName        = "process.open_file_descriptor.count"
+	ProcessOpenFileDescriptorCountUnit        = "{count}"
+	ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
+
+	// ProcessContextSwitches is the metric conforming to the
+	// "process.context_switches" semantic conventions. It represents the number of
+	// times the process has been context switched.
+	// Instrument: counter
+	// Unit: {count}
+	// Stability: Experimental
+	ProcessContextSwitchesName        = "process.context_switches"
+	ProcessContextSwitchesUnit        = "{count}"
+	ProcessContextSwitchesDescription = "Number of times the process has been context switched."
+
+	// ProcessPagingFaults is the metric conforming to the "process.paging.faults"
+	// semantic conventions. It represents the number of page faults the process
+	// has made.
+	// Instrument: counter
+	// Unit: {fault}
+	// Stability: Experimental
+	ProcessPagingFaultsName        = "process.paging.faults"
+	ProcessPagingFaultsUnit        = "{fault}"
+	ProcessPagingFaultsDescription = "Number of page faults the process has made."
 
 	// RPCServerDuration is the metric conforming to the "rpc.server.duration"
 	// semantic conventions. It represents the measures the duration of inbound
@@ -883,6 +1109,16 @@ const (
 	SystemMemoryLimitUnit        = "By"
 	SystemMemoryLimitDescription = "Total memory available in the system."
 
+	// SystemMemoryShared is the metric conforming to the "system.memory.shared"
+	// semantic conventions. It represents the shared memory used (mostly by
+	// tmpfs).
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	SystemMemorySharedName        = "system.memory.shared"
+	SystemMemorySharedUnit        = "By"
+	SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
 	// SystemMemoryUtilization is the metric conforming to the
 	// "system.memory.utilization" semantic conventions.
 	// Instrument: gauge
@@ -1038,25 +1274,25 @@ const (
 	SystemNetworkConnectionsName = "system.network.connections"
 	SystemNetworkConnectionsUnit = "{connection}"
 
-	// SystemProcessesCount is the metric conforming to the
-	// "system.processes.count" semantic conventions. It represents the total
-	// number of processes in each state.
+	// SystemProcessCount is the metric conforming to the "system.process.count"
+	// semantic conventions. It represents the total number of processes in each
+	// state.
 	// Instrument: updowncounter
 	// Unit: {process}
 	// Stability: Experimental
-	SystemProcessesCountName        = "system.processes.count"
-	SystemProcessesCountUnit        = "{process}"
-	SystemProcessesCountDescription = "Total number of processes in each state"
+	SystemProcessCountName        = "system.process.count"
+	SystemProcessCountUnit        = "{process}"
+	SystemProcessCountDescription = "Total number of processes in each state"
 
-	// SystemProcessesCreated is the metric conforming to the
-	// "system.processes.created" semantic conventions. It represents the total
+	// SystemProcessCreated is the metric conforming to the
+	// "system.process.created" semantic conventions. It represents the total
 	// number of processes created over uptime of the host.
 	// Instrument: counter
 	// Unit: {process}
 	// Stability: Experimental
-	SystemProcessesCreatedName        = "system.processes.created"
-	SystemProcessesCreatedUnit        = "{process}"
-	SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+	SystemProcessCreatedName        = "system.process.created"
+	SystemProcessCreatedUnit        = "{process}"
+	SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
 
 	// SystemLinuxMemoryAvailable is the metric conforming to the
 	// "system.linux.memory.available" semantic conventions. It represents an
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
similarity index 85%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
index fe80b1731..4c87c7adc 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
@@ -1,9 +1,9 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
 
 // SchemaURL is the schema URL that matches the version of the semantic conventions
 // that this package defines. Semconv packages starting from v1.4.0 must declare
 // non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
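Consumers typically pass SchemaURL alongside semconv attributes, for example when building an SDK resource; a sketch, with the service name assumed:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func newResource() *resource.Resource {
	// The schema URL records which version of the semantic conventions
	// the attached attributes follow.
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("my-service"), // assumed service name
	)
}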
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 5650a174b..8c45a7107 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
 	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
 }
 
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
 // as a remote SpanContext and as the current Span. The Span implementation
 // that wraps rsc is non-recording and performs no operations other than to
 // return rsc as the SpanContext from the SpanContext method.
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index d661c5d10..cdbf41d6d 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -96,7 +96,7 @@ can embed the API interface directly.
 
 This option is not recommended. It will lead to publishing packages that
 contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
 dependency.
 
 Finally, an author can embed another implementation in theirs. The embedded
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 000000000..ef85cb70c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.TracerProvider
+
+	// Tracer returns a unique Tracer scoped to be used by instrumentation code
+	// to trace computational workflows. The scope and identity of that
+	// instrumentation code is uniquely defined by the name and options passed.
+	//
+	// The passed name needs to uniquely identify instrumentation code.
+	// Therefore, it is recommended that name is the Go package name of the
+	// library providing instrumentation (note: not the code being
+	// instrumented). Instrumentation libraries can have multiple versions,
+	// therefore, the WithInstrumentationVersion option should be used to
+	// distinguish these different codebases. Additionally, instrumentation
+	// libraries may sometimes use traces to communicate different domains of
+	// workflow data (i.e. using spans to communicate workflow events only). If
+	// this is the case, the WithScopeAttributes option should be used to
+	// uniquely identify Tracers that handle the different domains of workflow
+	// data.
+	//
+	// If the same name and options are passed multiple times, the same Tracer
+	// will be returned (it is up to the implementation if this will be the
+	// same underlying instance of that Tracer or not). It is not necessary to
+	// call this multiple times with the same name and options to get an
+	// up-to-date Tracer. All implementations will ensure any TracerProvider
+	// configuration changes are propagated to all provided Tracers.
+	//
+	// If name is empty, then an implementation defined default name will be
+	// used instead.
+	//
+	// This method is safe to call concurrently.
+	Tracer(name string, options ...TracerOption) Tracer
+}
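In practice, instrumentation accepts a TracerProvider (falling back to the global registration) and derives a named Tracer from it. A sketch, with the instrumentation name and version assumed:

package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func newTracer(tp trace.TracerProvider) trace.Tracer {
	if tp == nil {
		tp = otel.GetTracerProvider() // fall back to the globally registered provider
	}
	// The name identifies the instrumentation library, not the code being
	// instrumented; the version option distinguishes library releases.
	return tp.Tracer(
		"example.io/pkg/instrumentation", // assumed instrumentation package name
		trace.WithInstrumentationVersion("0.1.0"),
	)
}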
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 000000000..d3aa476ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Span
+
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// AddLink adds a link.
+	// Adding links at span creation using WithLinks is preferred to calling AddLink
+	// later, for contexts that are available during span creation, because head
+	// sampling decisions can only consider information present during span creation.
+	AddLink(link Link)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value.  This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
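A typical caller ends the span it created and, on failure, records the error and sets the status explicitly, since RecordError alone does not mark the span as failed. A minimal sketch:

package main

import (
	"context"

	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context, tracer trace.Tracer, work func(context.Context) error) error {
	ctx, span := tracer.Start(ctx, "handle")
	defer span.End() // no updates to the span are allowed after End

	if err := work(ctx); err != nil {
		// RecordError only adds an exception event; the status must be
		// set separately for the span to be reported as failed.
		span.RecordError(err)
		span.SetStatus(codes.Error, "work failed")
		return err
	}
	return nil
}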
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4ab..d49adf671 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
 
 import (
 	"bytes"
-	"context"
 	"encoding/hex"
 	"encoding/json"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 		Remote:     sc.remote,
 	})
 }
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Span
-
-	// End completes the Span. The Span is considered complete and ready to be
-	// delivered through the rest of the telemetry pipeline after this method
-	// is called. Therefore, updates to the Span are not allowed after this
-	// method has been called.
-	End(options ...SpanEndOption)
-
-	// AddEvent adds an event with the provided name and options.
-	AddEvent(name string, options ...EventOption)
-
-	// AddLink adds a link.
-	// Adding links at span creation using WithLinks is preferred to calling AddLink
-	// later, for contexts that are available during span creation, because head
-	// sampling decisions can only consider information present during span creation.
-	AddLink(link Link)
-
-	// IsRecording returns the recording state of the Span. It will return
-	// true if the Span is active and events can be recorded.
-	IsRecording() bool
-
-	// RecordError will record err as an exception span event for this span. An
-	// additional call to SetStatus is required if the Status of the Span should
-	// be set to Error, as this method does not change the Span status. If this
-	// span is not being recorded or err is nil then this method does nothing.
-	RecordError(err error, options ...EventOption)
-
-	// SpanContext returns the SpanContext of the Span. The returned SpanContext
-	// is usable even after the End method has been called for the Span.
-	SpanContext() SpanContext
-
-	// SetStatus sets the status of the Span in the form of a code and a
-	// description, provided the status hasn't already been set to a higher
-	// value before (OK > Error > Unset). The description is only included in a
-	// status when the code is for an error.
-	SetStatus(code codes.Code, description string)
-
-	// SetName sets the Span name.
-	SetName(name string)
-
-	// SetAttributes sets kv as attributes of the Span. If a key from kv
-	// already exists for an attribute of the Span it will be overwritten with
-	// the value contained in kv.
-	SetAttributes(kv ...attribute.KeyValue)
-
-	// TracerProvider returns a TracerProvider that can be used to generate
-	// additional Spans on the same telemetry pipeline as the current Span.
-	TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-//  1. Batch Processing: A batch of operations may contain operations
-//     associated with one or more traces/spans. Since there can only be one
-//     parent SpanContext, a Link is used to keep reference to the
-//     SpanContext of all operations in the batch.
-//  2. Public Endpoint: A SpanContext for an in incoming client request on a
-//     public endpoint should be considered untrusted. In such a case, a new
-//     trace with its own identity and sampling decision needs to be created,
-//     but this new trace needs to be related to the original trace in some
-//     form. A Link is used to keep reference to the original SpanContext and
-//     track the relationship.
-type Link struct {
-	// SpanContext of the linked Span.
-	SpanContext SpanContext
-
-	// Attributes describe the aspects of the link.
-	Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
-	return Link{
-		SpanContext: SpanContextFromContext(ctx),
-		Attributes:  attrs,
-	}
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
-	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
-	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
-	// if it is received.
-	SpanKindUnspecified SpanKind = 0
-	// SpanKindInternal is a SpanKind for a Span that represents an internal
-	// operation within an application.
-	SpanKindInternal SpanKind = 1
-	// SpanKindServer is a SpanKind for a Span that represents the operation
-	// of handling a request from a client.
-	SpanKindServer SpanKind = 2
-	// SpanKindClient is a SpanKind for a Span that represents the operation
-	// of client making a request to a server.
-	SpanKindClient SpanKind = 3
-	// SpanKindProducer is a SpanKind for a Span that represents the operation
-	// of a producer sending a message to a message broker. Unlike
-	// SpanKindClient and SpanKindServer, there is often no direct
-	// relationship between this kind of Span and a SpanKindConsumer kind. A
-	// SpanKindProducer Span will end once the message is accepted by the
-	// message broker which might not overlap with the processing of that
-	// message.
-	SpanKindProducer SpanKind = 4
-	// SpanKindConsumer is a SpanKind for a Span that represents the operation
-	// of a consumer receiving a message from a message broker. Like
-	// SpanKindProducer Spans, there is often no direct relationship between
-	// this Span and the Span that produced the message.
-	SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value.  This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
-	switch spanKind {
-	case SpanKindInternal,
-		SpanKindServer,
-		SpanKindClient,
-		SpanKindProducer,
-		SpanKindConsumer:
-		// valid
-		return spanKind
-	default:
-		return SpanKindInternal
-	}
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
-	switch sk {
-	case SpanKindInternal:
-		return "internal"
-	case SpanKindServer:
-		return "server"
-	case SpanKindClient:
-		return "client"
-	case SpanKindProducer:
-		return "producer"
-	case SpanKindConsumer:
-		return "consumer"
-	default:
-		return "unspecified"
-	}
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.Tracer
-
-	// Start creates a span and a context.Context containing the newly-created span.
-	//
-	// If the context.Context provided in `ctx` contains a Span then the newly-created
-	// Span will be a child of that span, otherwise it will be a root span. This behavior
-	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
-	// newly-created Span to be a root span even if `ctx` contains a Span.
-	//
-	// When creating a Span it is recommended to provide all known span attributes using
-	// the `WithAttributes()` SpanOption as samplers will only have access to the
-	// attributes provided when a Span is created.
-	//
-	// Any Span that is created MUST also be ended. This is the responsibility of the user.
-	// Implementations of this API may leak memory or other resources if Spans are not ended.
-	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
-	// Users of the interface can ignore this. This embedded type is only used
-	// by implementations of this interface. See the "API Implementations"
-	// section of the package documentation for more information.
-	embedded.TracerProvider
-
-	// Tracer returns a unique Tracer scoped to be used by instrumentation code
-	// to trace computational workflows. The scope and identity of that
-	// instrumentation code is uniquely defined by the name and options passed.
-	//
-	// The passed name needs to uniquely identify instrumentation code.
-	// Therefore, it is recommended that name is the Go package name of the
-	// library providing instrumentation (note: not the code being
-	// instrumented). Instrumentation libraries can have multiple versions,
-	// therefore, the WithInstrumentationVersion option should be used to
-	// distinguish these different codebases. Additionally, instrumentation
-	// libraries may sometimes use traces to communicate different domains of
-	// workflow data (i.e. using spans to communicate workflow events only). If
-	// this is the case, the WithScopeAttributes option should be used to
-	// uniquely identify Tracers that handle the different domains of workflow
-	// data.
-	//
-	// If the same name and options are passed multiple times, the same Tracer
-	// will be returned (it is up to the implementation if this will be the
-	// same underlying instance of that Tracer or not). It is not necessary to
-	// call this multiple times with the same name and options to get an
-	// up-to-date Tracer. All implementations will ensure any TracerProvider
-	// configuration changes are propagated to all provided Tracers.
-	//
-	// If name is empty, then an implementation defined default name will be
-	// used instead.
-	//
-	// This method is safe to call concurrently.
-	Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 000000000..77952d2a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
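Because samplers only see what is present at creation time, the span kind and any known attributes are best passed as start options rather than set afterwards. A sketch with an illustrative attribute key:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func processOrder(ctx context.Context, tracer trace.Tracer, orderID string) {
	// Attributes supplied at Start are visible to samplers; attributes
	// added later via SetAttributes are not.
	ctx, span := tracer.Start(ctx, "ProcessOrder",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(attribute.String("order.id", orderID)), // assumed key
	)
	defer span.End()

	_ = ctx // pass ctx on so child spans nest under this one
}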
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf243..dc5e34cad 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
 	return ""
 }
 
+// Walk walks all key/value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
 // Insert adds a new list-member defined by the key/value pair to the
 // TraceState. If a list-member already exists for the given key, that
 // list-member's value is updated. The new or updated list-member is always
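Walk exposes the list-members in order without allocating an intermediate slice; returning false from the callback stops the iteration early. A small sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func dumpTraceState(ts trace.TraceState) {
	n := 0
	ts.Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		n++
		return n < 5 // stop after the first five list-members
	})
}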
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index e57bf57fc..000000000
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
-	printf "GOPATH is not defined.\n"
-	exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
-	printf "GOPATH ${GOPATH} is invalid \n"
-	exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
-	git status
-	printf "\n\nError: working tree is not clean\n"
-	exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
-	printf "$(git log -1)"
-	printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
-	printf "  Update go.mod for $dir\n"
-	(cd "${DIR_TMP}/${dir}" && \
-	 # replaces is ("mod1" "mod2" …)
-	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
-	 # strip double quotes
-	 replaces=("${replaces[@]%\"}") && \
-	 replaces=("${replaces[@]#\"}") && \
-	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
-	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
-	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
-	 go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
-	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
-	(cd "${DIR_TMP}/${ex}" && \
-	 go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Check out the previous version of the changelog from the base branch into the temp folder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD "$FILE"
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ab2896052..6d3c7b1f4 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.28.0"
+	return "1.31.0"
 }
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 241cfc82a..cdebdb5eb 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
 
 module-sets:
   stable-v1:
-    version: v1.28.0
+    version: v1.31.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opencensus
@@ -29,21 +29,21 @@ module-sets:
       - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.50.0
+    version: v0.53.0
     modules:
       - go.opentelemetry.io/otel/example/prometheus
       - go.opentelemetry.io/otel/exporters/prometheus
   experimental-logs:
-    version: v0.4.0
+    version: v0.7.0
     modules:
       - go.opentelemetry.io/otel/log
       - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
       - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
   experimental-schema:
-    version: v0.0.8
+    version: v0.0.10
     modules:
       - go.opentelemetry.io/otel/schema
 excluded-modules:
   - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/exp/LICENSE
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
index ecc0dabb7..c25939b92 100644
--- a/vendor/golang.org/x/exp/maps/maps.go
+++ b/vendor/golang.org/x/exp/maps/maps.go
@@ -5,9 +5,20 @@
 // Package maps defines various functions useful with maps of any type.
 package maps
 
+import "maps"
+
+// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
+// these functions except Keys and Values should be annotated
+// (provisionally with "//go:fix inline") so that tools can safely and
+// automatically replace calls to exp/maps with calls to std maps by
+// inlining them.
+
 // Keys returns the keys of the map m.
 // The keys will be in an indeterminate order.
 func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+	// The simplest true equivalent using std is:
+	// return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)).
+
 	r := make([]K, 0, len(m))
 	for k := range m {
 		r = append(r, k)
@@ -18,6 +29,9 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
 // Values returns the values of the map m.
 // The values will be in an indeterminate order.
 func Values[M ~map[K]V, K comparable, V any](m M) []V {
+	// The simplest true equivalent using std is:
+	// return slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)).
+
 	r := make([]V, 0, len(m))
 	for _, v := range m {
 		r = append(r, v)
@@ -28,50 +42,24 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
 // Equal reports whether two maps contain the same key/value pairs.
 // Values are compared using ==.
 func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
-	if len(m1) != len(m2) {
-		return false
-	}
-	for k, v1 := range m1 {
-		if v2, ok := m2[k]; !ok || v1 != v2 {
-			return false
-		}
-	}
-	return true
+	return maps.Equal(m1, m2)
 }
 
 // EqualFunc is like Equal, but compares values using eq.
 // Keys are still compared with ==.
 func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
-	if len(m1) != len(m2) {
-		return false
-	}
-	for k, v1 := range m1 {
-		if v2, ok := m2[k]; !ok || !eq(v1, v2) {
-			return false
-		}
-	}
-	return true
+	return maps.EqualFunc(m1, m2, eq)
 }
 
 // Clear removes all entries from m, leaving it empty.
 func Clear[M ~map[K]V, K comparable, V any](m M) {
-	for k := range m {
-		delete(m, k)
-	}
+	clear(m)
 }
 
 // Clone returns a copy of m.  This is a shallow clone:
 // the new keys and values are set using ordinary assignment.
 func Clone[M ~map[K]V, K comparable, V any](m M) M {
-	// Preserve nil in case it matters.
-	if m == nil {
-		return nil
-	}
-	r := make(M, len(m))
-	for k, v := range m {
-		r[k] = v
-	}
-	return r
+	return maps.Clone(m)
 }
 
 // Copy copies all key/value pairs in src adding them to dst.
@@ -79,16 +67,10 @@ func Clone[M ~map[K]V, K comparable, V any](m M) M {
 // the value in dst will be overwritten by the value associated
 // with the key in src.
 func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
-	for k, v := range src {
-		dst[k] = v
-	}
+	maps.Copy(dst, src)
 }
 
 // DeleteFunc deletes any key/value pairs from m for which del returns true.
 func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
-	for k, v := range m {
-		if del(k, v) {
-			delete(m, k)
-		}
-	}
+	maps.DeleteFunc(m, del)
 }
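The exported behavior is unchanged; the exp functions now delegate to the standard library where an exact equivalent exists. For example:

package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	m := map[string]int{"b": 2, "a": 1}

	keys := maps.Keys(m) // still a []string, in indeterminate order
	sort.Strings(keys)
	fmt.Println(keys) // [a b]

	clone := maps.Clone(m)            // now backed by std maps.Clone
	fmt.Println(maps.Equal(m, clone)) // true
}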
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/mod/LICENSE
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go
index 220568259..de1b98211 100644
--- a/vendor/golang.org/x/mod/modfile/read.go
+++ b/vendor/golang.org/x/mod/modfile/read.go
@@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() {
 				continue
 			}
 			if ww == 1 && len(stmt.RParen.Comments.Before) == 0 {
-				// Collapse block into single line.
-				line := &Line{
+				// Collapse block into single line but keep the Line reference used by the
+				// parsed File structure.
+				*stmt.Line[0] = Line{
 					Comments: Comments{
 						Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
 						Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
@@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() {
 					},
 					Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
 				}
-				x.Stmt[w] = line
+				x.Stmt[w] = stmt.Line[0]
 				w++
 				continue
 			}
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go
index 66dcaf980..3e4a1d0ab 100644
--- a/vendor/golang.org/x/mod/modfile/rule.go
+++ b/vendor/golang.org/x/mod/modfile/rule.go
@@ -43,6 +43,7 @@ type File struct {
 	Exclude   []*Exclude
 	Replace   []*Replace
 	Retract   []*Retract
+	Tool      []*Tool
 
 	Syntax *FileSyntax
 }
@@ -93,6 +94,12 @@ type Retract struct {
 	Syntax    *Line
 }
 
+// A Tool is a single tool statement.
+type Tool struct {
+	Path   string
+	Syntax *Line
+}
+
 // A VersionInterval represents a range of versions with upper and lower bounds.
 // Intervals are closed: both bounds are included. When Low is equal to High,
 // the interval may refer to a single version ('v1.2.3') or an interval
@@ -297,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse
 					})
 				}
 				continue
-			case "module", "godebug", "require", "exclude", "replace", "retract":
+			case "module", "godebug", "require", "exclude", "replace", "retract", "tool":
 				for _, l := range x.Line {
 					f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
 				}
@@ -509,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
 			Syntax:          line,
 		}
 		f.Retract = append(f.Retract, retract)
+
+	case "tool":
+		if len(args) != 1 {
+			errorf("tool directive expects exactly one argument")
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			errorf("invalid quoted string: %v", err)
+			return
+		}
+		f.Tool = append(f.Tool, &Tool{
+			Path:   s,
+			Syntax: line,
+		})
 	}
 }
 
@@ -1567,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error {
 	return nil
 }
 
+// AddTool adds a new tool directive with the given path.
+// It does nothing if the tool line already exists.
+func (f *File) AddTool(path string) error {
+	for _, t := range f.Tool {
+		if t.Path == path {
+			return nil
+		}
+	}
+
+	f.Tool = append(f.Tool, &Tool{
+		Path:   path,
+		Syntax: f.Syntax.addLine(nil, "tool", path),
+	})
+
+	f.SortBlocks()
+	return nil
+}
+
+// DropTool removes a tool directive with the given path.
+// It does nothing if no such tool directive exists.
+func (f *File) DropTool(path string) error {
+	for _, t := range f.Tool {
+		if t.Path == path {
+			t.Syntax.markRemoved()
+			*t = Tool{}
+		}
+	}
+	return nil
+}
+
 func (f *File) SortBlocks() {
 	f.removeDups() // otherwise sorting is unsafe
 
@@ -1593,9 +1645,9 @@ func (f *File) SortBlocks() {
 	}
 }
 
-// removeDups removes duplicate exclude and replace directives.
+// removeDups removes duplicate exclude, replace and tool directives.
 //
-// Earlier exclude directives take priority.
+// Earlier exclude and tool directives take priority.
 //
 // Later replace directives take priority.
 //
@@ -1605,10 +1657,10 @@ func (f *File) SortBlocks() {
 // retract directives are not de-duplicated since comments are
 // meaningful, and versions may be retracted multiple times.
 func (f *File) removeDups() {
-	removeDups(f.Syntax, &f.Exclude, &f.Replace)
+	removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool)
 }
 
-func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
+func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) {
 	kill := make(map[*Line]bool)
 
 	// Remove duplicate excludes.
@@ -1649,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
 	}
 	*replace = repl
 
+	if tool != nil {
+		haveTool := make(map[string]bool)
+		for _, t := range *tool {
+			if haveTool[t.Path] {
+				kill[t.Syntax] = true
+				continue
+			}
+			haveTool[t.Path] = true
+		}
+		var newTool []*Tool
+		for _, t := range *tool {
+			if !kill[t.Syntax] {
+				newTool = append(newTool, t)
+			}
+		}
+		*tool = newTool
+	}
+
 	// Duplicate require and retract directives are not removed.
 
 	// Drop killed statements from the syntax tree.
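
For context: these hunks add go.mod tool directive support to the vendored x/mod. A short sketch of the new surface, assuming the modfile version vendored here:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.24\n")
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}
	// AddTool is idempotent: re-adding an existing path is a no-op.
	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
		panic(err)
	}
	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // now includes: tool golang.org/x/tools/cmd/stringer
}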
diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go
index 8f54897cf..5387d0c26 100644
--- a/vendor/golang.org/x/mod/modfile/work.go
+++ b/vendor/golang.org/x/mod/modfile/work.go
@@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() {
 // retract directives are not de-duplicated since comments are
 // meaningful, and versions may be retracted multiple times.
 func (f *WorkFile) removeDups() {
-	removeDups(f.Syntax, nil, &f.Replace)
+	removeDups(f.Syntax, nil, &f.Replace, nil)
 }
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index cac1a899e..2a364b229 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -506,7 +506,6 @@ var badWindowsNames = []string{
 	"PRN",
 	"AUX",
 	"NUL",
-	"COM0",
 	"COM1",
 	"COM2",
 	"COM3",
@@ -516,7 +515,6 @@ var badWindowsNames = []string{
 	"COM7",
 	"COM8",
 	"COM9",
-	"LPT0",
 	"LPT1",
 	"LPT2",
 	"LPT3",
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index 07df71e98..ed90060c3 100644
--- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -101,6 +101,22 @@ var severityName = []string{
 	fatalLog:   "FATAL",
 }
 
+// sprintf is fmt.Sprintf.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintf = fmt.Sprintf
+
+// sprint is fmt.Sprint.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprint = fmt.Sprint
+
+// sprintln is fmt.Sprintln.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintln = fmt.Sprintln
+
+// exit is os.Exit.
+// This var exists to make it possible to test functions calling os.Exit.
+var exit = os.Exit
+
 // loggerT is the default logger used by grpclog.
 type loggerT struct {
 	m          []*log.Logger
@@ -111,7 +127,7 @@ type loggerT struct {
 func (g *loggerT) output(severity int, s string) {
 	sevStr := severityName[severity]
 	if !g.jsonFormat {
-		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+		g.m[severity].Output(2, sevStr+": "+s)
 		return
 	}
 	// TODO: we can also include the logging component, but that needs more
@@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) {
 	g.m[severity].Output(2, string(b))
 }
 
+func (g *loggerT) printf(severity int, format string, args ...any) {
+	// Note the discard check is duplicated in each print func, rather than in
+	// output, to avoid the expensive Sprint calls.
+	// De-duplicating this by moving to output would be a significant performance regression!
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprintf(format, args...))
+}
+
+func (g *loggerT) print(severity int, v ...any) {
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprint(v...))
+}
+
+func (g *loggerT) println(severity int, v ...any) {
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprintln(v...))
+}
+
 func (g *loggerT) Info(args ...any) {
-	g.output(infoLog, fmt.Sprint(args...))
+	g.print(infoLog, args...)
 }
 
 func (g *loggerT) Infoln(args ...any) {
-	g.output(infoLog, fmt.Sprintln(args...))
+	g.println(infoLog, args...)
 }
 
 func (g *loggerT) Infof(format string, args ...any) {
-	g.output(infoLog, fmt.Sprintf(format, args...))
+	g.printf(infoLog, format, args...)
 }
 
 func (g *loggerT) Warning(args ...any) {
-	g.output(warningLog, fmt.Sprint(args...))
+	g.print(warningLog, args...)
 }
 
 func (g *loggerT) Warningln(args ...any) {
-	g.output(warningLog, fmt.Sprintln(args...))
+	g.println(warningLog, args...)
 }
 
 func (g *loggerT) Warningf(format string, args ...any) {
-	g.output(warningLog, fmt.Sprintf(format, args...))
+	g.printf(warningLog, format, args...)
 }
 
 func (g *loggerT) Error(args ...any) {
-	g.output(errorLog, fmt.Sprint(args...))
+	g.print(errorLog, args...)
 }
 
 func (g *loggerT) Errorln(args ...any) {
-	g.output(errorLog, fmt.Sprintln(args...))
+	g.println(errorLog, args...)
 }
 
 func (g *loggerT) Errorf(format string, args ...any) {
-	g.output(errorLog, fmt.Sprintf(format, args...))
+	g.printf(errorLog, format, args...)
 }
 
 func (g *loggerT) Fatal(args ...any) {
-	g.output(fatalLog, fmt.Sprint(args...))
-	os.Exit(1)
+	g.print(fatalLog, args...)
+	exit(1)
 }
 
 func (g *loggerT) Fatalln(args ...any) {
-	g.output(fatalLog, fmt.Sprintln(args...))
-	os.Exit(1)
+	g.println(fatalLog, args...)
+	exit(1)
 }
 
 func (g *loggerT) Fatalf(format string, args ...any) {
-	g.output(fatalLog, fmt.Sprintf(format, args...))
-	os.Exit(1)
+	g.printf(fatalLog, format, args...)
+	exit(1)
 }
 
 func (g *loggerT) V(l int) bool {
@@ -186,19 +226,42 @@ type LoggerV2Config struct {
 	FormatJSON bool
 }
 
+// combineLoggers returns a combined logger for both higher & lower severity logs,
+// or only one if the other is io.Discard.
+//
+// This uses io.Discard instead of io.MultiWriter when all loggers
+// are set to io.Discard. Both this package and the standard log package have
+// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
+// this writing).
+func combineLoggers(lower, higher io.Writer) io.Writer {
+	if lower == io.Discard {
+		return higher
+	}
+	if higher == io.Discard {
+		return lower
+	}
+	return io.MultiWriter(lower, higher)
+}
+
 // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
 // The infoW, warningW, and errorW writers are used to write log messages of
 // different severity levels.
 func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
-	var m []*log.Logger
 	flag := log.LstdFlags
 	if c.FormatJSON {
 		flag = 0
 	}
-	m = append(m, log.New(infoW, "", flag))
-	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
-	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
-	m = append(m, log.New(ew, "", flag))
-	m = append(m, log.New(ew, "", flag))
+
+	warningW = combineLoggers(infoW, warningW)
+	errorW = combineLoggers(errorW, warningW)
+
+	fatalW := errorW
+
+	m := []*log.Logger{
+		log.New(infoW, "", flag),
+		log.New(warningW, "", flag),
+		log.New(errorW, "", flag),
+		log.New(fatalW, "", flag),
+	}
 	return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
 }
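
For context: the new print helpers all gate formatting on a cheap writer check. A sketch of that fast-path pattern outside grpclog (not the library's API):

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

// logf formats only when the destination is live, mirroring the
// io.Discard check added above.
func logf(l *log.Logger, format string, args ...any) {
	if l.Writer() == io.Discard {
		return // skip the fmt.Sprintf work for silenced severities
	}
	l.Output(2, fmt.Sprintf(format, args...))
}

func main() {
	silenced := log.New(io.Discard, "", log.LstdFlags)
	active := log.New(os.Stderr, "", log.LstdFlags)
	logf(silenced, "expensive: %+v", struct{ N int }{42}) // no formatting happens
	logf(active, "expensive: %+v", struct{ N int }{42})
}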
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 7aae9240f..3afc18134 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -29,8 +29,6 @@ import (
 )
 
 var (
-	// WithHealthCheckFunc is set by dialoptions.go
-	WithHealthCheckFunc any // func (HealthChecker) DialOption
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
 	// BalancerUnregister is exported by package balancer to unregister a balancer.
@@ -149,6 +147,20 @@ var (
 	// other features, including the CSDS service.
 	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
 
+	// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
+	// using the provided xDS client instead of creating a new one using the
+	// bootstrap configuration specified by the supported environment variables.
+	// The resolver.Builder is meant to be used in conjunction with the
+	// grpc.WithResolvers DialOption. The resolver.Builder does not take
+	// ownership of the provided xDS client and it is the responsibility of the
+	// caller to close the client when no longer required.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
+
 	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
 	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
 	// variable.
@@ -191,6 +203,8 @@ var (
 	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
 	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
 
+	// ChannelzTurnOffForTesting disables the Channelz service for testing
+	// purposes.
 	ChannelzTurnOffForTesting func()
 
 	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
@@ -205,10 +219,6 @@ var (
 	// default resolver scheme.
 	UserSetDefaultScheme = false
 
-	// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
-	// is the number of elements.  swap swaps the elements with indexes i and j.
-	ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
-
 	// ConnectedAddress returns the connected address for a SubConnState. The
 	// address is only valid if the state is READY.
 	ConnectedAddress any // func (scs SubConnState) resolver.Address
@@ -235,7 +245,7 @@ var (
 //
 // The implementation is expected to create a health checking RPC stream by
 // calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
@@ -257,3 +267,9 @@ const (
 // It currently has an experimental suffix which would be removed once
 // end-to-end testing of the policy is completed.
 const RLSLoadBalancingPolicyName = "rls_experimental"
+
+// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
+// embedding.
+type EnforceSubConnEmbedding interface {
+	enforceSubConnEmbedding()
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 757925381..1186f1e9a 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
 
 // Details returns a slice of details messages attached to the status.
 // If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
 func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
 			details = append(details, err)
 			continue
 		}
-		details = append(details, detail)
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//   unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//   as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
 	}
 	return details
 }
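
For context: the adaptation the new comment describes is exposed publicly through the protoadapt package. A sketch of the round-trip for a dual-API generated type (the concrete message is illustrative):

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	v2 := wrapperspb.String("detail") // generated types implement both V1 and V2
	v1 := protoadapt.MessageV1Of(v2)  // point 2 above: returned as-is, no wrapper
	fmt.Println(protoadapt.MessageV2Of(v1) == v2) // true: no copy, no wrapping
}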
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index bb2966e3b..8f9e592f8 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
 		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
 	}
 
-	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
 }
 
 func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 29846df22..0e72d8537 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
 		}
 
 		v := m.Get(fd)
-		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
-		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
-		if isProto2Scalar || isSingularMessage {
+		if fd.HasPresence() {
 			if m.skipNull {
 				continue
 			}
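
For context: FieldDescriptor.HasPresence subsumes both deleted predicates, reporting true for proto2 scalars, singular messages, proto3 optional fields, and extensions. A small descriptor walk showing the call (any registered message type works; structpb is illustrative):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	fds := (&structpb.Value{}).ProtoReflect().Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		fmt.Printf("%s: HasPresence=%v\n", fd.Name(), fd.HasPresence())
	}
}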
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 4b177c820..e9fe10394 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
 		switch tok.Kind() {
 		case json.ObjectClose:
 			if !found {
-				return d.newError(tok.Pos(), `missing "value" field`)
+				// We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+				// for compatibility with other proto runtimes that have interpreted the spec differently.
+				if m.Descriptor().FullName() != genid.Empty_message_fullname {
+					return d.newError(tok.Pos(), `missing "value" field`)
+				}
 			}
 			return nil
 
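
For context: with this carve-out, an Any holding google.protobuf.Empty now unmarshals from JSON even when the "value" key is omitted. A sketch (Empty must be linked in so the type URL resolves):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/emptypb" // registers Empty
)

func main() {
	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)
	var a anypb.Any
	if err := protojson.Unmarshal(in, &a); err != nil {
		fmt.Println("error:", err) // this was the pre-change behavior
		return
	}
	fmt.Println("ok:", a.TypeUrl)
}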
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c8..024ffebd3 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
 // dependency on the descriptor proto package).
 package descopts
 
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // These variables are set by the init function in descriptor.pb.go via logic
 // in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
 //
 // Each variable is populated with a nil pointer to the options struct.
 var (
-	File           pref.ProtoMessage
-	Enum           pref.ProtoMessage
-	EnumValue      pref.ProtoMessage
-	Message        pref.ProtoMessage
-	Field          pref.ProtoMessage
-	Oneof          pref.ProtoMessage
-	ExtensionRange pref.ProtoMessage
-	Service        pref.ProtoMessage
-	Method         pref.ProtoMessage
+	File           protoreflect.ProtoMessage
+	Enum           protoreflect.ProtoMessage
+	EnumValue      protoreflect.ProtoMessage
+	Message        protoreflect.ProtoMessage
+	Field          protoreflect.ProtoMessage
+	Oneof          protoreflect.ProtoMessage
+	ExtensionRange protoreflect.ProtoMessage
+	Service        protoreflect.ProtoMessage
+	Method         protoreflect.ProtoMessage
 )
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index ff6a38360add36f53d48bb0863b701696e0d7b2d..5a57ef6f3c80a4a930b7bdb33b039ea94d1eb5f2 100644
GIT binary patch
literal 138
zcmd;*muO*EV!mX@pe4$|D8MAaq`<7fXux#Ijt$6VkYMDJmv|0Wz$CyZ!KlClRKN&Q
wzyMY7f?Y`%s2WL*1th1%ddZFnY{E-+C6MVz3P75fB^b3pHY+@1*LcYe04AXnGXMYp

literal 93
zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j
Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H

diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
deleted file mode 100644
index fbcd34920..000000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-// +build !go1.13
-
-package errors
-
-import "reflect"
-
-// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
-func Is(err, target error) bool {
-	if target == nil {
-		return err == target
-	}
-
-	isComparable := reflect.TypeOf(target).Comparable()
-	for {
-		if isComparable && err == target {
-			return true
-		}
-		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
-			return true
-		}
-		if err = unwrap(err); err == nil {
-			return false
-		}
-	}
-}
-
-func unwrap(err error) error {
-	u, ok := err.(interface {
-		Unwrap() error
-	})
-	if !ok {
-		return nil
-	}
-	return u.Unwrap()
-}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
deleted file mode 100644
index 5e72f1cde..000000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-package errors
-
-import "errors"
-
-// Is is errors.Is.
-func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40b..378b826fa 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,6 +32,7 @@ const (
 	EditionProto2      Edition = 998
 	EditionProto3      Edition = 999
 	Edition2023        Edition = 1000
+	Edition2024        Edition = 1001
 	EditionUnsupported Edition = 100000
 )
 
@@ -77,31 +78,48 @@ type (
 		Locations SourceLocations
 	}
 
+	// EditionFeatures is a frequently-instantiated struct, so please take care
+	// to minimize padding when adding new fields to this struct (add them in
+	// the right place/order).
 	EditionFeatures struct {
+		// StripEnumPrefix determines if the plugin generates enum value
+		// constants as-is, with their prefix stripped, or both variants.
+		StripEnumPrefix int
+
 		// IsFieldPresence is true if field_presence is EXPLICIT
 		// https://protobuf.dev/editions/features/#field_presence
 		IsFieldPresence bool
+
 		// IsLegacyRequired is true if field_presence is LEGACY_REQUIRED
 		// https://protobuf.dev/editions/features/#field_presence
 		IsLegacyRequired bool
+
 		// IsOpenEnum is true if enum_type is OPEN
 		// https://protobuf.dev/editions/features/#enum_type
 		IsOpenEnum bool
+
 		// IsPacked is true if repeated_field_encoding is PACKED
 		// https://protobuf.dev/editions/features/#repeated_field_encoding
 		IsPacked bool
+
 		// IsUTF8Validated is true if utf_validation is VERIFY
 		// https://protobuf.dev/editions/features/#utf8_validation
 		IsUTF8Validated bool
+
 		// IsDelimitedEncoded is true if message_encoding is DELIMITED
 		// https://protobuf.dev/editions/features/#message_encoding
 		IsDelimitedEncoded bool
+
 		// IsJSONCompliant is true if json_format is ALLOW
 		// https://protobuf.dev/editions/features/#json_format
 		IsJSONCompliant bool
+
 		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
 		// UnmarshalJSON([]byte) error method for enums.
 		GenerateLegacyUnmarshalJSON bool
+		// APILevel controls which API (Open, Hybrid or Opaque) should be used
+		// for generated code (.pb.go files).
+		APILevel int
 	}
 )
 
@@ -258,6 +276,7 @@ type (
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsWeak           bool // promoted from google.protobuf.FieldOptions
+		IsLazy           bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
@@ -351,6 +370,7 @@ func (fd *Field) IsPacked() bool {
 }
 func (fd *Field) IsExtension() bool { return false }
 func (fd *Field) IsWeak() bool      { return fd.L1.IsWeak }
+func (fd *Field) IsLazy() bool      { return fd.L1.IsLazy }
 func (fd *Field) IsList() bool      { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
 func (fd *Field) IsMap() bool       { return fd.Message() != nil && fd.Message().IsMapEntry() }
 func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -425,6 +445,7 @@ type (
 		Extendee        protoreflect.MessageDescriptor
 		Cardinality     protoreflect.Cardinality
 		Kind            protoreflect.Kind
+		IsLazy          bool
 		EditionFeatures EditionFeatures
 	}
 	ExtensionL2 struct {
@@ -465,6 +486,7 @@ func (xd *Extension) IsPacked() bool {
 }
 func (xd *Extension) IsExtension() bool                      { return true }
 func (xd *Extension) IsWeak() bool                           { return false }
+func (xd *Extension) IsLazy() bool                           { return xd.L1.IsLazy }
 func (xd *Extension) IsList() bool                           { return xd.Cardinality() == protoreflect.Repeated }
 func (xd *Extension) IsMap() bool                            { return false }
 func (xd *Extension) MapKey() protoreflect.FieldDescriptor   { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b0..d2f549497 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
 				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				xd.L1.IsLazy = protowire.DecodeBool(v)
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8d..67a51b327 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
 				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
 			case genid.FieldOptions_Weak_field_number:
 				fd.L1.IsWeak = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				fd.L1.IsLazy = protowire.DecodeBool(v)
 			case FieldOptions_EnforceUTF8:
 				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356b..10132c9b3 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+		case genid.GoFeatures_ApiLevel_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.APILevel = int(v)
+		case genid.GoFeatures_StripEnumPrefix_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.StripEnumPrefix = int(v)
 		default:
 			panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
 		}
@@ -68,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeBytes(b)
 			b = b[m:]
 			switch num {
-			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+			case genid.FeatureSet_Go_ext_number:
 				parent = unmarshalGoFeature(v, parent)
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd0121..d9b9d916a 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
 // and the well-known types.
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b4..f5ee7f5c2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,59 @@ import (
 
 const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
 
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
 const (
 	GoFeatures_message_name     protoreflect.Name     = "GoFeatures"
-	GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+	GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
 )
 
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_name                protoreflect.Name = "api_level"
+	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
 
-	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_fullname                protoreflect.FullName = "pb.GoFeatures.api_level"
+	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
 )
 
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+	GoFeatures_ApiLevel_field_number                protoreflect.FieldNumber = 2
+	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
+)
+
+// Full and short names for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
+	GoFeatures_APILevel_enum_name     = "APILevel"
+)
+
+// Enum values for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
+	GoFeatures_API_OPEN_enum_value              = 1
+	GoFeatures_API_HYBRID_enum_value            = 2
+	GoFeatures_API_OPAQUE_enum_value            = 3
+)
+
+// Full and short names for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
+	GoFeatures_StripEnumPrefix_enum_name     = "StripEnumPrefix"
+)
+
+// Enum values for pb.GoFeatures.StripEnumPrefix.
+const (
+	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value   = 0
+	GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value          = 1
+	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
+	GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value         = 3
+)
+
+// Extension numbers
+const (
+	FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
 )
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02ff..bef5a25fb 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field names and numbers for synthetic map entry messages.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
new file mode 100644
index 000000000..224f33930
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+const (
+	NoUnkeyedLiteral_goname  = "noUnkeyedLiteral"
+	NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
+
+	BuilderSuffix_goname = "_builder"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b85..9404270de 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field name and number for messages in wrappers.proto.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
new file mode 100644
index 000000000..6075d6f69
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"strconv"
+	"sync/atomic"
+	"unsafe"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (Export) UnmarshalField(msg any, fieldNum int32) {
+	UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
+}
+
+// Present checks the presence set for a certain field number (zero
+// based, ordered by appearance in original proto file). part is
+// a pointer to the correct element in the bitmask array, num is the
+// field number unaltered.  Example (field number 70 -> part =
+// &m.XXX_presence[2], num = 70)
+func (Export) Present(part *uint32, num uint32) bool {
+	// This hook will read an unprotected shadow presence set if
+	// we're unning under the race detector
+	// we're running under the race detector
+	return atomic.LoadUint32(part)&(1<<(num%32)) > 0
+}
+
+// SetPresent adds a field to the presence set. part is a pointer to
+// the relevant element in the array and num is the field number
+// unaltered.  size is the number of fields in the protocol
+// buffer.
+func (Export) SetPresent(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
+// It is meant for use by builder methods, where the message is known not
+// to be accessible yet by other goroutines.
+func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	*part |= 1 << (num % 32)
+}
+
+// ClearPresent removes a field from the presence set. part is a
+// pointer to the relevant element in the presence array and num is
+// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookClearPresent(part, num)
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// interfaceToPointer takes a pointer to an empty interface whose value is a
+// pointer type, and converts it into a "pointer" that points to the same
+// target
+func interfaceToPointer(i *any) pointer {
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+func (p pointer) atomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) atomicSetPointer(q pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
+}
+
+// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
+// pointer) and returns true if the pointed-to pointer is nil (using an
+// atomic load).  This function is inlineable and, on x86, just becomes a
+// simple load and compare.
+func (Export) AtomicCheckPointerIsNil(ptr any) bool {
+	return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
+}
+
+// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
+// second is a pointer) and atomically sets the second pointer into location
+// referenced by first pointer.  Unfortunately, atomicSetPointer() does not inline
+// (even on x86), so this does not become a simple store on x86.
+func (Export) AtomicSetPointer(dstPtr, valPtr any) {
+	interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
+}
+
+// AtomicLoadPointer loads the pointer at the location pointed at by src,
+// and stores that pointer value into the location pointed at by dst.
+func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
+	*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+}
+
+// AtomicInitializePointer makes ptr and dst point to the same value.
+//
+// If *ptr is a nil pointer, it sets *ptr = *dst.
+//
+// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
+	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
+		*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+	}
+}
+
+// MessageFieldStringOf returns the field formatted as a string,
+// either as the field name if resolvable otherwise as a decimal string.
+func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
+	fd := md.Fields().ByNumber(n)
+	if fd != nil {
+		return string(fd.Name())
+	}
+	return strconv.Itoa(int(n))
+}
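
For context: the presence helpers index a []uint32 bitmask, with field bit num in word num/32 at bit num%32; that is why the doc example for field 70 points at XXX_presence[2]. A sketch of the arithmetic with the atomics stripped out:

package main

import "fmt"

func main() {
	presence := make([]uint32, 3) // room for 96 presence bits
	num := uint32(70)
	part := &presence[num/32] // word index 2 for field 70
	*part |= 1 << (num % 32)  // what SetPresentNonAtomic does
	fmt.Println(*part&(1<<(num%32)) != 0) // Present, minus the atomic load: true
}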
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
new file mode 100644
index 000000000..ea276547c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package impl
+
+// There is no additional data as we're not running under race detector.
+type RaceDetectHookData struct{}
+
+// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
+func (presence) raceDetectHookPresent(num uint32)                       {}
+func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
+func (presence) raceDetectHookClearPresent(num uint32)                  {}
+func (presence) raceDetectHookAllocAndCopy(src presence)                {}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
new file mode 100644
index 000000000..e9a27583a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package impl
+
+// When running under the race detector, we add a presence map of bytes that we can access
+// in the hook functions so that we trigger the race detection whenever we have concurrent
+// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
+// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
+type RaceDetectHookData struct {
+	shadowPresence *[]byte
+}
+
+// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
+// using non-atomic operations.
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
+	sp := make([]byte, size)
+	atomicStoreShadowPresence(&data.shadowPresence, &sp)
+}
+
+func (p presence) raceDetectHookPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+func (p presence) raceDetectHookClearPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+	}
+}
+
+// raceDetectHookAllocAndCopy allocates a new shadowPresence slice for p and copies
+// shadowPresence bytes from q to p.
+func (p presence) raceDetectHookAllocAndCopy(q presence) {
+	sData := q.toRaceDetectData()
+	dData := p.toRaceDetectData()
+	if sData == nil {
+		return
+	}
+	srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
+	if srcSp == nil {
+		atomicStoreShadowPresence(&dData.shadowPresence, nil)
+		return
+	}
+	n := len(*srcSp)
+	dSlice := make([]byte, n)
+	atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
+	for i := 0; i < n; i++ {
+		dSlice[i] = (*srcSp)[i]
+	}
+}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+	}
+}
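
For context: bitmap.go and bitmap_race.go pair on build tags, so the shadow-presence bookkeeping compiles only under -race and the hooks collapse to no-ops otherwise. A minimal sketch of that pattern with a hypothetical hook:

// file: hook.go
//go:build !race

package p

func hook() {} // no-op; calls compile away in regular builds

// file: hook_race.go
//go:build race

package p

var touched []byte

func hook() { touched = append(touched, 1) } // extra state the race detector can observe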
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index f29e6a8fa..fe2c719ce 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		}
 		return nil
 	}
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	if mi.extensionOffset.IsValid() {
 		e := p.Apply(mi.extensionOffset).Extensions()
 		if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		if !f.isRequired && f.funcs.isInit == nil {
 			continue
 		}
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				if f.isRequired {
+					return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+				}
+				continue
+			}
+			if f.funcs.isInit != nil {
+				f.mi.init()
+				if f.mi.needsInitCheck {
+					if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
+						lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+						if !lazy.AllowedPartial() {
+							// Nothing to see here, it was checked on unmarshal
+							continue
+						}
+						mi.lazyUnmarshal(p, f.num)
+					}
+					if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+		}
+
 		fptr := p.Apply(f.offset)
 		if f.isPointer && fptr.Elem().IsNil() {
 			if f.isRequired {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a20..0d5b546e0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
 	xi         *extensionFieldInfo
 	value      protoreflect.Value
 	b          []byte
-	fn         func() protoreflect.Value
 }
 
 type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
 		}
 		f.lazy.value = val
 	} else {
-		f.lazy.value = f.lazy.fn()
+		panic("No support for lazy fns for ExtensionField")
 	}
 	f.lazy.xi = nil
-	f.lazy.fn = nil
 	f.lazy.b = nil
 	atomic.StoreUint32(&f.lazy.atomicOnce, 1)
 }
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
 	f.lazy = nil
 }
 
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
-	f.typ = t
-	f.lazy = &lazyExtensionValue{fn: fn}
-}
-
 // Value returns the value of the extension field.
 // This may be called concurrently.
 func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e44..7c1f66c8c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
 			if err != nil {
 				return out, err
 			}
+			if cf.funcs.isInit == nil {
+				out.initialized = true
+			}
 			vi.Set(vw)
 			return out, nil
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
new file mode 100644
index 000000000..76818ea25
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
@@ -0,0 +1,264 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	mi := getMessageInfo(ft)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessage,
+			marshal:   appendOpaqueMessage,
+			unmarshal: consumeOpaqueMessage,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroup,
+			marshal:   appendOpaqueGroup,
+			unmarshal: consumeOpaqueGroup,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
+}
+
+func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	mp := p.AtomicGetPointer()
+	calculatedSize := f.mi.sizePointer(mp, opts)
+	b = protowire.AppendVarint(b, f.wiretag)
+	b = protowire.AppendVarint(b, uint64(calculatedSize))
+	before := len(b)
+	b, err := f.mi.marshalAppendPointer(b, mp, opts)
+	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+	}
+	return b, err
+}
+
+func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		return nil
+	}
+	return f.mi.checkInitializedPointer(mp)
+}
+
+func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	dstmp := dst.AtomicGetPointer()
+	if dstmp.IsNil() {
+		dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
+}
+
+func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
+}
+
+func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	b = protowire.AppendVarint(b, f.wiretag) // start group
+	b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
+	b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	return b, err
+}
+
+func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
+	return o, e
+}
+
+func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
+	}
+	mt := ft.Elem().Elem() // *[]*T -> *T
+	mi := getMessageInfo(mt)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessageSlice,
+			marshal:   appendOpaqueMessageSlice,
+			unmarshal: consumeOpaqueMessageSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroupSlice,
+			marshal:   appendOpaqueGroupSlice,
+			unmarshal: consumeOpaqueGroupSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+	}
+	return n
+}
+
+func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag)
+		siz := f.mi.sizePointer(v, opts)
+		b = protowire.AppendVarint(b, uint64(siz))
+		before := len(b)
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
+	}
+	return b, nil
+}
+
+func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		return nil
+	}
+	s := sp.PointerSlice()
+	for _, v := range s {
+		if err := f.mi.checkInitializedPointer(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	ds := dst.AtomicGetPointer()
+	if ds.IsNil() {
+		ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	for _, sp := range src.AtomicGetPointer().PointerSlice() {
+		dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+		f.mi.mergePointer(dm, sp, opts)
+		ds.AppendPointerSlice(dm)
+	}
+}
+
+func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+	}
+	return n
+}
+
+func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag) // start group
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	}
+	return b, nil
+}
+
+func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	return out, err
+}
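
For context: several opaque coders above cross-check the precomputed size against the bytes actually written, failing fast on any mismatch. A sketch of that invariant with hypothetical sizer/marshaler callbacks:

package main

import "fmt"

// appendChecked appends via marshal and verifies it wrote exactly what
// size predicted, mirroring the MismatchedSizeCalculation guard above.
func appendChecked(b []byte, size func() int, marshal func([]byte) ([]byte, error)) ([]byte, error) {
	calculated := size()
	before := len(b)
	b, err := marshal(b)
	if measured := len(b) - before; err == nil && measured != calculated {
		return nil, fmt.Errorf("size mismatch: calculated %d, wrote %d", calculated, measured)
	}
	return b, err
}

func main() {
	payload := []byte("abc")
	b, err := appendChecked(nil,
		func() int { return len(payload) },
		func(b []byte) ([]byte, error) { return append(b, payload...), nil })
	fmt.Println(string(b), err) // abc <nil>
}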
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb73..2f7b363ec 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,6 +32,10 @@ type coderMessageInfo struct {
 	needsInitCheck     bool
 	isMessageSet       bool
 	numRequiredFields  uint8
+
+	lazyOffset     offset
+	presenceOffset offset
+	presenceSize   presenceSize
 }
 
 type coderFieldInfo struct {
@@ -45,12 +49,19 @@ type coderFieldInfo struct {
 	tagsize    int                      // size of the varint-encoded tag
 	isPointer  bool                     // true if IsNil may be called on the struct field
 	isRequired bool                     // true if field is required
+
+	isLazy        bool
+	presenceIndex uint32
 }
 
+const noPresence = 0xffffffff
+
 func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	mi.sizecacheOffset = invalidOffset
 	mi.unknownOffset = invalidOffset
 	mi.extensionOffset = invalidOffset
+	mi.lazyOffset = invalidOffset
+	mi.presenceOffset = si.presenceOffset
 
 	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
 		mi.sizecacheOffset = si.sizecacheOffset
@@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 			validation: newFieldValidationInfo(mi, si, fd, ft),
 			isPointer:  fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
 			isRequired: fd.Cardinality() == protoreflect.Required,
+
+			presenceIndex: noPresence,
 		}
 		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
 		mi.coderFields[cf.num] = cf
@@ -189,6 +202,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	if mi.methods.Merge == nil {
 		mi.methods.Merge = mi.merge
 	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
 }
 
 // getUnknownBytes returns a *[]byte for the unknown fields.
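
The Equal method registered here is the fast path consulted by the public proto.Equal; when it is absent, proto.Equal falls back to the reflection-based comparison. A small usage sketch (durationpb chosen arbitrarily):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := durationpb.New(5 * time.Second)
	b := durationpb.New(5 * time.Second)
	fmt.Println(proto.Equal(a, b)) // true, via the registered Equal method
}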
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
new file mode 100644
index 000000000..88c16ae5b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -0,0 +1,156 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/order"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
+	mi.sizecacheOffset = si.sizecacheOffset
+	mi.unknownOffset = si.unknownOffset
+	mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+	mi.extensionOffset = si.extensionOffset
+	mi.lazyOffset = si.lazyOffset
+	mi.presenceOffset = si.presenceOffset
+
+	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+	fields := mi.Desc.Fields()
+	for i := 0; i < fields.Len(); i++ {
+		fd := fields.Get(i)
+
+		fs := si.fieldsByNumber[fd.Number()]
+		if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
+			fs = si.oneofsByName[fd.ContainingOneof().Name()]
+		}
+		ft := fs.Type
+		var wiretag uint64
+		if !fd.IsPacked() {
+			wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+		} else {
+			wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+		}
+		var fieldOffset offset
+		var funcs pointerCoderFuncs
+		var childMessage *MessageInfo
+		switch {
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			fieldOffset = offsetOf(fs, mi.Exporter)
+		case fd.IsWeak():
+			fieldOffset = si.weakOffset
+			funcs = makeWeakMessageFieldCoder(fd)
+		case fd.Message() != nil && !fd.IsMap():
+			fieldOffset = offsetOf(fs, mi.Exporter)
+			if fd.IsList() {
+				childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
+			} else {
+				childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
+			}
+		default:
+			fieldOffset = offsetOf(fs, mi.Exporter)
+			childMessage, funcs = fieldCoder(fd, ft)
+		}
+		cf := &coderFieldInfo{
+			num:        fd.Number(),
+			offset:     fieldOffset,
+			wiretag:    wiretag,
+			ft:         ft,
+			tagsize:    protowire.SizeVarint(wiretag),
+			funcs:      funcs,
+			mi:         childMessage,
+			validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
+			isPointer: (fd.Cardinality() == protoreflect.Repeated ||
+				fd.Kind() == protoreflect.MessageKind ||
+				fd.Kind() == protoreflect.GroupKind),
+			isRequired:    fd.Cardinality() == protoreflect.Required,
+			presenceIndex: noPresence,
+		}
+
+		// TODO: Use presence for all fields.
+		//
+		// In some cases, such as maps, presence means only "might be set" rather
+		// than "is definitely set", but every field should have a presence bit to
+		// permit us to skip over definitely-unset fields at marshal time.
+
+		var hasPresence bool
+		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+
+		if hasPresence {
+			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
+		}
+
+		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+		mi.coderFields[cf.num] = cf
+	}
+	for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+		if od := oneofs.Get(i); !od.IsSynthetic() {
+			mi.initOneofFieldCoders(od, si.structInfo)
+		}
+	}
+	if messageset.IsMessageSet(mi.Desc) {
+		if !mi.extensionOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+		}
+		if !mi.unknownOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+		}
+		mi.isMessageSet = true
+	}
+	sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+		return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+	})
+
+	var maxDense protoreflect.FieldNumber
+	for _, cf := range mi.orderedCoderFields {
+		if cf.num >= 16 && cf.num >= 2*maxDense {
+			break
+		}
+		maxDense = cf.num
+	}
+	mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+	for _, cf := range mi.orderedCoderFields {
+		if int(cf.num) >= len(mi.denseCoderFields) {
+			break
+		}
+		mi.denseCoderFields[cf.num] = cf
+	}
+
+	// To preserve compatibility with historic wire output, marshal oneofs last.
+	if mi.Desc.Oneofs().Len() > 0 {
+		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+			return order.LegacyFieldOrder(fi, fj)
+		})
+	}
+
+	mi.needsInitCheck = needsInitCheck(mi.Desc)
+	if mi.methods.Marshal == nil && mi.methods.Size == nil {
+		mi.methods.Flags |= piface.SupportMarshalDeterministic
+		mi.methods.Marshal = mi.marshal
+		mi.methods.Size = mi.size
+	}
+	if mi.methods.Unmarshal == nil {
+		mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+		mi.methods.Unmarshal = mi.unmarshal
+	}
+	if mi.methods.CheckInitialized == nil {
+		mi.methods.CheckInitialized = mi.checkInitialized
+	}
+	if mi.methods.Merge == nil {
+		mi.methods.Merge = mi.merge
+	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
+}
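
The maxDense loop above keeps a dense lookup slice only while field numbers stay small and reasonably contiguous; sparse outliers fall back to the coderFields map. A standalone sketch of the heuristic, using a hypothetical helper over plain ints:

package sketch

// denseLimit mirrors the cutoff above: stop densifying once a field number
// is at least 16 and at least twice the largest dense number seen so far.
func denseLimit(sortedNums []int) int {
	maxDense := 0
	for _, n := range sortedNums {
		if n >= 16 && n >= 2*maxDense {
			break
		}
		maxDense = n
	}
	return maxDense // e.g. denseLimit([]int{1, 2, 3, 100}) == 3
}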
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577bd..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"reflect"
-
-	"google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
-	v := p.v.Elem().Int()
-	return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	v := p.v.Elem().Int()
-	b = protowire.AppendVarint(b, f.wiretag)
-	b = protowire.AppendVarint(b, uint64(v))
-	return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	p.v.Elem().SetInt(int64(v))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
-	size:      sizeEnum,
-	marshal:   appendEnum,
-	unmarshal: consumeEnum,
-	merge:     mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	if p.v.Elem().Int() == 0 {
-		return 0
-	}
-	return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	if p.v.Elem().Int() == 0 {
-		return b, nil
-	}
-	return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if src.v.Elem().Int() != 0 {
-		dst.v.Elem().Set(src.v.Elem())
-	}
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
-	size:      sizeEnumNoZero,
-	marshal:   appendEnumNoZero,
-	unmarshal: consumeEnum,
-	merge:     mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	if p.v.Elem().IsNil() {
-		p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
-	}
-	return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if !src.v.Elem().IsNil() {
-		v := reflect.New(dst.v.Type().Elem().Elem())
-		v.Elem().Set(src.v.Elem().Elem())
-		dst.v.Elem().Set(v)
-	}
-}
-
-var coderEnumPtr = pointerCoderFuncs{
-	size:      sizeEnumPtr,
-	marshal:   appendEnumPtr,
-	unmarshal: consumeEnumPtr,
-	merge:     mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
-	}
-	return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		b = protowire.AppendVarint(b, f.wiretag)
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	s := p.v.Elem()
-	if wtyp == protowire.BytesType {
-		b, n := protowire.ConsumeBytes(b)
-		if n < 0 {
-			return out, errDecode
-		}
-		for len(b) > 0 {
-			v, n := protowire.ConsumeVarint(b)
-			if n < 0 {
-				return out, errDecode
-			}
-			rv := reflect.New(s.Type().Elem()).Elem()
-			rv.SetInt(int64(v))
-			s.Set(reflect.Append(s, rv))
-			b = b[n:]
-		}
-		out.n = n
-		return out, nil
-	}
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	rv := reflect.New(s.Type().Elem()).Elem()
-	rv.SetInt(int64(v))
-	s.Set(reflect.Append(s, rv))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
-	size:      sizeEnumSlice,
-	marshal:   appendEnumSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return 0
-	}
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return b, nil
-	}
-	b = protowire.AppendVarint(b, f.wiretag)
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	b = protowire.AppendVarint(b, uint64(n))
-	for i := 0; i < llen; i++ {
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
-	size:      sizeEnumPackedSlice,
-	marshal:   appendEnumPackedSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e23..077712c2c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 // When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55a..f72ddd882 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
 	return protoreflect.ValueOfString(v.Convert(stringType).String())
 }
 func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
-	// pref.Value.String never panics, so we go through an interface
+	// protoreflect.Value.String never panics, so we go through an interface
 	// conversion here to check the type.
 	s := v.Interface().(string)
 	if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520c2..e0dd21fa5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 		AllowPartial:   true,
 		DiscardUnknown: o.DiscardUnknown(),
 		Resolver:       o.resolver,
+
+		NoLazyDecoding: o.NoLazyDecoding(),
 	}
 }
 
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
 	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
 }
 
-func (o unmarshalOptions) IsDefault() bool {
-	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool   { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+	return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
+}
+
+func (o unmarshalOptions) CanBeLazy() bool {
+	if o.resolver != protoregistry.GlobalTypes {
+		return false
+	}
+	// We ignore UnmarshalInvalidateSizeCache even though it's not in the default set.
+	return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
 }
 
 var lazyUnmarshalOptions = unmarshalOptions{
 	resolver: protoregistry.GlobalTypes,
-	depth:    protowire.DefaultRecursionLimit,
+
+	flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
+
+	depth: protowire.DefaultRecursionLimit,
 }
 
 type unmarshalOutput struct {
@@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 	if flags.ProtoLegacy && mi.isMessageSet {
 		return unmarshalMessageSet(mi, b, p, opts)
 	}
+
+	lazyDecoding := LazyEnabled() // default
+	if opts.NoLazyDecoding() {
+		lazyDecoding = false // explicitly disabled
+	}
+	if mi.lazyOffset.IsValid() && lazyDecoding {
+		return mi.unmarshalPointerLazy(b, p, groupTag, opts)
+	}
+	return mi.unmarshalPointerEager(b, p, groupTag, opts)
+}
+
+// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
+// The corresponding function for lazy messages is unmarshalPointerLazy in lazy.go.
+func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+
 	initialized := true
 	var requiredMask uint64
 	var exts *map[int32]ExtensionField
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	start := len(b)
 	for len(b) > 0 {
 		// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 			if f.funcs.isInit != nil && !o.initialized {
 				initialized = false
 			}
+
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+
 		default:
 			// Possible extension.
 			if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
 		return out, errUnknown
 	}
 	if flags.LazyUnmarshalExtensions {
-		if opts.IsDefault() && x.canLazy(xt) {
+		if opts.CanBeLazy() && x.canLazy(xt) {
 			out, valid := skipExtension(b, xi, num, wtyp, opts)
 			switch valid {
 			case ValidationValid:
@@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
 		if n < 0 {
 			return out, ValidationUnknown
 		}
+
+		if opts.Validated() {
+			out.initialized = true
+			out.n = n
+			return out, ValidationValid
+		}
+
 		out, st := xi.validation.mi.validate(v, 0, opts)
 		out.n = n
 		return out, st
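
The NoLazyDecoding accessor is round-tripped onto proto.UnmarshalOptions by the Options method above, so callers can force eager decoding per call. A minimal sketch (structpb chosen arbitrarily):

package main

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	var msg structpb.Struct
	opts := proto.UnmarshalOptions{NoLazyDecoding: true} // bypass lazy decoding for this call
	if err := opts.Unmarshal(nil, &msg); err != nil {
		panic(err)
	}
}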
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd21224..b2e212291 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,8 @@ import (
 	"sync/atomic"
 
 	"google.golang.org/protobuf/internal/flags"
-	proto "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/internal/protolazy"
+	"google.golang.org/protobuf/proto"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
@@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		e := p.Apply(mi.extensionOffset).Extensions()
 		size += mi.sizeExtensions(e, opts)
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.size == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+
+			if f.isLazy && fptr.AtomicGetPointer().IsNil() {
+				if lazyFields(opts) {
+					size += (*lazy).SizeField(uint32(f.num))
+					continue
+				} else {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			size += f.funcs.size(fptr, f, opts)
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 			return b, err
 		}
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.marshal == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+			if f.isLazy {
+				// Be careful: this field needs to be read atomically, just like a get.
+				if f.isPointer && fptr.AtomicGetPointer().IsNil() {
+					if lazyFields(opts) {
+						b, _ = (*lazy).AppendField(b, uint32(f.num))
+						continue
+					} else {
+						mi.lazyUnmarshal(p, f.num)
+					}
+				}
+
+				b, err = f.funcs.marshal(b, fptr, f, opts)
+				if err != nil {
+					return b, err
+				}
+				continue
+			} else if f.isPointer && fptr.Elem().IsNil() {
+				continue
+			}
+			b, err = f.funcs.marshal(b, fptr, f, opts)
+			if err != nil {
+				return b, err
+			}
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool {
 	return opts.flags&piface.MarshalDeterministic == 0
 }
 
+// lazyFields reports whether fields should be kept lazy across Size and Marshal.
+func lazyFields(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// fields to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
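
As the lazyFields and fullyLazyExtensions comments note, deterministic marshaling and lazy byte pass-through are mutually exclusive: requesting determinism forces lazy fields to be unmarshaled and re-encoded. A minimal sketch from the public API (durationpb chosen arbitrarily):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	msg := durationpb.New(3 * time.Second)
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b)
}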
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 000000000..9f6c32a7d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"bytes"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations and type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+	if mx == nil || my == nil {
+		return mx == my
+	}
+	if mx.Descriptor() != my.Descriptor() {
+		return false
+	}
+
+	msx, ok := mx.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	msy, ok := my.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+
+	mi := msx.messageInfo()
+	miy := msy.messageInfo()
+	if mi != miy {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	mi.init()
+	// Compare regular fields.
+	// This is modified Message.Range code that compares two messages of the
+	// same type while walking over their fields.
+	for _, ri := range mi.rangeInfos {
+		var fd protoreflect.FieldDescriptor
+		var vx, vy protoreflect.Value
+
+		switch ri := ri.(type) {
+		case *fieldInfo:
+			hx := ri.has(msx.pointer())
+			hy := ri.has(msy.pointer())
+			if hx != hy {
+				return false
+			}
+			if !hx {
+				continue
+			}
+			fd = ri.fieldDesc
+			vx = ri.get(msx.pointer())
+			vy = ri.get(msy.pointer())
+		case *oneofInfo:
+			fnx := ri.which(msx.pointer())
+			fny := ri.which(msy.pointer())
+			if fnx != fny {
+				return false
+			}
+			if fnx <= 0 {
+				continue
+			}
+			fi := mi.fields[fnx]
+			fd = fi.fieldDesc
+			vx = fi.get(msx.pointer())
+			vy = fi.get(msy.pointer())
+		}
+
+		if !equalValue(fd, vx, vy) {
+			return false
+		}
+	}
+
+	// Compare extensions.
+	// This is more complicated because mx or my could have empty/nil extension maps,
+	// and a populated extension map can still compare equal to a nil one
+	// (for example, when it contains only empty lists).
+	emx := mi.extensionMap(msx.pointer())
+	emy := mi.extensionMap(msy.pointer())
+	if emx != nil {
+		for k, x := range *emx {
+			xd := x.Type().TypeDescriptor()
+			xv := x.Value()
+			var y ExtensionField
+			ok := false
+			if emy != nil {
+				y, ok = (*emy)[k]
+			}
+			// We need to treat empty lists as equal to nil values
+			if emy == nil || !ok {
+				if xd.IsList() && xv.List().Len() == 0 {
+					continue
+				}
+				return false
+			}
+
+			if !equalValue(xd, xv, y.Value()) {
+				return false
+			}
+		}
+	}
+	if emy != nil {
+		// emy may have extensions that emx does not have; check them as well.
+		for k, y := range *emy {
+			if emx != nil {
+				// emx has the field, so we already checked it
+				if _, ok := (*emx)[k]; ok {
+					continue
+				}
+			}
+			// Empty lists are equal to nil
+			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+				continue
+			}
+
+			// Can't be equal if the extension is populated.
+			return false
+		}
+	}
+
+	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+	// slow path
+	if fd.Kind() != protoreflect.MessageKind {
+		return vx.Equal(vy)
+	}
+
+	// fast path special cases
+	if fd.IsMap() {
+		if fd.MapValue().Kind() == protoreflect.MessageKind {
+			return equalMessageMap(vx.Map(), vy.Map())
+		}
+		return vx.Equal(vy)
+	}
+
+	if fd.IsList() {
+		return equalMessageList(vx.List(), vy.List())
+	}
+
+	return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for maps whose value type is a message.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+	if mx.Len() != my.Len() {
+		return false
+	}
+	equal := true
+	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+		if !my.Has(k) {
+			equal = false
+			return false
+		}
+		vy := my.Get(k)
+		equal = equalMessage(vx.Message(), vy.Message())
+		return equal
+	})
+	return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalMessage instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+	if lx.Len() != ly.Len() {
+		return false
+	}
+	for i := 0; i < lx.Len(); i++ {
+		// We only operate on messages here since equalValue will not call us in any other case.
+		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+			return false
+		}
+	}
+	return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	if bytes.Equal([]byte(x), []byte(y)) {
+		return true
+	}
+
+	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	for len(x) > 0 {
+		fnum, _, n := protowire.ConsumeField(x)
+		mx[fnum] = append(mx[fnum], x[:n]...)
+		x = x[n:]
+	}
+	for len(y) > 0 {
+		fnum, _, n := protowire.ConsumeField(y)
+		my[fnum] = append(my[fnum], y[:n]...)
+		y = y[n:]
+	}
+	if len(mx) != len(my) {
+		return false
+	}
+
+	for k, v1 := range mx {
+		if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+			return false
+		}
+	}
+
+	return true
+}
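
equalUnknown groups raw fields by field number before comparing, so unknown fields that appear in a different order across numbers still compare equal, while the bytes recorded under any one number must match exactly. A sketch constructing two such buffers (equalUnknown itself is unexported, so only the inputs are shown):

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	x := protowire.AppendTag(nil, 1, protowire.VarintType)
	x = protowire.AppendVarint(x, 7)
	x = protowire.AppendTag(x, 2, protowire.VarintType)
	x = protowire.AppendVarint(x, 9)

	y := protowire.AppendTag(nil, 2, protowire.VarintType)
	y = protowire.AppendVarint(y, 9)
	y = protowire.AppendTag(y, 1, protowire.VarintType)
	y = protowire.AppendVarint(y, 7)

	// Different byte order, same per-number contents: equalUnknown(x, y) would
	// report true even though the buffers are not byte-equal.
	fmt.Println(len(x) == len(y), bytes.Equal(x, y)) // true false
}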
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
new file mode 100644
index 000000000..e8fb6c35b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -0,0 +1,433 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math/bits"
+	"os"
+	"reflect"
+	"sort"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/protolazy"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	preg "google.golang.org/protobuf/reflect/protoregistry"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var enableLazy int32 = func() int32 {
+	if os.Getenv("GOPROTODEBUG") == "nolazy" {
+		return 0
+	}
+	return 1
+}()
+
+// EnableLazyUnmarshal enables or disables lazy unmarshaling.
+func EnableLazyUnmarshal(enable bool) {
+	if enable {
+		atomic.StoreInt32(&enableLazy, 1)
+		return
+	}
+	atomic.StoreInt32(&enableLazy, 0)
+}
+
+// LazyEnabled reports whether lazy unmarshalling is currently enabled.
+func LazyEnabled() bool {
+	return atomic.LoadInt32(&enableLazy) != 0
+}
+
+// UnmarshalField unmarshals a field in a message.
+func UnmarshalField(m interface{}, num protowire.Number) {
+	switch m := m.(type) {
+	case *messageState:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	case *messageReflectWrapper:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	default:
+		panic(fmt.Sprintf("unsupported wrapper type %T", m))
+	}
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+	var f *coderFieldInfo
+	if int(num) < len(mi.denseCoderFields) {
+		f = mi.denseCoderFields[num]
+	} else {
+		f = mi.coderFields[num]
+	}
+	if f == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
+	}
+	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+	if !found && multipleEntries == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+	}
+	// The actual pointer in the message cannot be set until the whole struct is
+	// filled in; otherwise we would have races. Create a separate pointer and set
+	// it atomically, provided we won the race and the pointer in the original
+	// message is still nil.
+	fp := pointerOfValue(reflect.New(f.ft))
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+		}
+	} else {
+		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+	}
+	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+	opts := lazyUnmarshalOptions
+	opts.flags |= flags
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return errors.New("invalid wire data")
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return errors.New("invalid wire data")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+		if num == f.num {
+			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+			if err == nil {
+				b = b[o.n:]
+				continue
+			}
+			if err != errUnknown {
+				return err
+			}
+		}
+		n := protowire.ConsumeFieldValue(num, wtyp, b)
+		if n < 0 {
+			return errors.New("invalid wire data")
+		}
+		b = b[n:]
+	}
+	return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+	fmi := f.validation.mi
+	if fmi == nil {
+		fd := mi.Desc.Fields().ByNumber(f.num)
+		if fd == nil || !fd.IsWeak() {
+			return out, ValidationUnknown
+		}
+		messageName := fd.Message().FullName()
+		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+		if err != nil {
+			return out, ValidationUnknown
+		}
+		var ok bool
+		fmi, ok = messageType.(*MessageInfo)
+		if !ok {
+			return out, ValidationUnknown
+		}
+	}
+	fmi.init()
+	switch f.validation.typ {
+	case validationTypeMessage:
+		if wtyp != protowire.BytesType {
+			return out, ValidationWrongWireType
+		}
+		v, n := protowire.ConsumeBytes(b)
+		if n < 0 {
+			return out, ValidationInvalid
+		}
+		out, st := fmi.validate(v, 0, opts)
+		out.n = n
+		return out, st
+	case validationTypeGroup:
+		if wtyp != protowire.StartGroupType {
+			return out, ValidationWrongWireType
+		}
+		out, st := fmi.validate(b, f.num, opts)
+		return out, st
+	default:
+		return out, ValidationUnknown
+	}
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshalling. It expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	initialized := true
+	var requiredMask uint64
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	var lazyIndex []protolazy.IndexEntry
+	var lastNum protowire.Number
+	outOfOrder := false
+	lazyDecode := false
+	presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+	if !presence.AnyPresent(mi.presenceSize) {
+		if opts.CanBeLazy() {
+			// If the message contains existing data, we need to merge into it.
+			// Lazy unmarshaling doesn't merge, so only enable it when the
+			// message is empty (no presence bits are set yet).
+			lazyDecode = true
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			(*lazy).SetUnmarshalFlags(opts.flags)
+			if !opts.AliasBuffer() {
+				// Make a copy of the buffer for lazy unmarshaling.
+				// Set the AliasBuffer flag so recursive unmarshal
+				// operations reuse the copy.
+				b = append([]byte{}, b...)
+				opts.flags |= piface.UnmarshalAliasBuffer
+			}
+			(*lazy).SetBuffer(b)
+		}
+	}
+	// Track special handling of lazy fields.
+	//
+	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+	// In the event that validation for a field fails, this map tracks handling of the field.
+	type lazyAction uint8
+	const (
+		lazyValidateOnly   lazyAction = iota // validate the field only
+		lazyUnmarshalNow                     // eagerly unmarshal the field
+		lazyUnmarshalLater                   // unmarshal the field after the message is fully processed
+	)
+	var lazyFields map[*coderFieldInfo]lazyAction
+	var exts *map[int32]ExtensionField
+	start := len(b)
+	pos := 0
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return out, errDecode
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return out, errors.New("invalid field number")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+
+		if wtyp == protowire.EndGroupType {
+			if num != groupTag {
+				return out, errors.New("mismatching end group marker")
+			}
+			groupTag = 0
+			break
+		}
+
+		var f *coderFieldInfo
+		if int(num) < len(mi.denseCoderFields) {
+			f = mi.denseCoderFields[num]
+		} else {
+			f = mi.coderFields[num]
+		}
+		var n int
+		err := errUnknown
+		discardUnknown := false
+	Field:
+		switch {
+		case f != nil:
+			if f.funcs.unmarshal == nil {
+				break
+			}
+			if f.isLazy && lazyDecode {
+				switch {
+				case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
+					// Attempt to validate this field and leave it for later lazy unmarshaling.
+					o, valid := mi.skipField(b, f, wtyp, opts)
+					switch valid {
+					case ValidationValid:
+						// Skip over the valid field and continue.
+						err = nil
+						presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+						requiredMask |= f.validation.requiredBit
+						if !o.initialized {
+							initialized = false
+						}
+						n = o.n
+						break Field
+					case ValidationInvalid:
+						return out, errors.New("invalid proto wire format")
+					case ValidationWrongWireType:
+						break Field
+					case ValidationUnknown:
+						if lazyFields == nil {
+							lazyFields = make(map[*coderFieldInfo]lazyAction)
+						}
+						if presence.Present(f.presenceIndex) {
+							// We were unable to determine if the field is valid or not,
+							// and we've already skipped over at least one instance of this
+							// field. Clear the presence bit (so if we stop decoding early,
+							// we don't leave a partially-initialized field around) and flag
+							// the field for unmarshaling before we return.
+							presence.ClearPresent(f.presenceIndex)
+							lazyFields[f] = lazyUnmarshalLater
+							discardUnknown = true
+							break Field
+						} else {
+							// We were unable to determine if the field is valid or not,
+							// but this is the first time we've seen it. Flag it as needing
+							// eager unmarshaling and fall through to the eager unmarshal case below.
+							lazyFields[f] = lazyUnmarshalNow
+						}
+					}
+				case lazyFields[f] == lazyUnmarshalLater:
+					// This field will be unmarshaled in a separate pass below.
+					// Skip over it here.
+					discardUnknown = true
+					break Field
+				default:
+					// Eagerly unmarshal the field.
+				}
+			}
+			if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
+				if p.Apply(f.offset).AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			var o unmarshalOutput
+			o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+			n = o.n
+			if err != nil {
+				break
+			}
+			requiredMask |= f.validation.requiredBit
+			if f.funcs.isInit != nil && !o.initialized {
+				initialized = false
+			}
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+		default:
+			// Possible extension.
+			if exts == nil && mi.extensionOffset.IsValid() {
+				exts = p.Apply(mi.extensionOffset).Extensions()
+				if *exts == nil {
+					*exts = make(map[int32]ExtensionField)
+				}
+			}
+			if exts == nil {
+				break
+			}
+			var o unmarshalOutput
+			o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+			if err != nil {
+				break
+			}
+			n = o.n
+			if !o.initialized {
+				initialized = false
+			}
+		}
+		if err != nil {
+			if err != errUnknown {
+				return out, err
+			}
+			n = protowire.ConsumeFieldValue(num, wtyp, b)
+			if n < 0 {
+				return out, errDecode
+			}
+			if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+				u := mi.mutableUnknownBytes(p)
+				*u = protowire.AppendTag(*u, num, wtyp)
+				*u = append(*u, b[:n]...)
+			}
+		}
+		b = b[n:]
+		end := start - len(b)
+		if lazyDecode && f != nil && f.isLazy {
+			if num != lastNum {
+				lazyIndex = append(lazyIndex, protolazy.IndexEntry{
+					FieldNum: uint32(num),
+					Start:    uint32(pos),
+					End:      uint32(end),
+				})
+			} else {
+				i := len(lazyIndex) - 1
+				lazyIndex[i].End = uint32(end)
+				lazyIndex[i].MultipleContiguous = true
+			}
+		}
+		if num < lastNum {
+			outOfOrder = true
+		}
+		pos = end
+		lastNum = num
+	}
+	if groupTag != 0 {
+		return out, errors.New("missing end group marker")
+	}
+	if lazyFields != nil {
+		// Some fields failed validation, and now need to be unmarshaled.
+		for f, action := range lazyFields {
+			if action != lazyUnmarshalLater {
+				continue
+			}
+			initialized = false
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
+				return out, err
+			}
+			presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+		}
+	}
+	if lazyDecode {
+		if outOfOrder {
+			sort.Slice(lazyIndex, func(i, j int) bool {
+				return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
+					(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
+						lazyIndex[i].Start < lazyIndex[j].Start)
+			})
+		}
+		if *lazy == nil {
+			*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+		}
+
+		(*lazy).SetIndex(lazyIndex)
+	}
+	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+		initialized = false
+	}
+	if initialized {
+		out.initialized = true
+	}
+	out.n = start - len(b)
+	return out, nil
+}
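
Both unmarshalField and unmarshalPointerLazy open with the same inline fast path for one- and two-byte tags before falling back to protowire.ConsumeVarint. A standalone sketch of that fast path as a hypothetical helper:

package sketch

import "google.golang.org/protobuf/encoding/protowire"

// parseTag decodes a varint-encoded tag, handling the common one- and
// two-byte cases inline exactly as the loops above do.
func parseTag(b []byte) (tag uint64, n int) {
	if len(b) > 0 && b[0] < 0x80 {
		return uint64(b[0]), 1
	}
	if len(b) >= 2 && b[1] < 0x80 {
		// Equivalent to the addition used above, since b[0]'s high bit is masked off.
		return uint64(b[0]&0x7f) | uint64(b[1])<<7, 2
	}
	return protowire.ConsumeVarint(b) // n < 0 signals invalid wire data
}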
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee6..b6849d669 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
 func (x placeholderExtension) HasOptionalKeyword() bool                           { return false }
 func (x placeholderExtension) IsExtension() bool                                  { return true }
 func (x placeholderExtension) IsWeak() bool                                       { return false }
+func (x placeholderExtension) IsLazy() bool                                       { return false }
 func (x placeholderExtension) IsPacked() bool                                     { return false }
 func (x placeholderExtension) IsList() bool                                       { return false }
 func (x placeholderExtension) IsMap() bool                                        { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 7e65f64f2..8ffdce67d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
 	if src.IsNil() {
 		return
 	}
+
+	var presenceSrc presence
+	var presenceDst presence
+	if mi.presenceOffset.IsValid() {
+		presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
+		presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.merge == nil {
 			continue
 		}
 		sfptr := src.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presenceSrc.Present(f.presenceIndex) {
+				continue
+			}
+			dfptr := dst.Apply(f.offset)
+			if f.isLazy {
+				if sfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(src, f.num)
+				}
+				if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(dst, f.num)
+				}
+			}
+			f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+			presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			continue
+		}
+
 		if f.isPointer && sfptr.Elem().IsNil() {
 			continue
 		}
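
This merge path backs the public proto.Merge; for presence-tracked lazy fields it materializes both sides before merging, then sets the destination presence bit. A minimal usage sketch (durationpb chosen arbitrarily):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	dst := durationpb.New(1 * time.Second)
	src := durationpb.New(2 * time.Second)
	proto.Merge(dst, src)         // populated scalar fields in src overwrite dst
	fmt.Println(dst.AsDuration()) // 2s
}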
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d45..fa10a0f5c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
 	// Desc is the underlying message descriptor type and must be populated.
 	Desc protoreflect.MessageDescriptor
 
-	// Exporter must be provided in a purego environment in order to provide
-	// access to unexported fields.
+	// Deprecated: Exporter will be removed the next time we bump
+	// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
 	Exporter exporter
 
 	// OneofWrappers is list of pointers to oneof wrapper struct types.
@@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() {
 	if mi.initDone == 1 {
 		return
 	}
+	if opaqueInitHook(mi) {
+		return
+	}
 
 	t := mi.GoReflectType
 	if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -133,6 +136,9 @@ type structInfo struct {
 	extensionOffset offset
 	extensionType   reflect.Type
 
+	lazyOffset     offset
+	presenceOffset offset
+
 	fieldsByNumber        map[protoreflect.FieldNumber]reflect.StructField
 	oneofsByName          map[protoreflect.Name]reflect.StructField
 	oneofWrappersByType   map[reflect.Type]protoreflect.FieldNumber
@@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
 		weakOffset:      invalidOffset,
 		unknownOffset:   invalidOffset,
 		extensionOffset: invalidOffset,
+		lazyOffset:      invalidOffset,
+		presenceOffset:  invalidOffset,
 
 		fieldsByNumber:        map[protoreflect.FieldNumber]reflect.StructField{},
 		oneofsByName:          map[protoreflect.Name]reflect.StructField{},
@@ -175,6 +183,10 @@ fieldLoop:
 				si.extensionOffset = offsetOf(f, mi.Exporter)
 				si.extensionType = f.Type
 			}
+		case "lazyFields", "XXX_lazyUnmarshalInfo":
+			si.lazyOffset = offsetOf(f, mi.Exporter)
+		case "XXX_presence":
+			si.presenceOffset = offsetOf(f, mi.Exporter)
 		default:
 			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
 				if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
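
The new cases above locate the lazy buffer and presence bitmap by struct field name. A sketch of the struct shape they target on the opaque API; only the protogen tag and the XXX_lazyUnmarshalInfo/XXX_presence names come from this patch, while the remaining field names and types are hypothetical:

package sketch

import (
	"google.golang.org/protobuf/internal/protolazy" // module-internal; shown for illustration only
	"google.golang.org/protobuf/runtime/protoimpl"
)

type exampleOpaque struct {
	state                 protoimpl.MessageState `protogen:"opaque.v1"` // prefix checked by isOpaque
	xxx_hidden_Name       *string                // hypothetical scalar field
	XXX_lazyUnmarshalInfo *protolazy.XXX_lazyUnmarshalInfo
	XXX_presence          [1]uint32 // presence bitmap, one bit per tracked field
	unknownFields         protoimpl.UnknownFields
	sizeCache             protoimpl.SizeCache
}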
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
new file mode 100644
index 000000000..d7ec53f07
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -0,0 +1,632 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type opaqueStructInfo struct {
+	structInfo
+}
+
+// isOpaque determines whether a protobuf message type is on the Opaque API.  It
+// checks whether the type is a Go struct that protoc-gen-go would generate.
+//
+// This function only detects newly generated messages from the v2
+// implementation of protoc-gen-go. It is unable to classify generated messages
+// that are too old or those that are generated by a different generator
+// such as protoc-gen-gogo.
+func isOpaque(t reflect.Type) bool {
+	// The current detection mechanism is to simply check the first field
+	// for a struct tag with the "protogen" key.
+	if t.Kind() == reflect.Struct && t.NumField() > 0 {
+		pgt := t.Field(0).Tag.Get("protogen")
+		return strings.HasPrefix(pgt, "opaque.")
+	}
+	return false
+}
+
+func opaqueInitHook(mi *MessageInfo) bool {
+	mt := mi.GoReflectType.Elem()
+	si := opaqueStructInfo{
+		structInfo: mi.makeStructInfo(mt),
+	}
+
+	if !isOpaque(mt) {
+		return false
+	}
+
+	defer atomic.StoreUint32(&mi.initDone, 1)
+
+	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+	fds := mi.Desc.Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		fs := si.fieldsByNumber[fd.Number()]
+		var fi fieldInfo
+		usePresence, _ := usePresenceForField(si, fd)
+
+		switch {
+		case fd.IsWeak():
+			// Weak fields are no different for opaque.
+			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			// Oneofs are no different for opaque.
+			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+		case fd.IsMap():
+			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil && usePresence:
+			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil:
+			// Proto3 lists without presence can use the same access methods as the open API.
+			fi = fieldInfoForList(fd, fs, mi.Exporter)
+		case fd.IsList() && usePresence:
+			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+		case fd.IsList():
+			// Proto3 message lists that do not need a presence bitmap.
+			// Different representation than the open struct, but the same logic.
+			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+		case fd.Message() != nil && usePresence:
+			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+		case fd.Message() != nil:
+			// Proto3 messages without presence can use the same access methods as the open API.
+			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+		default:
+			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+		}
+		mi.fields[fd.Number()] = &fi
+	}
+	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+		od := mi.Desc.Oneofs().Get(i)
+		mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
+	}
+
+	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+	for i := 0; i < fds.Len(); i++ {
+		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+		}
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+			i += od.Fields().Len()
+		} else {
+			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+			i++
+		}
+	}
+
+	mi.makeExtensionFieldsFunc(mt, si.structInfo)
+	mi.makeUnknownFieldsFunc(mt, si.structInfo)
+	mi.makeOpaqueCoderMethods(mt, si)
+	mi.makeFieldTypes(si.structInfo)
+
+	return true
+}
+
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+	oi := &oneofInfo{oneofDesc: od}
+	if od.IsSynthetic() {
+		fd := od.Fields().Get(0)
+		index, _ := presenceIndex(mi.Desc, fd)
+		oi.which = func(p pointer) protoreflect.FieldNumber {
+			if p.IsNil() {
+				return 0
+			}
+			if !mi.present(p, index) {
+				return 0
+			}
+			return od.Fields().Get(0).Number()
+		}
+		return oi
+	}
+	// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+	return makeOneofInfo(od, si, x)
+}
+
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+	}
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	conv := NewConverter(ft, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			// Don't bother checking presence bits, since we need to
+			// look at the map length even if the presence bit is set.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if v.IsNil() {
+				v.Set(reflect.MakeMap(fs.Type))
+			}
+			return conv.PBValueOf(v)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(reflect.PtrTo(ft), fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			}
+			mi.setPresent(p, index)
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv.Elem())
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			mi.setPresent(p, index)
+			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if !mi.present(p, index) {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			if !mi.present(p, index) {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					sp = p.Apply(fieldOffset).AtomicGetPointer()
+				} else {
+					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+					mi.setPresent(p, index)
+				}
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return false
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if !sp.IsNil() {
+				rv := sp.AsValueOf(fs.Type.Elem())
+				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+			}
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return conv.Zero()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic("invalid value: setting repeated field to read-only value")
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	nullable := fd.HasPresence()
+	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+		nullable = true
+	}
+	deref := false
+	if nullable && ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+		deref = true
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	var getter func(p pointer) protoreflect.Value
+	if !nullable {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+	}
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if nullable {
+				return mi.present(p, index)
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			switch rv.Kind() {
+			case reflect.Bool:
+				return rv.Bool()
+			case reflect.Int32, reflect.Int64:
+				return rv.Int() != 0
+			case reflect.Uint32, reflect.Uint64:
+				return rv.Uint() != 0
+			case reflect.Float32, reflect.Float64:
+				return rv.Float() != 0 || math.Signbit(rv.Float())
+			case reflect.String, reflect.Slice:
+				return rv.Len() > 0
+			default:
+				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+			}
+		},
+		clear: func(p pointer) {
+			if nullable {
+				mi.clearPresent(p, index)
+			}
+			// This is only valuable for bytes and strings, but we do it unconditionally.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if deref {
+				if rv.IsNil() {
+					rv.Set(reflect.New(ft))
+				}
+				rv = rv.Elem()
+			}
+
+			rv.Set(conv.GoValueOf(v))
+			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+				rv.Set(emptyBytes)
+			}
+			if nullable {
+				mi.setPresent(p, index)
+			}
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	elemType := fs.Type.Elem()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			return mi.present(p, index)
+		},
+		clear: func(p pointer) {
+			mi.clearPresent(p, index)
+			p.Apply(fieldOffset).AtomicSetNilPointer()
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				mp = fp.AtomicGetPointer()
+			}
+			rv := mp.AsValueOf(elemType)
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			val := pointerOfValue(conv.GoValueOf(v))
+			if val.IsNil() {
+				panic("invalid nil pointer")
+			}
+			p.Apply(fieldOffset).AtomicSetPointer(val)
+			mi.setPresent(p, index)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					mp = fp.AtomicGetPointer()
+				} else {
+					mp = pointerOfValue(conv.GoValueOf(conv.New()))
+					fp.AtomicSetPointer(mp)
+					mi.setPresent(p, index)
+				}
+			}
+			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+		},
+		newMessage: func() protoreflect.Message {
+			return conv.New().Message()
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
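+
+// Illustrative note: the lazy paths above rely on the invariant that a set
+// presence bit combined with a nil field pointer means "wire bytes not yet
+// decoded", so get and mutable first call mi.lazyUnmarshal(p, fieldNumber)
+// to materialize the message before returning it.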
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+	pvalueList
+	setPresence func(bool)
+}
+type pvalueList interface {
+	protoreflect.List
+	//Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+	list.pvalueList.Append(v)
+	list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+	list.pvalueList.Truncate(i)
+	list.setPresence(i > 0)
+}
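+
+// Illustrative sketch (hypothetical wiring, not upstream API): a
+// presenceList built for presence index i keeps the presence bit in sync
+// with the list contents; inner, mi, p, and i are placeholders here:
+//
+//	pl := presenceList{
+//		pvalueList: inner, // some protoreflect.List implementation
+//		setPresence: func(on bool) {
+//			if on {
+//				mi.setPresent(p, i)
+//			} else {
+//				mi.clearPresent(p, i)
+//			}
+//		},
+//	}
+//	pl.Append(v)   // marks the field present
+//	pl.Truncate(0) // empties the list and clears presence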
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+	found := false
+	var index, numIndices uint32
+	for i := 0; i < md.Fields().Len(); i++ {
+		f := md.Fields().Get(i)
+		if f == fd {
+			found = true
+			index = numIndices
+		}
+		if f.ContainingOneof() == nil || isLastOneofField(f) {
+			numIndices++
+		}
+	}
+	if !found {
+		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+	}
+	return index, presenceSize(numIndices)
+}
+
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+	fields := fd.ContainingOneof().Fields()
+	return fields.Get(fields.Len()-1) == fd
+}
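+
+// Illustrative example: for a hypothetical message with fields
+// (a=1, b=2, oneof o { c=3, d=4 }, e=5), presenceIndex assigns
+// a->0, b->1, c->2, d->2, e->3, because a oneof occupies a single
+// presence slot, counted at its last member.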
+
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
+
+// usePresenceForField implements the somewhat intricate logic for deciding
+// when the presence bitmap is used for a field. The main rule is that a
+// field that is optional or that can be lazy will use the presence bit,
+// but in proto2, maps also have a presence bit. It also records whether
+// the field can ever be lazy, which is true if we have a lazyOffset and
+// the field is a message or a slice of messages. A field that is lazy
+// always needs a presence bit. Oneofs are not lazy and do not use
+// presence, unless they are a synthetic oneof, which is a proto3
+// optional field. For proto3 optionals, we use presence, and they can
+// also be lazy when applicable (a message).
+func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
+
+	// Non-oneof scalar fields with explicit field presence use the presence array.
+	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		return false, false
+	case fd.IsWeak():
+		return false, false
+	case fd.IsMap():
+		return false, false
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		return hasLazyField, hasLazyField
+	default:
+		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
+	}
+}
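+
+// Illustrative consequences of the rules above: a proto3 `optional int32`
+// (synthetic oneof) uses presence but can never be lazy; a message field
+// marked lazy is both presence-tracked and lazy; a field in a real
+// (non-synthetic) oneof uses neither.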
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 000000000..a69825699
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for nullable opaque types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
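+
+// Illustrative note on the cross cases above: the Go storage type can
+// differ from the protobuf kind, e.g. a bytes field backed by a *string
+// is converted on the way out:
+//
+//	x := p.Apply(fieldOffset).StringPtr()
+//	return protoreflect.ValueOfBytes([]byte(**x)) // bytes field, string storage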
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index ecb4623d7..31c19b54f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		case fd.IsList():
 			if fd.Enum() != nil || fd.Message() != nil {
 				ft = fs.Type.Elem()
+
+				if ft.Kind() == reflect.Slice {
+					ft = ft.Elem()
+				}
+
 			}
 			isMessage = fd.Message() != nil
 		case fd.Enum() != nil:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b19..a74064620 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	ft := fs.Type
 	nullable := fd.HasPresence()
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+	var getter func(p pointer) protoreflect.Value
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
 			// This never occurs for generated message types.
@@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 		}
 	}
 	conv := NewConverter(ft, fd)
-
-	// TODO: Implement unsafe fast path?
 	fieldOffset := offsetOf(fs, x)
+
+	// Generate specialized getter functions to avoid going through reflect.Value
+	if nullable {
+		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	}
+
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
 			if p.IsNil() {
 				return false
 			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable {
-				return !rv.IsNil()
+				return !p.Apply(fieldOffset).Elem().IsNil()
 			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			switch rv.Kind() {
 			case reflect.Bool:
 				return rv.Bool()
@@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(reflect.Zero(rv.Type()))
 		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if nullable {
-				if rv.IsNil() {
-					return conv.Zero()
-				}
-				if rv.Kind() == reflect.Ptr {
-					rv = rv.Elem()
-				}
-			}
-			return conv.PBValueOf(rv)
-		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
 		set: func(p pointer, v protoreflect.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable && rv.Kind() == reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 000000000..af5e063a1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		elemType := fs.Type.Elem()
+		// Enums for nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+			if rv.IsNil() {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv.Elem())
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).BoolPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBool(**x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt32(**x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint32(**x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt64(**x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint64(**x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat32(**x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat64(**x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				if len(*x) == 0 {
+					return conv.Zero()
+				}
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for non nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).String()
+				if len(*x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).String()
+			return protoreflect.ValueOfString(*x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a2..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
-	index  int
-	export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
-	if len(f.Index) != 1 {
-		panic("embedded structs are not supported")
-	}
-	if f.PkgPath == "" {
-		return offset{index: f.Index[0]} // field is already exported
-	}
-	if x == nil {
-		panic("exporter must be provided for unexported field")
-	}
-	return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
-	return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
-	return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
-	return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
-	if f.export != nil {
-		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
-			return pointer{v: v}
-		}
-	}
-	return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
-	if got := p.v.Type().Elem(); got != t {
-		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
-	}
-	return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
-	return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string          { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
-	return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
-	return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
-	// TODO: reconsider this
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
-	sp := p.v.Elem()
-	sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
-	p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
-	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
-	in := p.v.Elem()
-	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
-	reflect.Copy(out, in)
-	p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
-func (ms *messageState) pointer() pointer                 { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
-	once sync.Once
-	m    messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
-	m.once.Do(func() {
-		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
-		m.m.mi = mi
-	})
-	return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d8..041ebde2d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,15 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 import (
 	"reflect"
 	"sync/atomic"
 	"unsafe"
+
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 const UnsafeEnabled = true
@@ -114,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p)
 func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
 func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+	return presence{P: p.p}
+}
 
 func (p pointer) Elem() pointer {
 	return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 000000000..38aa7b7dc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+		return v
+	}
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+		return p
+	}
+	return mi.Get()
+}
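+
+// Illustrative usage sketch: AtomicSetPointerIfNil gives first-writer-wins
+// initialization, so concurrent initializers converge on a single value
+// (fp and elemType are placeholders for a field pointer and its element
+// type):
+//
+//	sp := fp.AtomicGetPointer()
+//	if sp.IsNil() {
+//		sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(elemType)))
+//	}
+//	// sp is now the same pointer on every goroutine, whether or not
+//	// this goroutine won the CAS.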
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
new file mode 100644
index 000000000..914cb1ded
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -0,0 +1,142 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// presenceSize represents the size of a presence set, which should be
+// one more than the largest index in the set.
+type presenceSize uint32
+
+// presence is the internal representation of the bitmap array in a generated protobuf
+type presence struct {
+	// This is a pointer to the beginning of an array of uint32
+	P unsafe.Pointer
+}
+
+func (p presence) toElem(num uint32) (ret *uint32) {
+	const (
+		bitsPerByte = 8
+		siz         = unsafe.Sizeof(*ret)
+	)
+	// p.P points to an array of uint32, num is the bit in this array that the
+	// caller wants to check/manipulate. Calculate the index in the array that
+	// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
+	offset := uintptr(num) / (siz * bitsPerByte) * siz
+	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+	if p.P == nil {
+		return false
+	}
+	return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+	Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+	Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+	if p.P == nil {
+		return 0
+	}
+	return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It caches a new word if the bit falls outside the currently
+// cached word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+	if num/32 != *cachedElement {
+		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		*current = atomic.LoadUint32(q)
+		*cachedElement = num / 32
+	}
+	return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+	n := uintptr((size + 31) / 32)
+	for j := uintptr(0); j < n; j++ {
+		o := j * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		b := atomic.LoadUint32(q)
+		if b > 0 {
+			return true
+		}
+	}
+	return false
+}
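+
+// Illustrative sketch: checking and setting bit 76 in a presence set of
+// size 100 touches only the third uint32 word (76 / 32 == 2); p here is a
+// placeholder for a message pointer:
+//
+//	pr := p.Apply(mi.presenceOffset).PresenceInfo()
+//	if !pr.Present(76) {
+//		pr.SetPresent(76, presenceSize(100))
+//	}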
+
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// cannot have a struct type containing the array and the
+// RaceDetectHookData. Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bbd7..b534a3d6d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@ const (
 
 	// ValidationValid indicates that unmarshaling the message will succeed.
 	ValidationValid
+
+	// ValidationWrongWireType indicates that a validated field does not have
+	// the expected wire type.
+	ValidationWrongWireType
 )
 
 func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
new file mode 100644
index 000000000..82e5cab4a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
@@ -0,0 +1,364 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+	Buf []byte
+	Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+	return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+	i := b.Pos
+	l := len(b.Buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		v := b.Buf[i]
+		i++
+		x |= (uint64(v) & 0x7F) << shift
+		if v < 0x80 {
+			b.Pos = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint decodes a varint at the current position
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return b.DecodeVarintSlow()
+	}
+
+	var v uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) & 127
+	i++
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 35
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 42
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 49
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 56
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 63
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// DecodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint32(buf[i]), nil
+	} else if len(buf)-i < 5 {
+		v, err := b.DecodeVarintSlow()
+		return uint32(v), err
+	}
+
+	var v uint32
+	// we already checked the first byte
+	x = uint32(buf[i]) & 127
+	i++
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
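+
+// Illustrative example: 300 is encoded as the varint [0xAC, 0x02], so
+//
+//	r := NewBufferReader([]byte{0xAC, 0x02})
+//	x, err := r.DecodeVarint32() // x == 300, err == nil, r.Pos == 2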
+
+// SkipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+	wireType := tag & 0x7
+	switch protowire.Type(wireType) {
+	case protowire.VarintType:
+		err = b.SkipVarint()
+	case protowire.Fixed64Type:
+		err = b.SkipFixed64()
+	case protowire.BytesType:
+		var n uint32
+		n, err = b.DecodeVarint32()
+		if err == nil {
+			err = b.Skip(int(n))
+		}
+	case protowire.StartGroupType:
+		err = b.SkipGroup(tag)
+	case protowire.Fixed32Type:
+		err = b.SkipFixed32()
+	default:
+		err = fmt.Errorf("unexpected wire type (%d)", wireType)
+	}
+	return
+}
+
+// SkipGroup skips a group with the specified tag. It executes efficiently using a tag stack
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+	tagStack := make([]uint32, 0, 16)
+	tagStack = append(tagStack, tag)
+	var n uint32
+	for len(tagStack) > 0 {
+		tag, err = b.DecodeVarint32()
+		if err != nil {
+			return err
+		}
+		switch protowire.Type(tag & 0x7) {
+		case protowire.VarintType:
+			err = b.SkipVarint()
+		case protowire.Fixed64Type:
+			err = b.Skip(8)
+		case protowire.BytesType:
+			n, err = b.DecodeVarint32()
+			if err == nil {
+				err = b.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			tagStack = append(tagStack, tag)
+		case protowire.Fixed32Type:
+			err = b.SkipFixed32()
+		case protowire.EndGroupType:
+			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+				tagStack = tagStack[:len(tagStack)-1]
+			} else {
+				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SkipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+	i := b.Pos
+
+	if len(b.Buf)-i < 10 {
+		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+		if _, err := b.DecodeVarintSlow(); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	return errOverflow
+
+out:
+	b.Pos = i + 1
+	return nil
+}
+
+// Skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+	if len(b.Buf) < b.Pos+n {
+		return io.ErrUnexpectedEOF
+	}
+	b.Pos += n
+	return
+}
+
+// SkipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+	return b.Skip(8)
+}
+
+// SkipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+	return b.Skip(4)
+}
+
+// SkipBytes skips a length-prefixed set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+	n, err := b.DecodeVarint32()
+	if err != nil {
+		return err
+	}
+	return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+	return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+	return len(b.Buf) - b.Pos
+}
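+
+// Illustrative usage sketch: walking every field of an encoded message by
+// alternating tag decoding and value skipping (buf is a placeholder for
+// some valid wire-format message):
+//
+//	r := NewBufferReader(buf)
+//	for !r.Done() {
+//		tag, err := r.DecodeVarint32()
+//		if err != nil {
+//			return err
+//		}
+//		if err := r.SkipValue(tag); err != nil {
+//			return err
+//		}
+//	}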
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 000000000..ff4d4834b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+	"fmt"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is one entry in the index of the fields in a message of a
+// proto (the index does not descend into sub-messages)
+type IndexEntry struct {
+	FieldNum uint32
+	// first byte of this tag/field
+	Start uint32
+	// first byte after a contiguous sequence of bytes for this tag/field, which could
+	// include a single encoding of the field, or multiple encodings for the field
+	End uint32
+	// True if this protobuf segment includes multiple encodings of the field
+	MultipleContiguous bool
+}
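+
+// Illustrative example: for a protobuf encoded as field 1 (bytes 0-3, two
+// contiguous encodings) followed by field 3 (bytes 4-8), buildIndex would
+// produce:
+//
+//	[]IndexEntry{
+//		{FieldNum: 1, Start: 0, End: 4, MultipleContiguous: true},
+//		{FieldNum: 3, Start: 4, End: 9},
+//	}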
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+	// Index of fields and their positions in the protobuf for this
+	// message.  Make index be a pointer to a slice so it can be updated
+	// atomically.  The index pointer is only set once (lazily when/if
+	// the index is first needed), and must always be SET and LOADED
+	// ATOMICALLY.
+	index *[]IndexEntry
+	// The protobuf associated with this lazily decoded message.  It is
+	// only set during proto.Unmarshal().  It doesn't need to be set and
+	// loaded atomically, since any simultaneous set (Unmarshal) and read
+	// (during a get) would already be a race in the app code.
+	Protobuf []byte
+	// The flags present when Unmarshal was originally called for this particular message
+	unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+	return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+	lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+	lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+	return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+	return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the
+// index array and an error.
+func buildIndex(buf []byte) ([]IndexEntry, error) {
+	index := make([]IndexEntry, 0, 16)
+	var lastProtoFieldNum uint32
+	var outOfOrder bool
+
+	var r BufferReader = NewBufferReader(buf)
+
+	for !r.Done() {
+		var tag uint32
+		var err error
+		var curPos = r.Pos
+		// INLINED: tag, err = r.DecodeVarint32()
+		{
+			i := r.Pos
+			buf := r.Buf
+
+			if i >= len(buf) {
+				return nil, errOutOfBounds
+			} else if buf[i] < 0x80 {
+				r.Pos++
+				tag = uint32(buf[i])
+			} else if r.Remaining() < 5 {
+				var v uint64
+				v, err = r.DecodeVarintSlow()
+				tag = uint32(v)
+			} else {
+				var v uint32
+				// we already checked the first byte
+				tag = uint32(buf[i]) & 127
+				i++
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 7
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 14
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 21
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 28
+				if v < 128 {
+					goto done
+				}
+
+				return nil, errOutOfBounds
+
+			done:
+				r.Pos = i
+			}
+		}
+		// DONE: tag, err = r.DecodeVarint32()
+
+		fieldNum := protoFieldNumber(tag)
+		if fieldNum < lastProtoFieldNum {
+			outOfOrder = true
+		}
+
+		// Skip the current value -- will skip over an entire group as well.
+		// INLINED: err = r.SkipValue(tag)
+		wireType := tag & 0x7
+		switch protowire.Type(wireType) {
+		case protowire.VarintType:
+			// INLINED: err = r.SkipVarint()
+			i := r.Pos
+
+			if len(r.Buf)-i < 10 {
+				// Use DecodeVarintSlow() to skip while
+				// checking for buffer overflow, but ignore result
+				_, err = r.DecodeVarintSlow()
+				goto out2
+			}
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			return nil, errOverflow
+		out:
+			r.Pos = i + 1
+			// DONE: err = r.SkipVarint()
+		case protowire.Fixed64Type:
+			err = r.SkipFixed64()
+		case protowire.BytesType:
+			var n uint32
+			n, err = r.DecodeVarint32()
+			if err == nil {
+				err = r.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			err = r.SkipGroup(tag)
+		case protowire.Fixed32Type:
+			err = r.SkipFixed32()
+		default:
+			err = fmt.Errorf("unexpected wire type (%d)", wireType)
+		}
+		// DONE: err = r.SkipValue(tag)
+
+	out2:
+		if err != nil {
+			return nil, err
+		}
+		if fieldNum != lastProtoFieldNum {
+			index = append(index, IndexEntry{FieldNum: fieldNum,
+				Start: uint32(curPos),
+				End:   uint32(r.Pos)},
+			)
+		} else {
+			index[len(index)-1].End = uint32(r.Pos)
+			index[len(index)-1].MultipleContiguous = true
+		}
+		lastProtoFieldNum = fieldNum
+	}
+	if outOfOrder {
+		sort.Slice(index, func(i, j int) bool {
+			return index[i].FieldNum < index[j].FieldNum ||
+				(index[i].FieldNum == index[j].FieldNum &&
+					index[i].Start < index[j].Start)
+		})
+	}
+	return index, nil
+}
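+
+// Illustrative note: if fields arrive out of order on the wire (e.g.
+// 1, 3, 1), the sort above orders the index by (FieldNum, Start), leaving
+// two non-contiguous entries for field 1 that lookupField reports via its
+// multipleEntries return value.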
+
+func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			size += int(entry.End - entry.Start)
+		}
+		return size
+	}
+	if !found {
+		return 0
+	}
+	return int(end - start)
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+		}
+		return b, true
+	}
+	if !found {
+		return nil, false
+	}
+	b = append(b, lazy.Protobuf[start:end]...)
+	return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+	atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in the lazyUnmarshalInfo
+// information (including the protobuf) and returns startOffset/endOffset/found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+	if lazy.Protobuf == nil {
+		// There is no backing protobuf for this message -- it was made from a builder
+		return 0, 0, false, false, nil
+	}
+	index := atomicLoadIndex(&lazy.index)
+	if index == nil {
+		r, err := buildIndex(lazy.Protobuf)
+		if err != nil {
+			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+		}
+		// lazy.index is a pointer to the slice returned by buildIndex
+		index = &r
+		atomicStoreIndex(&lazy.index, index)
+	}
+	return lookupField(index, fieldNum)
+}
+
+// lookupField returns the offset at which the indicated field starts using
+// the index, offset immediately after field ends (including all instances of
+// a repeated field), and bools indicating if field was found and if there
+// are multiple encodings of the field in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries. If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+	// The pointer indexp to the index was already loaded atomically.
+	// The slice is uniquely associated with the pointer, so it doesn't
+	// need to be loaded atomically.
+	index := *indexp
+	for i, entry := range index {
+		if fieldNum == entry.FieldNum {
+			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+				// Handle the uncommon case where there are
+				// repeated entries for the same field which
+				// are not contiguous in the protobuf.
+				multiple := make([]IndexEntry, 1, 2)
+				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+				i++
+				for i < len(index) && index[i].FieldNum == fieldNum {
+					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+					i++
+				}
+				return 0, 0, false, false, multiple
+
+			}
+			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+		}
+		if fieldNum < entry.FieldNum {
+			return 0, 0, false, false, nil
+		}
+	}
+	return 0, 0, false, false, nil
+}
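
For orientation: the index built above records, for every top-level field of a
lazily decoded message, the byte range its wire encoding occupies, so that
SizeField and AppendField can later size and copy a field without a full
unmarshal. The following standalone sketch reproduces that bookkeeping with the
public protowire package; entry and buildIndex here are illustrative stand-ins
for the internal IndexEntry/buildIndex, not the vendored API (the vendored code
additionally merges contiguous repeats and sorts out-of-order fields):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/encoding/protowire"
	)

	// entry mirrors the shape of protolazy's IndexEntry, for illustration only.
	type entry struct {
		num        protowire.Number
		start, end int
	}

	// buildIndex records the byte range of every top-level field in b,
	// the same bookkeeping the vendored buildIndex does with its skip switch.
	func buildIndex(b []byte) ([]entry, error) {
		var index []entry
		for pos := 0; pos < len(b); {
			num, typ, n := protowire.ConsumeTag(b[pos:])
			if n < 0 {
				return nil, protowire.ParseError(n)
			}
			// ConsumeFieldValue skips the value for any wire type, standing in
			// for the explicit Varint/Fixed64/Bytes/Group/Fixed32 cases above.
			m := protowire.ConsumeFieldValue(num, typ, b[pos+n:])
			if m < 0 {
				return nil, protowire.ParseError(m)
			}
			index = append(index, entry{num, pos, pos + n + m})
			pos += n + m
		}
		return index, nil
	}

	func main() {
		var b []byte
		b = protowire.AppendTag(b, 1, protowire.VarintType) // field 1 = varint 150
		b = protowire.AppendVarint(b, 150)
		b = protowire.AppendTag(b, 2, protowire.BytesType) // field 2 = bytes "hi"
		b = protowire.AppendBytes(b, []byte("hi"))

		idx, _ := buildIndex(b)
		for _, e := range idx {
			fmt.Printf("field %d: bytes [%d, %d)\n", e.num, e.start, e.end)
		}
		// field 1: bytes [0, 3)
		// field 2: bytes [3, 7)
	}
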
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 000000000..dc2a64ca6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
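
The unsafe casts in this helper exist because lazy.index is declared as a plain
**[]IndexEntry and must be published atomically after the one-time index build.
As a point of comparison only (not part of this patch), the same publish-once
pattern can be written without package unsafe on Go 1.19+ via the generic
sync/atomic.Pointer; a minimal sketch with an illustrative indexEntry type:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// indexEntry is an illustrative stand-in for protolazy.IndexEntry.
	type indexEntry struct{ fieldNum, start, end uint32 }

	// idx plays the role of lazy.index: built at most once, then shared.
	var idx atomic.Pointer[[]indexEntry]

	func main() {
		if idx.Load() == nil { // analogous to atomicLoadIndex
			built := []indexEntry{{fieldNum: 1, start: 0, end: 3}}
			idx.Store(&built) // analogous to atomicStoreIndex
		}
		fmt.Println(*idx.Load()) // [{1 0 3}]
	}
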
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f3338..000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
-	return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
-	return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
-	return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
-	return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd09..832a7988f 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2ba..1ffddf687 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f686..386c823aa 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,7 +51,7 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 34
+	Minor      = 36
 	Patch      = 2
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index d75a6534c..a3b5e142d 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -47,6 +47,12 @@ type UnmarshalOptions struct {
 	// RecursionLimit limits how deeply messages may be nested.
 	// If zero, a default limit is applied.
 	RecursionLimit int
+
+	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+	// default. Lazy decoding only affects submessages (annotated with [lazy =
+	// true] in the .proto file) within messages that use the Opaque API.
+	NoLazyDecoding bool
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
@@ -104,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
+
+		if !allowPartial {
+			// This does not affect how current unmarshal functions work; it just
+			// allows them to record this for the lazy decoding case.
+			in.Flags |= protoiface.UnmarshalCheckRequired
+		}
+		if o.NoLazyDecoding {
+			in.Flags |= protoiface.UnmarshalNoLazyDecoding
+		}
+
 		out, err = methods.Unmarshal(in)
 	} else {
 		o.RecursionLimit--
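
The new NoLazyDecoding flag above is plumbed into UnmarshalInput so generated
code can honor it. A minimal usage sketch, reusing the anypb.Any message
vendored later in this patch (unmarshalEager and the type URL are hypothetical
names for illustration):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/anypb"
	)

	// unmarshalEager decodes data into m with lazy decoding disabled, so any
	// [lazy = true] submessages of Opaque API messages are decoded up front.
	func unmarshalEager(data []byte, m proto.Message) error {
		return proto.UnmarshalOptions{NoLazyDecoding: true}.Unmarshal(data, m)
	}

	func main() {
		data, _ := proto.Marshal(&anypb.Any{TypeUrl: "example.com/T"})

		var dst anypb.Any
		if err := unmarshalEager(data, &dst); err != nil {
			panic(err)
		}
		fmt.Println(dst.GetTypeUrl()) // example.com/T
	}
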
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 1f847bcc3..f0473c586 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,7 +63,8 @@ type MarshalOptions struct {
 	// options (except for UseCachedSize itself).
 	//
 	// 2. The message and all its submessages have not changed in any
-	// way since the Size call.
+	// way since the Size call. For lazily decoded messages, accessing
+	// a message results in decoding the message, which is a change.
 	//
 	// If either of these invariants is violated,
 	// the results are undefined and may include panics or corrupted output.
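
The tightened UseCachedSize invariant matters in practice: with lazy decoding,
merely reading a lazy submessage between Size and Marshal already counts as a
change. A minimal sketch of the safe call sequence, using the vendored
anypb.Any as a stand-in message (marshalWithCachedSize is a hypothetical
helper name):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/anypb"
	)

	// marshalWithCachedSize sizes m once, then marshals reusing the cached
	// per-message sizes. m must not be touched between the two calls: for
	// lazily decoded messages even a getter on a lazy submessage is a change.
	func marshalWithCachedSize(m proto.Message) ([]byte, error) {
		opts := proto.MarshalOptions{UseCachedSize: true}
		buf := make([]byte, 0, opts.Size(m)) // Size populates the size cache
		return opts.MarshalAppend(buf, m)
	}

	func main() {
		b, err := marshalWithCachedSize(&anypb.Any{TypeUrl: "example.com/T"})
		fmt.Println(len(b), err)
	}
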
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b03..c36d4a9cd 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
 	"reflect"
 
 	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
 )
 
 // Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
 	if mx.IsValid() != my.IsValid() {
 		return false
 	}
+
+	// Take the fast path when both messages provide an Equal method.
+	pmx := protoMethods(mx)
+	pmy := protoMethods(my)
+	if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+		return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+	}
+
 	vx := protoreflect.ValueOfMessage(mx)
 	vy := protoreflect.ValueOfMessage(my)
 	return vx.Equal(vy)
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index d248f2928..78445d116 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
 // If the field is unpopulated, it returns the default value for
 // scalars and an immutable, empty value for lists or messages.
 // It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+//	mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+//	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+//	    ... // make use of mm
+//	}
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
 func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 	// Treat nil message interface as an empty message; return the default.
 	if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 // SetExtension stores the value of an extension field.
 // It panics if m is invalid, xt does not extend m, or if type of v
 // is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
 func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
 	xd := xt.TypeDescriptor()
 	pv := xt.ValueOf(v)
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index 052fb5ae3..c8675806c 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,11 +12,19 @@ import (
 )
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func Size(m Message) int {
 	return MarshalOptions{}.Size(m)
 }
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func (o MarshalOptions) Size(m Message) int {
 	// Treat a nil message interface as an empty message; nothing to output.
 	if m == nil {
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
new file mode 100644
index 000000000..267fd0f1f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// ValueOrNil returns nil if has is false, or a pointer to a new variable
+// containing the value returned by the specified getter.
+//
+// This function is similar to the wrappers (proto.Int32(), proto.String(),
+// etc.), but is generic (works for any field type) and works with the hasser
+// and getter of a field, as opposed to a value.
+//
+// This is convenient when populating builder fields.
+//
+// Example:
+//
+//	hop := attr.GetDirectHop()
+//	injectedRoute := ripb.InjectedRoute_builder{
+//	  Prefixes: route.GetPrefixes(),
+//	  NextHop:  proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
+//	}
+func ValueOrNil[T any](has bool, getter func() T) *T {
+	if !has {
+		return nil
+	}
+	v := getter()
+	return &v
+}
+
+// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
+// it returns a pointer to an empty val message.
+//
+// This function allows for translating code from the old Open Struct API to the
+// new Opaque API.
+//
+// The old Open Struct API represented oneof fields with a wrapper struct:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{
+//		// The Avatar oneof will be set, with an empty SignedImage.
+//		Avatar: &accountpb.Profile_SignedImage{signedImg},
+//	}
+//
+// The new Opaque API treats oneof fields like regular fields; there are no
+// more wrapper structs:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{}
+//	profile.SetSignedImage(signedImg)
+//
+// For convenience, the Opaque API also offers Builders, which allow for a
+// direct translation of struct initialization. However, because Builders use
+// nilness to represent field presence (but there is no non-nil wrapper struct
+// anymore), Builders cannot distinguish between an unset oneof and a set oneof
+// with a nil message. The above code would need to be translated with the help
+// of the ValueOrDefault function to retain the same behavior:
+//
+//	var signedImg *accountpb.SignedImage
+//	return &accountpb.Profile_builder{
+//		SignedImage: proto.ValueOrDefault(signedImg),
+//	}.Build()
+func ValueOrDefault[T interface {
+	*P
+	Message
+}, P any](val T) T {
+	if val == nil {
+		return T(new(P))
+	}
+	return val
+}
+
+// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
+// type []byte.
+func ValueOrDefaultBytes(val []byte) []byte {
+	if val == nil {
+		return []byte{}
+	}
+	return val
+}
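
The doc examples above assume generated Opaque API types (accountpb, ripb). For
a self-contained illustration, here is ValueOrNil with a plain closure standing
in for a generated Has/Get pair (the values are arbitrary):

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
	)

	func main() {
		get := func() int32 { return 42 }

		p := proto.ValueOrNil(true, get)  // *int32 pointing at a copy of 42
		q := proto.ValueOrNil(false, get) // nil; the getter is never called
		fmt.Println(*p, q == nil)         // 42 true
	}
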
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6eb..742cb518c 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
 		Unmarshal        func(unmarshalInput) (unmarshalOutput, error)
 		Merge            func(mergeInput) mergeOutput
 		CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+		Equal            func(equalInput) equalOutput
 	}
 	supportFlags = uint64
 	sizeInput    = struct {
@@ -75,4 +76,13 @@ type (
 	checkInitializedOutput = struct {
 		pragma.NoUnkeyedLiterals
 	}
+	equalInput = struct {
+		pragma.NoUnkeyedLiterals
+		MessageA Message
+		MessageB Message
+	}
+	equalOutput = struct {
+		pragma.NoUnkeyedLiterals
+		Equal bool
+	}
 )
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06ff..a4b78acef 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@ type Message interface {
 	// This method may return nil.
 	//
 	// The returned methods type is identical to
-	// google.golang.org/protobuf/runtime/protoiface.Methods.
+	// [google.golang.org/protobuf/runtime/protoiface.Methods].
 	// Consult the protoiface package documentation for details.
 	ProtoMethods() *methods
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2af..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
-	nilType valueType = iota
-	boolType
-	int32Type
-	int64Type
-	uint32Type
-	uint64Type
-	float32Type
-	float64Type
-	stringType
-	bytesType
-	enumType
-	ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	typ   valueType // 8B
-	num   uint64    // 8B
-	str   string    // 16B
-	bin   []byte    // 24B
-	iface any       // 16B
-}
-
-func valueOfString(v string) Value {
-	return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
-	return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
-	return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
-	return v.str
-}
-func (v Value) getBytes() []byte {
-	return v.bin
-}
-func (v Value) getIface() any {
-	return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 7f3583ead..0015fcb35 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d386990..479527b58 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d8..28e9e9f03 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
 
 	// CheckInitialized returns an error if any required fields in the message are not set.
 	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+	Equal func(EqualInput) EqualOutput
 }
 
 // SupportFlags indicate support for optional features.
@@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8
 
 const (
 	UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+
+	// UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
+	// The unmarshaller must not modify the contents of the buffer.
+	UnmarshalAliasBuffer
+
+	// UnmarshalValidated indicates that validation has already been
+	// performed on the input buffer.
+	UnmarshalValidated
+
+	// UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are
+	// initialized.
+	UnmarshalCheckRequired
+
+	// UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
+	// lazy decoding, even when otherwise available.
+	UnmarshalNoLazyDecoding
 )
 
 // UnmarshalOutputFlags are output from the Unmarshal method.
@@ -166,3 +185,18 @@ type CheckInitializedInput = struct {
 type CheckInitializedOutput = struct {
 	pragma.NoUnkeyedLiterals
 }
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+	pragma.NoUnkeyedLiterals
+
+	MessageA protoreflect.Message
+	MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+	pragma.NoUnkeyedLiterals
+
+	Equal bool
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 4a1ab7fb3..93df1b569 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,6 +15,7 @@ import (
 	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/filetype"
 	"google.golang.org/protobuf/internal/impl"
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 // UnsafeEnabled specifies whether package unsafe can be used.
@@ -39,6 +40,9 @@ type (
 	ExtensionFieldV1 = impl.ExtensionField
 
 	Pointer = impl.Pointer
+
+	LazyUnmarshalInfo  = *protolazy.XXX_lazyUnmarshalInfo
+	RaceDetectHookData = impl.RaceDetectHookData
 )
 
 var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 7172b43d3..191552cce 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -210,10 +210,7 @@ import (
 //	  "value": "1.212s"
 //	}
 type Any struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// A URL/resource name that uniquely identifies the type of the serialized
 	// protocol buffer message. This string must contain at least
 	// one "/" character. The last segment of the URL's path must represent
@@ -244,7 +241,9 @@ type Any struct {
 	// used with implementation specific semantics.
 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 	// Must be a valid serialized protocol buffer of the above specified type.
-	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	Value         []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New marshals src into a new Any instance.
@@ -368,11 +367,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_any_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_any_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -383,7 +380,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_any_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -461,20 +458,6 @@ func file_google_protobuf_any_proto_init() {
 	if File_google_protobuf_any_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Any); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 84603a614..d767386fe 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# dario.cat/mergo v1.0.0
+# dario.cat/mergo v1.0.1
 ## explicit; go 1.13
 dario.cat/mergo
 # github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
@@ -56,15 +56,15 @@ github.com/containerd/errdefs
 github.com/containerd/errdefs/pkg/errgrpc
 github.com/containerd/errdefs/pkg/internal/cause
 github.com/containerd/errdefs/pkg/internal/types
-# github.com/containerd/stargz-snapshotter/estargz v0.15.1
-## explicit; go 1.19
+# github.com/containerd/stargz-snapshotter/estargz v0.16.3
+## explicit; go 1.22.0
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/typeurl/v2 v2.2.0
+# github.com/containerd/typeurl/v2 v2.2.3
 ## explicit; go 1.21
 github.com/containerd/typeurl/v2
-# github.com/containers/image/v5 v5.31.1
-## explicit; go 1.21
+# github.com/containers/image/v5 v5.34.3
+## explicit; go 1.22.8
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
 github.com/containers/image/v5/docker
@@ -87,6 +87,7 @@ github.com/containers/image/v5/internal/multierr
 github.com/containers/image/v5/internal/pkg/platform
 github.com/containers/image/v5/internal/private
 github.com/containers/image/v5/internal/putblobdigest
+github.com/containers/image/v5/internal/reflink
 github.com/containers/image/v5/internal/rootless
 github.com/containers/image/v5/internal/set
 github.com/containers/image/v5/internal/signature
@@ -118,11 +119,11 @@ github.com/containers/image/v5/version
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.2.0
-## explicit; go 1.21
+# github.com/containers/ocicrypt v1.2.1
+## explicit; go 1.22
 github.com/containers/ocicrypt/spec
-# github.com/containers/storage v1.54.0
-## explicit; go 1.21
+# github.com/containers/storage v1.57.2
+## explicit; go 1.22.0
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -135,12 +136,14 @@ github.com/containers/storage/drivers/register
 github.com/containers/storage/drivers/vfs
 github.com/containers/storage/drivers/windows
 github.com/containers/storage/drivers/zfs
+github.com/containers/storage/internal/dedup
 github.com/containers/storage/pkg/archive
 github.com/containers/storage/pkg/chrootarchive
 github.com/containers/storage/pkg/chunked
 github.com/containers/storage/pkg/chunked/compressor
 github.com/containers/storage/pkg/chunked/dump
-github.com/containers/storage/pkg/chunked/internal
+github.com/containers/storage/pkg/chunked/internal/minimal
+github.com/containers/storage/pkg/chunked/internal/path
 github.com/containers/storage/pkg/chunked/toc
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/directory
@@ -168,8 +171,8 @@ github.com/containers/storage/pkg/tarlog
 github.com/containers/storage/pkg/truncindex
 github.com/containers/storage/pkg/unshare
 github.com/containers/storage/types
-# github.com/cyphar/filepath-securejoin v0.3.1
-## explicit; go 1.20
+# github.com/cyphar/filepath-securejoin v0.3.6
+## explicit; go 1.18
 github.com/cyphar/filepath-securejoin
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
@@ -178,8 +181,7 @@ github.com/distribution/reference
 ## explicit
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
-github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v27.3.1+incompatible
+# github.com/docker/docker v27.5.1+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -244,7 +246,7 @@ github.com/gogo/protobuf/proto
 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
 ## explicit
 github.com/golang/groupcache/lru
-# github.com/google/go-containerregistry v0.20.1
+# github.com/google/go-containerregistry v0.20.2
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/v1
 github.com/google/go-containerregistry/pkg/v1/types
@@ -269,7 +271,7 @@ github.com/inconshreveable/mousetrap
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.10
+# github.com/klauspost/compress v1.17.11
 ## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
@@ -296,6 +298,9 @@ github.com/moby/buildkit/util/stack
 # github.com/moby/docker-image-spec v1.3.1
 ## explicit; go 1.18
 github.com/moby/docker-image-spec/specs-go/v1
+# github.com/moby/sys/capability v0.4.0
+## explicit; go 1.21
+github.com/moby/sys/capability
 # github.com/moby/sys/mountinfo v0.7.2
 ## explicit; go 1.17
 github.com/moby/sys/mountinfo
@@ -325,11 +330,10 @@ github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opencontainers/runtime-spec v1.2.0
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/selinux v1.11.0
+# github.com/opencontainers/selinux v1.11.1
 ## explicit; go 1.19
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
-github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
 # github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
 ## explicit
@@ -338,8 +342,6 @@ github.com/ostreedev/ostree-go/pkg/otbuiltin
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/rogpeppe/go-internal v1.11.0
-## explicit; go 1.19
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
@@ -349,13 +351,10 @@ github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
-# github.com/sylabs/sif/v2 v2.18.0
-## explicit; go 1.21.0
+# github.com/sylabs/sif/v2 v2.20.2
+## explicit; go 1.22.0
 github.com/sylabs/sif/v2/pkg/sif
-# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-## explicit
-github.com/syndtr/gocapability/capability
-# github.com/tchap/go-patricia/v2 v2.3.1
+# github.com/tchap/go-patricia/v2 v2.3.2
 ## explicit; go 1.16
 github.com/tchap/go-patricia/v2/patricia
 # github.com/ulikunitz/xz v0.5.12
@@ -364,7 +363,7 @@ github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-# github.com/vbatts/tar-split v0.11.6
+# github.com/vbatts/tar-split v0.11.7
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
@@ -376,13 +375,14 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
 ## explicit; go 1.21
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -393,20 +393,21 @@ go.opentelemetry.io/otel/internal/baggage
 go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/v1.20.0
-go.opentelemetry.io/otel/semconv/v1.24.0
-# go.opentelemetry.io/otel/metric v1.28.0
-## explicit; go 1.21
+go.opentelemetry.io/otel/semconv/v1.26.0
+# go.opentelemetry.io/otel/metric v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/trace v1.28.0
-## explicit; go 1.21
+go.opentelemetry.io/otel/metric/noop
+# go.opentelemetry.io/otel/trace v1.31.0
+## explicit; go 1.22
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
-# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
-## explicit; go 1.20
+# golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67
+## explicit; go 1.22.0
 golang.org/x/exp/maps
-# golang.org/x/mod v0.18.0
-## explicit; go 1.18
+# golang.org/x/mod v0.22.0
+## explicit; go 1.22.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
@@ -414,7 +415,7 @@ golang.org/x/mod/semver
 # golang.org/x/net v0.35.0
 ## explicit; go 1.18
 golang.org/x/net/context
-# golang.org/x/sync v0.8.0
+# golang.org/x/sync v0.10.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 # golang.org/x/sys v0.30.0
@@ -422,11 +423,11 @@ golang.org/x/sync/errgroup
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f
-## explicit; go 1.21
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d
+## explicit; go 1.22
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.67.0
-## explicit; go 1.21
+# google.golang.org/grpc v1.69.4
+## explicit; go 1.22
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/grpclog
@@ -435,8 +436,8 @@ google.golang.org/grpc/internal
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/status
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/protobuf v1.36.2
+## explicit; go 1.21
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -457,6 +458,7 @@ google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
 google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version