From 32249f97d3cb6946b95f9a06f834bd2e8adc17b7 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Sun, 3 Aug 2025 06:19:59 +0700 Subject: [PATCH 01/21] feat: initialize oss client Signed-off-by: Imre Nagi --- go.mod | 3 + go.sum | 29 + pbm/config/config.go | 2 + pbm/storage/oss/client.go | 157 + pbm/storage/oss/oss.go | 68 + pbm/storage/storage.go | 1 + pbm/util/storage.go | 3 + .../github.com/alibabacloud-go/debug/LICENSE | 201 ++ .../alibabacloud-go/debug/debug/debug.go | 58 + .../aliyun/alibabacloud-oss-go-sdk-v2/LICENSE | 201 ++ .../oss/api_op_accesspoint.go | 468 +++ .../api_op_accesspoint_publicaccessblock.go | 163 ++ .../oss/api_op_bucket.go | 1310 +++++++++ .../oss/api_op_bucket_accessmonitor.go | 106 + .../oss/api_op_bucket_archivedirectread.go | 104 + .../oss/api_op_bucket_cname.go | 351 +++ .../oss/api_op_bucket_cors.go | 250 ++ .../oss/api_op_bucket_encryption.go | 170 ++ .../oss/api_op_bucket_httpsconfig.go | 114 + .../oss/api_op_bucket_inventory.go | 318 ++ .../oss/api_op_bucket_lifecycle.go | 280 ++ .../oss/api_op_bucket_logging.go | 320 ++ .../oss/api_op_bucket_metaquery.go | 534 ++++ .../oss/api_op_bucket_objectfcaccesspoint.go | 710 +++++ .../oss/api_op_bucket_policy.go | 209 ++ .../oss/api_op_bucket_publicaccessblock.go | 154 + .../oss/api_op_bucket_redundancytransition.go | 310 ++ .../oss/api_op_bucket_referer.go | 133 + .../oss/api_op_bucket_replication.go | 469 +++ .../oss/api_op_bucket_resourcegroup.go | 104 + .../oss/api_op_bucket_style.go | 244 ++ .../oss/api_op_bucket_tags.go | 146 + .../oss/api_op_bucket_transferacceleration.go | 113 + .../oss/api_op_bucket_website.go | 280 ++ .../oss/api_op_bucket_worm.go | 273 ++ .../oss/api_op_cloud_box.go | 89 + .../oss/api_op_common.go | 12 + .../oss/api_op_object.go | 2572 +++++++++++++++++ .../oss/api_op_publicaccessblock.go | 147 + .../oss/api_op_region.go | 72 + .../oss/api_op_select_object.go | 740 +++++ .../oss/api_op_service.go | 106 + .../oss/checkpoint.go | 369 +++ .../alibabacloud-oss-go-sdk-v2/oss/client.go | 1499 ++++++++++ .../oss/client_extension.go | 164 ++ .../oss/client_paginators.go | 407 +++ .../oss/client_presign.go | 164 ++ .../alibabacloud-oss-go-sdk-v2/oss/config.go | 286 ++ .../alibabacloud-oss-go-sdk-v2/oss/copier.go | 587 ++++ .../oss/credentials/credentials.go | 47 + .../ecs_role_credentials_provider.go | 168 ++ .../environment_credentials_provider.go | 27 + .../fetcher_credentials_provider.go | 183 ++ .../process_credentials_provider.go | 168 ++ .../static_credentials_provider.go | 26 + .../oss/crypto/aes_ctr.go | 65 + .../oss/crypto/aes_ctr_cipher.go | 208 ++ .../oss/crypto/cipher.go | 69 + .../oss/crypto/crypto_const.go | 8 + .../oss/crypto/crypto_type.go | 125 + .../oss/crypto/master_rsa_cipher.go | 102 + .../oss/defaults.go | 79 + .../oss/downloader.go | 598 ++++ .../oss/encryption_client.go | 503 ++++ .../oss/endpoints.go | 62 + .../alibabacloud-oss-go-sdk-v2/oss/enums.go | 344 +++ .../alibabacloud-oss-go-sdk-v2/oss/errors.go | 170 ++ .../oss/filelike.go | 795 +++++ .../oss/from_ptr.go | 63 + .../oss/io_utils.go | 869 ++++++ .../alibabacloud-oss-go-sdk-v2/oss/limiter.go | 44 + .../alibabacloud-oss-go-sdk-v2/oss/logger.go | 130 + .../oss/progress.go | 41 + .../oss/retry/backoff.go | 79 + .../oss/retry/retryable_error.go | 103 + .../oss/retry/retryer.go | 22 + .../oss/retry/standard.go | 71 + .../oss/retry/types.go | 19 + .../oss/signer/signer.go | 51 + .../oss/signer/v1.go | 264 ++ .../oss/signer/v4.go | 390 +++ .../alibabacloud-oss-go-sdk-v2/oss/to_ptr.go | 15 + .../oss/transport/dialer.go | 88 + 
.../oss/transport/http.go | 177 ++ .../alibabacloud-oss-go-sdk-v2/oss/types.go | 162 ++ .../oss/uploader.go | 768 +++++ .../alibabacloud-oss-go-sdk-v2/oss/utils.go | 405 +++ .../oss/utils_copy.go | 95 + .../oss/utils_crc.go | 140 + .../oss/utils_mime.go | 595 ++++ .../oss/utils_pool.go | 248 ++ .../oss/validation.go | 84 + .../alibabacloud-oss-go-sdk-v2/oss/version.go | 34 + .../oss/xml_utils.go | 246 ++ .../github.com/aliyun/credentials-go/LICENSE | 201 ++ .../credentials/internal/http/http.go | 145 + .../credentials/internal/utils/path.go | 18 + .../credentials/internal/utils/runtime.go | 36 + .../credentials/internal/utils/utils.go | 204 ++ .../credentials/providers/cli_profile.go | 266 ++ .../credentials/providers/cloud_sso.go | 216 ++ .../credentials/providers/credentials.go | 22 + .../credentials/providers/default.go | 113 + .../credentials/providers/ecs_ram_role.go | 283 ++ .../credentials/providers/env.go | 55 + .../credentials/providers/hook.go | 7 + .../credentials/providers/oidc.go | 278 ++ .../credentials/providers/profile.go | 169 ++ .../credentials/providers/ram_role_arn.go | 375 +++ .../credentials/providers/static_ak.go | 67 + .../credentials/providers/static_sts.go | 83 + .../credentials/providers/uri.go | 152 + vendor/modules.txt | 16 + 113 files changed, 27226 insertions(+) create mode 100644 pbm/storage/oss/client.go create mode 100644 pbm/storage/oss/oss.go create mode 100644 vendor/github.com/alibabacloud-go/debug/LICENSE create mode 100644 vendor/github.com/alibabacloud-go/debug/debug/debug.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go create mode 100644 
vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go create 
mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go create mode 100644 vendor/github.com/aliyun/credentials-go/LICENSE create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/default.go create mode 100644 
vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/env.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go diff --git a/go.mod b/go.mod index 6dfc9fef7..0fb6cc1fb 100644 --- a/go.mod +++ b/go.mod @@ -54,6 +54,9 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/alibabacloud-go/debug v1.0.1 // indirect + github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 // indirect + github.com/aliyun/credentials-go v1.4.7 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect diff --git a/go.sum b/go.sum index c6603f569..79a1e9361 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,14 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/debug v1.0.1 h1:MsW9SmUtbb1Fnt3ieC6NNZi6aEwrXfDksD4QA6GSbPg= +github.com/alibabacloud-go/debug v1.0.1/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/tea v1.2.2/go.mod h1:CF3vOzEMAG+bR4WOql8gc2G9H3EkH3ZLAQdpmpXMgwk= +github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 h1:LyeTJauAchnWdre3sAyterGrzaAtZ4dSNoIvDvaWfo4= +github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M= +github.com/aliyun/credentials-go v1.4.7 h1:T17dLqEtPUFvjDRRb5giVvLh6dFT8IcNFJJb7MeyCxw= +github.com/aliyun/credentials-go v1.4.7/go.mod h1:Jm6d+xIgwJVLVWT561vy67ZRP4lPTQxMbEYRuT2Ti1U= github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= @@ -191,6 +199,8 @@ github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -228,6 +238,7 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8 github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -288,6 +299,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -358,6 +370,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck= @@ -365,6 +378,7 @@ golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -373,6 +387,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= @@ -381,6 +398,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -399,16 +417,23 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= @@ -418,6 +443,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -435,10 +461,13 @@ google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7E google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pbm/config/config.go b/pbm/config/config.go
index f03f2213a..b4027d72a 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -26,6 +26,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/storage/azure"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/fs"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/gcs"
+	"github.com/percona/percona-backup-mongodb/pbm/storage/oss"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/s3"
 	"github.com/percona/percona-backup-mongodb/pbm/topo"
 )
@@ -227,6 +228,7 @@ type StorageConf struct {
 	GCS        *gcs.Config   `bson:"gcs,omitempty" json:"gcs,omitempty" yaml:"gcs,omitempty"`
 	Azure      *azure.Config `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"`
 	Filesystem *fs.Config    `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"`
+	OSS        *oss.Config   `bson:"oss,omitempty" json:"oss,omitempty" yaml:"oss,omitempty"`
 }
 
 func (s *StorageConf) Clone() *StorageConf {
diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go
new file mode 100644
index 000000000..2da86d864
--- /dev/null
+++ b/pbm/storage/oss/client.go
@@ -0,0 +1,166 @@
+package oss
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
+	osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry"
+	"github.com/aliyun/credentials-go/credentials/providers"
+)
+
+const (
+	defaultPartSize  int64 = 10 * 1024 * 1024 // 10 MiB
+	defaultOSSRegion       = "ap-southeast-5"
+
+	defaultRetryBaseDelay    = 30 * time.Millisecond
+	defaultRetryerMaxBackoff = 300 * time.Second
+)
+
+//nolint:lll
+type Config struct {
+	Region      string `bson:"region" json:"region" yaml:"region"`
+	EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"`
+
+	Bucket      string      `bson:"bucket" json:"bucket" yaml:"bucket"`
+	Prefix      string      `bson:"prefix,omitempty" json:"prefix,omitempty" yaml:"prefix,omitempty"`
+	Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"`
+
+	Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"`
+
+	ConnectTimeout int   `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"`
+	UploadPartSize int   `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"`
+	MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"`
+}
+
+type Retryer struct {
+	MaxAttempts int           `bson:"maxAttempts" json:"maxAttempts" yaml:"maxAttempts"`
+	MaxBackoff  time.Duration `bson:"maxBackoff" json:"maxBackoff" yaml:"maxBackoff"`
+	BaseDelay   time.Duration `bson:"baseDelay" json:"baseDelay" yaml:"baseDelay"`
+}
+
+type Credentials struct {
+	AccessKeyID     string `bson:"accessKeyId" json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty"`
+	AccessKeySecret string `bson:"accessKeySecret" json:"accessKeySecret,omitempty" yaml:"accessKeySecret,omitempty"`
+	SecurityToken   string `bson:"securityToken" json:"securityToken,omitempty" yaml:"securityToken,omitempty"`
+	RoleARN         string `bson:"roleArn,omitempty" json:"roleArn,omitempty" yaml:"roleArn,omitempty"`
+	SessionName     string `bson:"sessionName,omitempty" json:"sessionName,omitempty" yaml:"sessionName,omitempty"`
+}
+
+func (cfg *Config) Cast() error {
+	if cfg.Region == "" {
+		cfg.Region = defaultOSSRegion
+	}
+	if cfg.Retryer != nil {
+		if cfg.Retryer.BaseDelay == 0 {
+			cfg.Retryer.BaseDelay = defaultRetryBaseDelay
+		}
+		if cfg.Retryer.MaxBackoff == 0 {
+			cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff
+		}
+	}
+	return nil
+}
+
+const (
+	defaultSessionExpiration = 3600
+)
+
+func newCred(config *Config) (*cred, error) {
+	var credentialsProvider providers.CredentialsProvider
+	var err error
+
+	if config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" {
+		return nil, fmt.Errorf("access key ID and secret are required")
+	}
+
+	if config.Credentials.SecurityToken != "" {
+		credentialsProvider, err = providers.NewStaticSTSCredentialsProviderBuilder().
+			WithAccessKeyId(config.Credentials.AccessKeyID).
+			WithAccessKeySecret(config.Credentials.AccessKeySecret).
+			WithSecurityToken(config.Credentials.SecurityToken).
+			Build()
+	} else {
+		credentialsProvider, err = providers.NewStaticAKCredentialsProviderBuilder().
+			WithAccessKeyId(config.Credentials.AccessKeyID).
+			WithAccessKeySecret(config.Credentials.AccessKeySecret).
+			Build()
+	}
+	if err != nil {
+		return nil, fmt.Errorf("credentials provider: %w", err)
+	}
+
+	if config.Credentials.RoleARN != "" {
+		internalProvider := credentialsProvider
+		credentialsProvider, err = providers.NewRAMRoleARNCredentialsProviderBuilder().
+			WithCredentialsProvider(internalProvider).
+			WithRoleArn(config.Credentials.RoleARN).
+			WithRoleSessionName(config.Credentials.SessionName).
+			WithDurationSeconds(defaultSessionExpiration).
+			Build()
+		if err != nil {
+			return nil, fmt.Errorf("ram role credential provider: %w", err)
+		}
+	}
+
+	return &cred{
+		provider: credentialsProvider,
+	}, nil
+}
+
+type cred struct {
+	provider providers.CredentialsProvider
+}
+
+func (c *cred) GetCredentials(ctx context.Context) (osscred.Credentials, error) {
+	cc, err := c.provider.GetCredentials()
+	if err != nil {
+		return osscred.Credentials{}, err
+	}
+
+	return osscred.Credentials{
+		AccessKeyID:     cc.AccessKeyId,
+		AccessKeySecret: cc.AccessKeySecret,
+		SecurityToken:   cc.SecurityToken,
+	}, nil
+}
+
+func configureClient(config *Config) (*oss.Client, error) {
+	if config.Region == "" {
+		return nil, fmt.Errorf("oss region is required")
+	}
+
+	cred, err := newCred(config)
+	if err != nil {
+		return nil, fmt.Errorf("create credentials: %w", err)
+	}
+
+	ossConfig := oss.LoadDefaultConfig().
+		WithRegion(config.Region).
+		WithCredentialsProvider(cred).
+		WithSignatureVersion(oss.SignatureVersionV4)
+
+	// Retryer is optional in the config; apply retry settings only when it is
+	// set to avoid a nil pointer dereference.
+	if config.Retryer != nil {
+		ossConfig = ossConfig.
+			WithRetryMaxAttempts(config.Retryer.MaxAttempts).
+			WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) {
+				ro.MaxAttempts = config.Retryer.MaxAttempts
+				ro.MaxBackoff = config.Retryer.MaxBackoff
+				ro.BaseDelay = config.Retryer.BaseDelay
+			}))
+	}
+
+	if config.ConnectTimeout > 0 {
+		ossConfig = ossConfig.WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second)
+	}
+
+	if config.EndpointURL != "" {
+		ossConfig = ossConfig.WithEndpoint(config.EndpointURL)
+	}
+
+	return oss.NewClient(ossConfig), nil
+}
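For context on how these pieces fit together, here is a minimal, non-authoritative sketch of wiring up the client above. The region, bucket, prefix, node name, and key placeholders are illustrative only (never hardcode real keys), and the nil log event is just for brevity:

package main

import (
	"fmt"
	"log"

	pbmoss "github.com/percona/percona-backup-mongodb/pbm/storage/oss"
)

func main() {
	cfg := &pbmoss.Config{
		Region: "ap-southeast-5",
		Bucket: "pbm-backups", // placeholder bucket name
		Prefix: "rs0",
		Credentials: pbmoss.Credentials{
			AccessKeyID:     "<access-key-id>",     // placeholders; read real
			AccessKeySecret: "<access-key-secret>", // keys from a secret store
			// Setting RoleARN (and optionally SessionName) switches newCred
			// to the RAM-role-assumption provider chain.
		},
		// BaseDelay and MaxBackoff are left zero so Cast() fills in defaults.
		Retryer: &pbmoss.Retryer{MaxAttempts: 5},
	}

	stg, err := pbmoss.New(cfg, "rs0/mongo-0:27017", nil)
	if err != nil {
		log.Fatalf("init oss storage: %v", err)
	}
	fmt.Println("storage type:", stg.Type()) // prints: storage type: oss
}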
diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go
new file mode 100644
index 000000000..71a99f4a8
--- /dev/null
+++ b/pbm/storage/oss/oss.go
@@ -0,0 +1,70 @@
+package oss
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
+
+	"github.com/percona/percona-backup-mongodb/pbm/log"
+	"github.com/percona/percona-backup-mongodb/pbm/storage"
+)
+
+var _ storage.Storage = &OSS{}
+
+func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) {
+	if err := cfg.Cast(); err != nil {
+		return nil, fmt.Errorf("cast config: %w", err)
+	}
+
+	client, err := configureClient(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("configure client: %w", err)
+	}
+
+	o := &OSS{
+		cfg:    cfg,
+		node:   node,
+		log:    l,
+		ossCli: client,
+	}
+
+	return o, nil
+}
+
+type OSS struct {
+	cfg    *Config
+	node   string
+	log    log.LogEvent
+	ossCli *oss.Client
+}
+
+func (o *OSS) Type() storage.Type {
+	return storage.OSS
+}
+
+// The methods below are stubs that satisfy storage.Storage;
+// the actual implementations land in later patches of this series.
+
+func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error {
+	return nil
+}
+
+func (o *OSS) SourceReader(name string) (io.ReadCloser, error) {
+	return nil, nil
+}
+
+func (o *OSS) FileStat(name string) (storage.FileInfo, error) {
+	return storage.FileInfo{}, nil
+}
+
+func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) {
+	return nil, nil
+}
+
+func (o *OSS) Delete(name string) error {
+	return nil
+}
+
+func (o *OSS) Copy(src, dst string) error {
+	return nil
+}
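Save, SourceReader, and the other storage methods above are intentionally stubbed in this first patch. As a rough sketch only (not the implementation from later patches), the upload and download paths could sit on top of the vendored SDK's PutObject and GetObject as shown below; the free helper functions and the prefix handling via path.Join are assumptions for illustration:

package ossdraft

import (
	"context"
	"io"
	"path"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

// saveObject uploads the contents of data under prefix/name.
func saveObject(ctx context.Context, cli *oss.Client, bucket, prefix, name string, data io.Reader) error {
	_, err := cli.PutObject(ctx, &oss.PutObjectRequest{
		Bucket: oss.Ptr(bucket),
		Key:    oss.Ptr(path.Join(prefix, name)),
		Body:   data,
	})
	return err
}

// openObject returns a reader over prefix/name; the caller must Close it.
func openObject(ctx context.Context, cli *oss.Client, bucket, prefix, name string) (io.ReadCloser, error) {
	res, err := cli.GetObject(ctx, &oss.GetObjectRequest{
		Bucket: oss.Ptr(bucket),
		Key:    oss.Ptr(path.Join(prefix, name)),
	})
	if err != nil {
		return nil, err
	}
	return res.Body, nil
}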
"github.com/percona/percona-backup-mongodb/pbm/storage/oss" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" "github.com/percona/percona-backup-mongodb/pbm/version" ) @@ -35,6 +36,8 @@ func StorageFromConfig(cfg *config.StorageConf, node string, l log.LogEvent) (st return blackhole.New(), nil case storage.GCS: return gcs.New(cfg.GCS, node, l) + case storage.OSS: + return oss.New(cfg.OSS, node, l) case storage.Undefined: return nil, ErrStorageUndefined default: diff --git a/vendor/github.com/alibabacloud-go/debug/LICENSE b/vendor/github.com/alibabacloud-go/debug/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/vendor/github.com/alibabacloud-go/debug/debug/debug.go b/vendor/github.com/alibabacloud-go/debug/debug/debug.go
new file mode 100644
index 000000000..c9c8b5422
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/debug/debug/debug.go
@@ -0,0 +1,58 @@
+// Package debug is a library to display debug info that is controlled by the environment variable DEBUG
+//
+// # Example
+//
+// package main
+// // import the package
+// import "github.com/alibabacloud-go/debug/debug"
+//
+// // init a debug method
+// var d = debug.Init("sdk")
+//
+// func main() {
+// // try `go run demo.go`
+// // and `DEBUG=sdk go run demo.go`
+// d("this debug information just print when DEBUG environment variable was set")
+// }
+//
+// When you run the application with `DEBUG=sdk go run main.go`, it will display logs. Otherwise
+// it does nothing.
+package debug
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+// Debug is a method that displays logs; it is useful for developers to trace program running
+// details when troubleshooting
+type Debug func(format string, v ...interface{})
+
+var hookGetEnv = func() string {
+	return os.Getenv("DEBUG")
+}
+
+var hookPrint = func(input string) {
+	fmt.Println(input)
+}
+
+// Init returns a debug method that is based on the environment variable DEBUG value
+func Init(flag string) Debug {
+	enable := false
+
+	env := hookGetEnv()
+	parts := strings.Split(env, ",")
+	for _, part := range parts {
+		if part == flag {
+			enable = true
+			break
+		}
+	}
+
+	return func(format string, v ...interface{}) {
+		if enable {
+			hookPrint(fmt.Sprintf(format, v...))
+		}
+	}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go new file mode 100644 index 000000000..f18b17b3c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go @@ -0,0 +1,468 @@ +package oss + +import ( + "context" + "io" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type AccessPointVpcConfiguration struct { + // The ID of the VPC that is required only when the NetworkOrigin parameter is set to vpc. + VpcId *string `xml:"VpcId"` +} + +type CreateAccessPointConfiguration struct { + // The name of the access point. The name of the access point must meet the following naming rules:* The name must be unique in a region of your Alibaba Cloud account.* The name cannot end with -ossalias.* The name can contain only lowercase letters, digits, and hyphens (-). It cannot start or end with a hyphen (-).* The name must be 3 to 19 characters in length. + AccessPointName *string `xml:"AccessPointName"` + + // The network origin of the access point. + NetworkOrigin *string `xml:"NetworkOrigin"` + + // The container that stores the information about the VPC. + VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"` +} + +type ListAccessPointsRequest struct { + // The maximum number of access points that can be returned. Valid values:* For user-level access points: (0,1000].* For bucket-level access points: (0,100]. + MaxKeys int64 `input:"query,max-keys"` + + // The token from which the listing operation starts. You must specify the value of NextContinuationToken that is obtained from the previous query as the value of continuation-token. + ContinuationToken *string `input:"query,continuation-token"` + + // The name of the bucket. + Bucket *string `input:"host,bucket"` + + RequestCommon +} + +type AccessPoint struct { + // The network origin of the access point. + NetworkOrigin *string `xml:"NetworkOrigin"` + + // The container that stores the information about the VPC. + VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"` + + // The status of the access point. + Status *string `xml:"Status"` + + // The name of the bucket for which the access point is configured. + Bucket *string `xml:"Bucket"` + + // The name of the access point. + AccessPointName *string `xml:"AccessPointName"` + + // The alias of the access point. + Alias *string `xml:"Alias"` +} + +type ListAccessPointsResult struct { + // The maximum number of results set for this enumeration operation. + MaxKeys *int32 `xml:"MaxKeys"` + + // Indicates whether the returned list is truncated. Valid values: * true: indicates that not all results are returned. * false: indicates that all results are returned. + IsTruncated *bool `xml:"IsTruncated"` + + // Indicates that this ListAccessPoints request does not return all results that can be listed. You can use NextContinuationToken to continue obtaining list results. + NextContinuationToken *string `xml:"NextContinuationToken"` + + // The ID of the Alibaba Cloud account to which the access point belongs. + AccountId *string `xml:"AccountId"` + + // The container that stores the information about all access point. + AccessPoints []AccessPoint `xml:"AccessPoints>AccessPoint"` + + ResultCommon +} + +// ListAccessPoints Queries the information about user-level or bucket-level access points. 
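+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	result, err := client.ListAccessPoints(context.TODO(), &ListAccessPointsRequest{
+//		MaxKeys: 100,
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, ap := range result.AccessPoints {
+//		_ = ap // each AccessPoint carries *string fields such as AccessPointName and Status
+//	}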
+func (c *Client) ListAccessPoints(ctx context.Context, request *ListAccessPointsRequest, optFns ...func(*Options)) (*ListAccessPointsResult, error) { + var err error + if request == nil { + request = &ListAccessPointsRequest{} + } + input := &OperationInput{ + OpName: "ListAccessPoints", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListAccessPointsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetAccessPointRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointResult struct { + // The ARN of the access point. + AccessPointArn *string `xml:"AccessPointArn"` + + // The alias of the access point. + Alias *string `xml:"Alias"` + + // The public endpoint of the access point. + PublicEndpoint *string `xml:"Endpoints>PublicEndpoint"` + + // The internal endpoint of the access point. + InternalEndpoint *string `xml:"Endpoints>InternalEndpoint"` + + // The time when the access point was created. + CreationDate *string `xml:"CreationDate"` + + // The name of the access point. + AccessPointName *string `xml:"AccessPointName"` + + // The name of the bucket for which the access point is configured. + Bucket *string `xml:"Bucket"` + + // The ID of the Alibaba Cloud account for which the access point is configured. + AccountId *string `xml:"AccountId"` + + // The network origin of the access point. Valid values: vpc and internet. vpc: You can only use the specified VPC ID to access the access point. internet: You can use public endpoints and internal endpoints to access the access point. + NetworkOrigin *string `xml:"NetworkOrigin"` + + // The container that stores the information about the VPC. + VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"` + + // The status of the access point. + AccessPointStatus *string `xml:"Status"` + + // The container that stores the Block Public Access configurations. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"` + + ResultCommon +} + +// GetAccessPoint Queries the information about an access point. 
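+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	result, err := client.GetAccessPoint(context.TODO(), &GetAccessPointRequest{
+//		Bucket:          Ptr("examplebucket"),
+//		AccessPointName: Ptr("exampleaccesspoint"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = result // result.PublicEndpoint and result.InternalEndpoint hold the access point endpoints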
+func (c *Client) GetAccessPoint(ctx context.Context, request *GetAccessPointRequest, optFns ...func(*Options)) (*GetAccessPointResult, error) { + var err error + if request == nil { + request = &GetAccessPointRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPoint", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointPolicyResult struct { + // The configurations of the access point policy. + Body string + + ResultCommon +} + +// GetAccessPointPolicy Queries the configurations of an access point policy. +func (c *Client) GetAccessPointPolicy(ctx context.Context, request *GetAccessPointPolicyRequest, optFns ...func(*Options)) (*GetAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &GetAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointPolicy", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(output.Body) + defer output.Body.Close() + if err != nil { + return nil, err + } + result := &GetAccessPointPolicyResult{ + Body: string(body), + } + + if err = c.unmarshalOutput(result, output); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointPolicyResult struct { + ResultCommon +} + +// DeleteAccessPointPolicy Deletes an access point policy. 
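+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	_, err := client.DeleteAccessPointPolicy(context.TODO(), &DeleteAccessPointPolicyRequest{
+//		Bucket:          Ptr("examplebucket"),
+//		AccessPointName: Ptr("exampleaccesspoint"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}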
+func (c *Client) DeleteAccessPointPolicy(ctx context.Context, request *DeleteAccessPointPolicyRequest, optFns ...func(*Options)) (*DeleteAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPolicy", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPolicyResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + // The configurations of the access point policy. + Body io.Reader `input:"body,nop,required"` + + RequestCommon +} + +type PutAccessPointPolicyResult struct { + ResultCommon +} + +// PutAccessPointPolicy Configures an access point policy. +func (c *Client) PutAccessPointPolicy(ctx context.Context, request *PutAccessPointPolicyRequest, optFns ...func(*Options)) (*PutAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &PutAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointPolicy", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointPolicyResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointResult struct { + ResultCommon +} + +// DeleteAccessPoint Deletes an access point. 
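+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	_, err := client.DeleteAccessPoint(context.TODO(), &DeleteAccessPointRequest{
+//		Bucket:          Ptr("examplebucket"),
+//		AccessPointName: Ptr("exampleaccesspoint"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}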
+func (c *Client) DeleteAccessPoint(ctx context.Context, request *DeleteAccessPointRequest, optFns ...func(*Options)) (*DeleteAccessPointResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPoint", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateAccessPointRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + CreateAccessPointConfiguration *CreateAccessPointConfiguration `input:"body,CreateAccessPointConfiguration,xml,required"` + + RequestCommon +} + +type CreateAccessPointResult struct { + // The Alibaba Cloud Resource Name (ARN) of the access point. + AccessPointArn *string `xml:"AccessPointArn"` + + // The alias of the access point. + Alias *string `xml:"Alias"` + + ResultCommon +} + +// CreateAccessPoint Creates an access point. +func (c *Client) CreateAccessPoint(ctx context.Context, request *CreateAccessPointRequest, optFns ...func(*Options)) (*CreateAccessPointResult, error) { + var err error + if request == nil { + request = &CreateAccessPointRequest{} + } + input := &OperationInput{ + OpName: "CreateAccessPoint", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CreateAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go new file mode 100644 index 000000000..4fa22593a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go @@ -0,0 +1,163 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type GetAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. 
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetAccessPointPublicAccessBlock Queries the Block Public Access configurations of an access point. +func (c *Client) GetAccessPointPublicAccessBlock(ctx context.Context, request *GetAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*GetAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + // The request body. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutAccessPointPublicAccessBlockResult struct { + ResultCommon +} + +// PutAccessPointPublicAccessBlock Enables or disables Block Public Access for an access point. +func (c *Client) PutAccessPointPublicAccessBlock(ctx context.Context, request *PutAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*PutAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointPublicAccessBlockResult struct { + ResultCommon +} + +// DeleteAccessPointPublicAccessBlock Deletes the Block Public Access configurations of an access point. 
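+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	_, err := client.DeleteAccessPointPublicAccessBlock(context.TODO(), &DeleteAccessPointPublicAccessBlockRequest{
+//		Bucket:          Ptr("examplebucket"),
+//		AccessPointName: Ptr("exampleaccesspoint"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}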
+func (c *Client) DeleteAccessPointPublicAccessBlock(ctx context.Context, request *DeleteAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*DeleteAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go new file mode 100644 index 000000000..9c73d7ef5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go @@ -0,0 +1,1310 @@ +package oss + +import ( + "context" + "encoding/xml" + "net/url" + "strings" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutBucketRequest struct { + // The name of the bucket to create. + Bucket *string `input:"host,bucket,required"` + + // The access control list (ACL) of the bucket. + Acl BucketACLType `input:"header,x-oss-acl"` + + // The ID of the resource group. + ResourceGroupId *string `input:"header,x-oss-resource-group-id"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `input:"body,CreateBucketConfiguration,xml"` + + RequestCommon +} + +type CreateBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + + // The storage class of the bucket. + StorageClass StorageClassType `xml:"StorageClass,omitempty"` + + // The redundancy type of the bucket. + DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"` +} + +type PutBucketResult struct { + ResultCommon +} + +// PutBucket Creates a bucket. +func (c *Client) PutBucket(ctx context.Context, request *PutBucketRequest, optFns ...func(*Options)) (*PutBucketResult, error) { + var err error + if request == nil { + request = &PutBucketRequest{} + } + input := &OperationInput{ + OpName: "PutBucket", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketResult{} + + if err = c.unmarshalOutput(result, output, discardBody); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketRequest struct { + // The name of the bucket to delete. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketResult struct { + ResultCommon +} + +// DeleteBucket Deletes a bucket. 
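+// Example (editorial sketch, not upstream code; assumes a configured *Client named client;
+// note that OSS only deletes empty buckets):
+//
+//	_, err := client.DeleteBucket(context.TODO(), &DeleteBucketRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}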
+func (c *Client) DeleteBucket(ctx context.Context, request *DeleteBucketRequest, optFns ...func(*Options)) (*DeleteBucketResult, error) { + var err error + if request == nil { + request = &DeleteBucketRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucket", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketResult{} + if err = c.unmarshalOutput(result, output, discardBody); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListObjectsRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The character that is used to group objects by name. If you specify the delimiter parameter in the request, + // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from + // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes. + Delimiter *string `input:"query,delimiter"` + + // The encoding type of the content in the response. Valid value: url + EncodingType *string `input:"query,encoding-type"` + + // The name of the object after which the ListObjects (GetBucket) operation starts. + // If this parameter is specified, objects whose names are alphabetically greater than the marker value are returned. + Marker *string `input:"query,marker"` + + // The maximum number of objects that you want to return. If the list operation cannot be complete at a time + // because the max-keys parameter is specified, the NextMarker element is included in the response as the marker + // for the next list operation. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of the returned objects must contain. + Prefix *string `input:"query,prefix"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type ListObjectsResult struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` + + // The name of the object after which the list operation begins. + Marker *string `xml:"Marker"` + + // The maximum number of returned objects in the response. + MaxKeys int32 `xml:"MaxKeys"` + + // The character that is used to group objects by name. + Delimiter *string `xml:"Delimiter"` + + // Indicates whether the returned results are truncated. + // true indicates that not all results are returned this time. + // false indicates that all results are returned this time. + IsTruncated bool `xml:"IsTruncated"` + + // The position from which the next list operation starts. + NextMarker *string `xml:"NextMarker"` + + // The encoding type of the content in the response. + EncodingType *string `xml:"EncodingType"` + + // The container that stores the metadata of the returned objects. + Contents []ObjectProperties `xml:"Contents"` + + // If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element. 
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` + + ResultCommon +} + +type ObjectProperties struct { + // The name of the object. + Key *string `xml:"Key"` + + // The type of the object. Valid values: Normal, Multipart and Appendable + Type *string `xml:"Type"` + + // The size of the returned object. Unit: bytes. + Size int64 `xml:"Size"` + + // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object. + ETag *string `xml:"ETag"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The storage class of the object. + StorageClass *string `xml:"StorageClass"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The restoration status of the object. + RestoreInfo *string `xml:"RestoreInfo"` + + // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules. + TransitionTime *time.Time `xml:"TransitionTime"` +} + +type Owner struct { + // The ID of the bucket owner. + ID *string `xml:"ID"` + + // The name of the object owner. + DisplayName *string `xml:"DisplayName"` +} + +type CommonPrefix struct { + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` +} + +// ListObjects Lists the information about all objects in a bucket. +func (c *Client) ListObjects(ctx context.Context, request *ListObjectsRequest, optFns ...func(*Options)) (*ListObjectsResult, error) { + var err error + if request == nil { + request = &ListObjectsRequest{} + } + input := &OperationInput{ + OpName: "ListObjects", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +func unmarshalEncodeType(result any, output *OperationOutput) error { + switch r := result.(type) { + case *ListObjectsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.Marker, &r.Delimiter, &r.NextMarker} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.Contents); i++ { + if r.Contents[i].Key != nil { + if *r.Contents[i].Key, err = url.QueryUnescape(*r.Contents[i].Key); err != nil { + return err + } + } + + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + case *ListObjectsV2Result: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.StartAfter, &r.Delimiter, &r.ContinuationToken, &r.NextContinuationToken} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) 
+ } + } + for i := 0; i < len(r.Contents); i++ { + if r.Contents[i].Key != nil { + if *r.Contents[i].Key, err = url.QueryUnescape(*r.Contents[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + case *DeleteMultipleObjectsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + for i := 0; i < len(r.DeletedObjects); i++ { + if r.DeletedObjects[i].Key != nil { + if *r.DeletedObjects[i].Key, err = url.QueryUnescape(*r.DeletedObjects[i].Key); err != nil { + return err + } + } + } + } + case *InitiateMultipartUploadResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + if r.Key != nil { + if *r.Key, err = url.QueryUnescape(*r.Key); err != nil { + return err + } + } + } + case *CompleteMultipartUploadResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + if r.Key != nil { + if *r.Key, err = url.QueryUnescape(*r.Key); err != nil { + return err + } + } + } + case *ListMultipartUploadsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.KeyMarker, &r.NextKeyMarker, &r.Prefix, &r.Delimiter} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.Uploads); i++ { + if r.Uploads[i].Key != nil { + if *r.Uploads[i].Key, err = url.QueryUnescape(*r.Uploads[i].Key); err != nil { + return err + } + } + } + } + case *ListPartsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Key} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + } + case *ListObjectVersionsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.KeyMarker, &r.Delimiter, &r.NextKeyMarker} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.ObjectVersions); i++ { + if r.ObjectVersions[i].Key != nil { + if *r.ObjectVersions[i].Key, err = url.QueryUnescape(*r.ObjectVersions[i].Key); err != nil { + return err + } + } + + } + for i := 0; i < len(r.ObjectDeleteMarkers); i++ { + if r.ObjectDeleteMarkers[i].Key != nil { + if *r.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(*r.ObjectDeleteMarkers[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.ObjectVersionsDeleteMarkers); i++ { + if r.ObjectVersionsDeleteMarkers[i].Key != nil { + if *r.ObjectVersionsDeleteMarkers[i].Key, err = url.QueryUnescape(*r.ObjectVersionsDeleteMarkers[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + } + return nil +} + +type ListObjectsV2Request struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The character that is used to 
group objects by name. If you specify the delimiter parameter in the request, + // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from + // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes. + Delimiter *string `input:"query,delimiter"` + + // The name of the object after which the ListObjectsV2 (GetBucketV2) operation starts. + // The objects are returned in alphabetical order of their names. The start-after parameter + // is used to list the returned objects by page. + // The value of the parameter must be less than 1,024 bytes in length. + // Even if the specified start-after value does not exist during a conditional query, + // the ListObjectsV2 (GetBucketV2) operation starts from the object whose name is alphabetically greater than the start-after value. + // By default, this parameter is left empty. + StartAfter *string `input:"query,start-after"` + + // The token from which the ListObjectsV2 (GetBucketV2) operation must start. + // You can obtain the token from the NextContinuationToken parameter in the ListObjectsV2 (GetBucketV2) response. + ContinuationToken *string `input:"query,continuation-token"` + + // The maximum number of objects that you want to return. If the list operation cannot be complete at a time + // because the max-keys parameter is specified, the NextMarker element is included in the response as the marker + // for the next list operation. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of the returned objects must contain. + Prefix *string `input:"query,prefix"` + + // The encoding type of the content in the response. Valid value: url + EncodingType *string `input:"query,encoding-type"` + + // Specifies whether to include information about the object owner in the response. + FetchOwner bool `input:"query,fetch-owner"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type ListObjectsV2Result struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` + + // If the StartAfter parameter is specified in the request, the response contains the StartAfter parameter. + StartAfter *string `xml:"StartAfter"` + + // The maximum number of returned objects in the response. + MaxKeys int32 `xml:"MaxKeys"` + + // The character that is used to group objects by name. + Delimiter *string `xml:"Delimiter"` + + // Indicates whether the returned results are truncated. + // true indicates that not all results are returned this time. + // false indicates that all results are returned this time. + IsTruncated bool `xml:"IsTruncated"` + + // If the ContinuationToken parameter is specified in the request, the response contains the ContinuationToken parameter. + ContinuationToken *string `xml:"ContinuationToken"` + + // The name of the object from which the next ListObjectsV2 (GetBucketV2) operation starts. + // The NextContinuationToken value is used as the ContinuationToken value to query subsequent results. + NextContinuationToken *string `xml:"NextContinuationToken"` + + // The encoding type of the content in the response. + EncodingType *string `xml:"EncodingType"` + + // The container that stores the metadata of the returned objects. 
+ Contents []ObjectProperties `xml:"Contents"` + + // If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element. + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` + + // The number of objects returned for this request. If Delimiter is specified, KeyCount is the sum of the values of Key and CommonPrefixes. + KeyCount int `xml:"KeyCount"` + + // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules. + TransitionTime *time.Time `xml:"TransitionTime"` + + ResultCommon +} + +// ListObjectsV2 Queries information about all objects in a bucket. +func (c *Client) ListObjectsV2(ctx context.Context, request *ListObjectsV2Request, optFns ...func(*Options)) (*ListObjectsV2Result, error) { + var err error + if request == nil { + request = &ListObjectsV2Request{} + } + input := &OperationInput{ + OpName: "ListObjectsV2", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "list-type": "2", + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectsV2Result{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketInfoRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketInfoResult struct { + // The container that stores the bucket information. + BucketInfo BucketInfo `xml:"Bucket"` + ResultCommon +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // Indicates whether access tracking is enabled for the bucket. + AccessMonitor *string `xml:"AccessMonitor"` + + // The region in which the bucket is located. + Location *string `xml:"Location"` + + // The time when the bucket is created. The time is in UTC. + CreationDate *time.Time `xml:"CreationDate"` + + // The public endpoint that is used to access the bucket over the Internet. + ExtranetEndpoint *string `xml:"ExtranetEndpoint"` + + // The internal endpoint that is used to access the bucket from Elastic + IntranetEndpoint *string `xml:"IntranetEndpoint"` + + // The container that stores the access control list (ACL) information about the bucket. + ACL *string `xml:"AccessControlList>Grant"` + + // The disaster recovery type of the bucket. + DataRedundancyType *string `xml:"DataRedundancyType"` + + // The container that stores the information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The storage class of the bucket. + StorageClass *string `xml:"StorageClass"` + + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` + + // The container that stores the server-side encryption method. + SseRule SSERule `xml:"ServerSideEncryptionRule"` + + // Indicates whether versioning is enabled for the bucket. + Versioning *string `xml:"Versioning"` + + // Indicates whether transfer acceleration is enabled for the bucket. 
+ TransferAcceleration *string `xml:"TransferAcceleration"` + + // Indicates whether cross-region replication (CRR) is enabled for the bucket. + CrossRegionReplication *string `xml:"CrossRegionReplication"` + + // The container that stores the logs. + BucketPolicy BucketPolicy `xml:"BucketPolicy"` + + // The description of the bucket. + Comment *string `xml:"Comment"` + + // Indicates whether Block Public Access is enabled for the bucket. + // true: Block Public Access is enabled. false: Block Public Access is disabled. + BlockPublicAccess *bool `xml:"BlockPublicAccess"` +} + +type SSERule struct { + // The customer master key (CMK) ID in use. A valid value is returned only if you set SSEAlgorithm to KMS + // and specify the CMK ID. In other cases, an empty value is returned. + KMSMasterKeyID *string `xml:"KMSMasterKeyID"` + + // The server-side encryption method that is used by default. + SSEAlgorithm *string `xml:"SSEAlgorithm"` + + // Object's encryption algorithm. If this element is not included in the response, + // it indicates that the object is using the AES256 encryption algorithm. + // This option is only valid if the SSEAlgorithm value is KMS. + KMSDataEncryption *string `xml:"KMSDataEncryption"` +} + +type BucketPolicy struct { + // The name of the bucket that stores the logs. + LogBucket *string `xml:"LogBucket"` + + // The directory in which logs are stored. + LogPrefix *string `xml:"LogPrefix"` +} + +// GetBucketInfo Queries information about a bucket. +func (c *Client) GetBucketInfo(ctx context.Context, request *GetBucketInfoRequest, optFns ...func(*Options)) (*GetBucketInfoResult, error) { + var err error + if request == nil { + request = &GetBucketInfoRequest{} + } + input := &OperationInput{ + OpName: "GetBucketInfo", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "bucketInfo": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketInfoResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalSseRule); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +func unmarshalSseRule(result any, output *OperationOutput) error { + switch r := result.(type) { + case *GetBucketInfoResult: + fields := []*string{r.BucketInfo.SseRule.KMSMasterKeyID, r.BucketInfo.SseRule.SSEAlgorithm, r.BucketInfo.SseRule.KMSDataEncryption} + for _, pp := range fields { + if pp != nil && *pp == "None" { + *pp = "" + } + } + } + return nil +} + +type GetBucketLocationRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketLocationResult struct { + // The region in which the bucket is located. + LocationConstraint *string `xml:",chardata"` + ResultCommon +} + +// GetBucketLocation Queries the region of an Object Storage Service (OSS) bucket. 
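+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	result, err := client.GetBucketLocation(context.TODO(), &GetBucketLocationRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = result // result.LocationConstraint holds the region, for example "oss-cn-hangzhou"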
+func (c *Client) GetBucketLocation(ctx context.Context, request *GetBucketLocationRequest, optFns ...func(*Options)) (*GetBucketLocationResult, error) { + var err error + if request == nil { + request = &GetBucketLocationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLocation", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "location": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketLocationResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketStatRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketStatResult struct { + // The storage capacity of the bucket. Unit: bytes. + Storage int64 `xml:"Storage"` + + // The total number of objects that are stored in the bucket. + ObjectCount int64 `xml:"ObjectCount"` + + // The number of multipart upload tasks that have been initiated but are not completed or canceled. + MultipartUploadCount int64 `xml:"MultipartUploadCount"` + + // The number of LiveChannels in the bucket. + LiveChannelCount int64 `xml:"LiveChannelCount"` + + // The time when the obtained information is last modified. The value of this element is a UNIX timestamp. Unit: seconds. + LastModifiedTime int64 `xml:"LastModifiedTime"` + + // The storage usage of Standard objects in the bucket. Unit: bytes. + StandardStorage int64 `xml:"StandardStorage"` + + // The number of Standard objects in the bucket. + StandardObjectCount int64 `xml:"StandardObjectCount"` + + // The billed storage usage of Infrequent Access (IA) objects in the bucket. Unit: bytes. + InfrequentAccessStorage int64 `xml:"InfrequentAccessStorage"` + + // The actual storage usage of IA objects in the bucket. Unit: bytes. + InfrequentAccessRealStorage int64 `xml:"InfrequentAccessRealStorage"` + + // The number of IA objects in the bucket. + InfrequentAccessObjectCount int64 `xml:"InfrequentAccessObjectCount"` + + // The billed storage usage of Archive objects in the bucket. Unit: bytes. + ArchiveStorage int64 `xml:"ArchiveStorage"` + + // The actual storage usage of Archive objects in the bucket. Unit: bytes. + ArchiveRealStorage int64 `xml:"ArchiveRealStorage"` + + // The number of Archive objects in the bucket. + ArchiveObjectCount int64 `xml:"ArchiveObjectCount"` + + // The billed storage usage of Cold Archive objects in the bucket. Unit: bytes. + ColdArchiveStorage int64 `xml:"ColdArchiveStorage"` + + // The actual storage usage of Cold Archive objects in the bucket. Unit: bytes. + ColdArchiveRealStorage int64 `xml:"ColdArchiveRealStorage"` + + // The number of Cold Archive objects in the bucket. + ColdArchiveObjectCount int64 `xml:"ColdArchiveObjectCount"` + + // The number of Deep Cold Archive objects in the bucket. + DeepColdArchiveObjectCount int64 `xml:"DeepColdArchiveObjectCount"` + + // The billed storage usage of Deep Cold Archive objects in the bucket. Unit: bytes. + DeepColdArchiveStorage int64 `xml:"DeepColdArchiveStorage"` + + // The actual storage usage of Deep Cold Archive objects in the bucket. Unit: bytes. 
+ DeepColdArchiveRealStorage int64 `xml:"DeepColdArchiveRealStorage"` + + // The number of multipart parts in the bucket. + MultipartPartCount int64 `xml:"MultipartPartCount"` + + // The number of delete marker in the bucket. + DeleteMarkerCount int64 `xml:"DeleteMarkerCount"` + + ResultCommon +} + +// GetBucketStat Queries the storage capacity of a specified bucket and the number of objects that are stored in the bucket. +func (c *Client) GetBucketStat(ctx context.Context, request *GetBucketStatRequest, optFns ...func(*Options)) (*GetBucketStatResult, error) { + var err error + if request == nil { + request = &GetBucketStatRequest{} + } + input := &OperationInput{ + OpName: "GetBucketStat", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "stat": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketStatResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketAclRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The access control list (ACL) of the object. + Acl BucketACLType `input:"header,x-oss-acl,required"` + + RequestCommon +} + +type PutBucketAclResult struct { + ResultCommon +} + +// PutBucketAcl You can call this operation to configure or modify the ACL of a bucket. +func (c *Client) PutBucketAcl(ctx context.Context, request *PutBucketAclRequest, optFns ...func(*Options)) (*PutBucketAclResult, error) { + var err error + if request == nil { + request = &PutBucketAclRequest{} + } + input := &OperationInput{ + OpName: "PutBucketAcl", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "acl": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketAclRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketAclResult struct { + // The container that stores the access control list (ACL) information about the bucket. + ACL *string `xml:"AccessControlList>Grant"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + ResultCommon +} + +// GetBucketAcl You can call this operation to query the ACL of a bucket. 
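+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	result, err := client.GetBucketAcl(context.TODO(), &GetBucketAclRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = result // result.ACL holds the grant, such as private, public-read, or public-read-write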
+func (c *Client) GetBucketAcl(ctx context.Context, request *GetBucketAclRequest, optFns ...func(*Options)) (*GetBucketAclResult, error) { + var err error + if request == nil { + request = &GetBucketAclRequest{} + } + input := &OperationInput{ + OpName: "GetBucketAcl", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "acl": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketVersioningRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + VersioningConfiguration *VersioningConfiguration `input:"body,VersioningConfiguration,xml,required"` + + RequestCommon +} + +type VersioningConfiguration struct { + // The versioning state of the bucket. Valid values: Enabled,Suspended + Status VersioningStatusType `xml:"Status"` +} + +type PutBucketVersioningResult struct { + ResultCommon +} + +// PutBucketVersioning Configures the versioning state for a bucket. +func (c *Client) PutBucketVersioning(ctx context.Context, request *PutBucketVersioningRequest, optFns ...func(*Options)) (*PutBucketVersioningResult, error) { + var err error + if request == nil { + request = &PutBucketVersioningRequest{} + } + input := &OperationInput{ + OpName: "PutBucketVersioning", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "versioning": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketVersioningResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketVersioningRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketVersioningResult struct { + // The versioning state of the bucket. Valid values: Enabled,Suspended + VersionStatus *string `xml:"Status"` + + ResultCommon +} + +// GetBucketVersioning You can call this operation to query the versioning state of a bucket. 
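+// Example (editorial sketch, not upstream code; assumes a configured *Client named client):
+//
+//	result, err := client.GetBucketVersioning(context.TODO(), &GetBucketVersioningRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = result // result.VersionStatus is "Enabled" or "Suspended"; it may be nil if versioning was never configured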
+func (c *Client) GetBucketVersioning(ctx context.Context, request *GetBucketVersioningRequest, optFns ...func(*Options)) (*GetBucketVersioningResult, error) { + var err error + if request == nil { + request = &GetBucketVersioningRequest{} + } + input := &OperationInput{ + OpName: "GetBucketVersioning", + Method: "GET", + Parameters: map[string]string{ + "versioning": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketVersioningResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type ListObjectVersionsRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The character that is used to group objects by name. If you specify the delimiter parameter in the request, + // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from + // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes. + Delimiter *string `input:"query,delimiter"` + + // Specifies that objects whose names are alphabetically after the value of the key-marker parameter are returned. + // This parameter can be specified together with version-id-marker. + // By default, this parameter is left empty. + KeyMarker *string `input:"query,key-marker"` + + // Specifies that the versions created before the version specified by version-id-marker for the object + // whose name is specified by key-marker are returned by creation time in descending order. + // By default, if this parameter is not specified, the results are returned from the latest + // version of the object whose name is alphabetically after the value of key-marker. + VersionIdMarker *string `input:"query,version-id-marker"` + + // The maximum number of objects that you want to return. If the list operation cannot be complete at a time + // because the max-keys parameter is specified, the NextMarker element is included in the response as the marker + // for the next list operation. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of the returned objects must contain. + Prefix *string `input:"query,prefix"` + + // The encoding type of the content in the response. Valid value: url + EncodingType *string `input:"query,encoding-type"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + // To indicate that whether to stores the versions of objects and delete markers together in one container. + // When false(default), stores the versions of objects into ListObjectVersionsResult.ObjectVersions, + // When false(default), stores the delete markers into ListObjectVersionsResult.ObjectDeleteMarkers, + // When true, stores the versions and delete markers into ListObjectVersionsResult.ObjectVersionsDeleteMarkers, + IsMix bool + + RequestCommon +} + +type ListObjectVersionsResult struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // Indicates the object from which the ListObjectVersions (GetBucketVersions) operation starts. 
+ KeyMarker *string `xml:"KeyMarker"` + + // The version from which the ListObjectVersions (GetBucketVersions) operation starts. + // This parameter is used together with KeyMarker. + VersionIdMarker *string `xml:"VersionIdMarker"` + + // If not all results are returned for the request, the NextKeyMarker parameter is included + // in the response to indicate the key-marker value of the next ListObjectVersions (GetBucketVersions) request. + NextKeyMarker *string `xml:"NextKeyMarker"` + + // If not all results are returned for the request, the NextVersionIdMarker parameter is included in + // the response to indicate the version-id-marker value of the next ListObjectVersions (GetBucketVersions) request. + NextVersionIdMarker *string `xml:"NextVersionIdMarker"` + + // The container that stores delete markers. + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` + + // The container that stores the versions of objects, excluding delete markers. + ObjectVersions []ObjectVersionProperties `xml:"Version"` + + // The container that stores the versions of objects and delete markers together in the order they are returned. + // Only valid when ListObjectVersionsRequest.IsMix is set to true + ObjectVersionsDeleteMarkers []ObjectMixProperties `xml:"ObjectMix"` + + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` + + // The maximum number of returned objects in the response. + MaxKeys int32 `xml:"MaxKeys"` + + // The character that is used to group objects by name. + Delimiter *string `xml:"Delimiter"` + + // Indicates whether the returned results are truncated. + // true indicates that not all results are returned this time. + // false indicates that all results are returned this time. + IsTruncated bool `xml:"IsTruncated"` + + // The encoding type of the content in the response. + EncodingType *string `xml:"EncodingType"` + + // If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element. + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` + + ResultCommon +} + +type ObjectMixProperties ObjectVersionProperties + +func (m ObjectMixProperties) IsDeleteMarker() bool { + if m.VersionId != nil && m.Type == nil { + return true + } + return false +} + +type ObjectDeleteMarkerProperties struct { + // The name of the object. + Key *string `xml:"Key"` + + // The version ID of the object. + VersionId *string `xml:"VersionId"` + + // Indicates whether the version is the current version. + IsLatest bool `xml:"IsLatest"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` +} + +type ObjectVersionProperties struct { + // The name of the object. + Key *string `xml:"Key"` + + // The version ID of the object. + VersionId *string `xml:"VersionId"` + + // Indicates whether the version is the current version. + IsLatest bool `xml:"IsLatest"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The type of the returned object. + Type *string `xml:"Type"` + + // The size of the returned object. Unit: bytes. + Size int64 `xml:"Size"` + + // The entity tag (ETag) that is generated when an object is created. ETags are used to identify the content of objects. + ETag *string `xml:"ETag"` + + // The storage class of the object. 
+ StorageClass *string `xml:"StorageClass"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The restoration status of the object. + RestoreInfo *string `xml:"RestoreInfo"` + + // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules. + TransitionTime *time.Time `xml:"TransitionTime"` +} + +// ListObjectVersions Lists the versions of all objects in a bucket, including delete markers. +func (c *Client) ListObjectVersions(ctx context.Context, request *ListObjectVersionsRequest, optFns ...func(*Options)) (*ListObjectVersionsResult, error) { + var err error + if request == nil { + request = &ListObjectVersionsRequest{} + } + input := &OperationInput{ + OpName: "ListObjectVersions", + Method: "GET", + Parameters: map[string]string{ + "versions": "", + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectVersionsResult{} + var unmarshalFns []func(result any, output *OperationOutput) error + if request.IsMix { + unmarshalFns = append(unmarshalFns, unmarshalBodyXmlVersions) + } else { + unmarshalFns = append(unmarshalFns, unmarshalBodyXml) + } + unmarshalFns = append(unmarshalFns, unmarshalEncodeType) + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketRequestPaymentRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The request payment configuration information for the bucket. + PaymentConfiguration *RequestPaymentConfiguration `input:"body,RequestPaymentConfiguration,xml,required"` + + RequestCommon +} + +type RequestPaymentConfiguration struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + + // The payer of the request and traffic fees. + Payer PayerType `xml:"Payer"` +} + +type PutBucketRequestPaymentResult struct { + ResultCommon +} + +// PutBucketRequestPayment You can call this operation to enable pay-by-requester for a bucket. 
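+//
+// A minimal usage sketch (illustrative only, not part of this file: the
+// initialized *Client variable "client", the bucket name, and the assumption
+// that PayerType is string-backed with a "Requester" value are all mine):
+//
+//	bucket := "example-bucket"
+//	_, err := client.PutBucketRequestPayment(context.TODO(), &PutBucketRequestPaymentRequest{
+//		Bucket:               &bucket,
+//		PaymentConfiguration: &RequestPaymentConfiguration{Payer: PayerType("Requester")},
+//	})
+//	if err != nil {
+//		// handle error
+//	}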
+func (c *Client) PutBucketRequestPayment(ctx context.Context, request *PutBucketRequestPaymentRequest, optFns ...func(*Options)) (*PutBucketRequestPaymentResult, error) { + var err error + if request == nil { + request = &PutBucketRequestPaymentRequest{} + } + input := &OperationInput{ + OpName: "PutBucketRequestPayment", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "requestPayment": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"requestPayment"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketRequestPaymentResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketRequestPaymentRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketRequestPaymentResult struct { + // Indicates who pays the download and request fees. + Payer *string `xml:"Payer"` + + ResultCommon +} + +// GetBucketRequestPayment You can call this operation to obtain pay-by-requester configurations for a bucket. +func (c *Client) GetBucketRequestPayment(ctx context.Context, request *GetBucketRequestPaymentRequest, optFns ...func(*Options)) (*GetBucketRequestPaymentResult, error) { + var err error + if request == nil { + request = &GetBucketRequestPaymentRequest{} + } + input := &OperationInput{ + OpName: "GetBucketRequestPayment", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "requestPayment": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"requestPayment"}) + if err = c.marshalInput(request, input); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketRequestPaymentResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go new file mode 100644 index 000000000..3226aff4c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go @@ -0,0 +1,106 @@ +package oss + +import ( + "context" +) + +type AccessMonitorConfiguration struct { + // The access tracking status of the bucket. Valid values:- Enabled: Access tracking is enabled.- Disabled: Access tracking is disabled. + Status AccessMonitorStatusType `xml:"Status"` +} + +type PutBucketAccessMonitorRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + AccessMonitorConfiguration *AccessMonitorConfiguration `input:"body,AccessMonitorConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketAccessMonitorResult struct { + ResultCommon +} + +// PutBucketAccessMonitor Modifies the access tracking status of a bucket. 
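+//
+// A minimal usage sketch (illustrative; "client" is an assumed initialized
+// *Client, and casting "Enabled" assumes AccessMonitorStatusType is
+// string-backed, as its documented Enabled/Disabled values suggest):
+//
+//	bucket := "example-bucket"
+//	_, err := client.PutBucketAccessMonitor(context.TODO(), &PutBucketAccessMonitorRequest{
+//		Bucket:                     &bucket,
+//		AccessMonitorConfiguration: &AccessMonitorConfiguration{Status: AccessMonitorStatusType("Enabled")},
+//	})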
+func (c *Client) PutBucketAccessMonitor(ctx context.Context, request *PutBucketAccessMonitorRequest, optFns ...func(*Options)) (*PutBucketAccessMonitorResult, error) { + var err error + if request == nil { + request = &PutBucketAccessMonitorRequest{} + } + input := &OperationInput{ + OpName: "PutBucketAccessMonitor", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessmonitor": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketAccessMonitorResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketAccessMonitorRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketAccessMonitorResult struct { + // The container that stores access monitor configuration. + AccessMonitorConfiguration *AccessMonitorConfiguration `output:"body,AccessMonitorConfiguration,xml"` + + ResultCommon +} + +// GetBucketAccessMonitor Queries the access tracking status of a bucket. +func (c *Client) GetBucketAccessMonitor(ctx context.Context, request *GetBucketAccessMonitorRequest, optFns ...func(*Options)) (*GetBucketAccessMonitorResult, error) { + var err error + if request == nil { + request = &GetBucketAccessMonitorRequest{} + } + input := &OperationInput{ + OpName: "GetBucketAccessMonitor", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessmonitor": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketAccessMonitorResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go new file mode 100644 index 000000000..fac2aa56e --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go @@ -0,0 +1,104 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ArchiveDirectReadConfiguration struct { + // Specifies whether to enable real-time access of Archive objects for a bucket. Valid values:- true- false + Enabled *bool `xml:"Enabled"` +} + +type GetBucketArchiveDirectReadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketArchiveDirectReadResult struct { + // The container that stores the configurations for real-time access of Archive objects. + ArchiveDirectReadConfiguration *ArchiveDirectReadConfiguration `output:"body,ArchiveDirectReadConfiguration,xml"` + + ResultCommon +} + +// GetBucketArchiveDirectRead Queries whether real-time access of Archive objects is enabled for a bucket. 
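+//
+// A minimal usage sketch (illustrative; "client" is an assumed initialized
+// *Client and the bucket name is a placeholder):
+//
+//	bucket := "example-bucket"
+//	result, err := client.GetBucketArchiveDirectRead(context.TODO(), &GetBucketArchiveDirectReadRequest{
+//		Bucket: &bucket,
+//	})
+//	if err == nil && result.ArchiveDirectReadConfiguration != nil {
+//		_ = result.ArchiveDirectReadConfiguration.Enabled // current setting
+//	}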
+func (c *Client) GetBucketArchiveDirectRead(ctx context.Context, request *GetBucketArchiveDirectReadRequest, optFns ...func(*Options)) (*GetBucketArchiveDirectReadResult, error) { + var err error + if request == nil { + request = &GetBucketArchiveDirectReadRequest{} + } + input := &OperationInput{ + OpName: "GetBucketArchiveDirectRead", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "bucketArchiveDirectRead": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"bucketArchiveDirectRead"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketArchiveDirectReadResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketArchiveDirectReadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body. + ArchiveDirectReadConfiguration *ArchiveDirectReadConfiguration `input:"body,ArchiveDirectReadConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketArchiveDirectReadResult struct { + ResultCommon +} + +// PutBucketArchiveDirectRead Enables or disables real-time access of Archive objects for a bucket. +func (c *Client) PutBucketArchiveDirectRead(ctx context.Context, request *PutBucketArchiveDirectReadRequest, optFns ...func(*Options)) (*PutBucketArchiveDirectReadResult, error) { + var err error + if request == nil { + request = &PutBucketArchiveDirectReadRequest{} + } + input := &OperationInput{ + OpName: "PutBucketArchiveDirectRead", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "bucketArchiveDirectRead": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"bucketArchiveDirectRead"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketArchiveDirectReadResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go new file mode 100644 index 000000000..20c9dad78 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go @@ -0,0 +1,351 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type CertificateConfiguration struct { + // The ID of the certificate. + CertId *string `xml:"CertId"` + + // The public key of the certificate. + Certificate *string `xml:"Certificate"` + + // The private key of the certificate. + PrivateKey *string `xml:"PrivateKey"` + + // The ID of the certificate. If the Force parameter is not set to true, the OSS server checks whether the value of the Force parameter matches the current certificate ID. 
If the value does not match the certificate ID, an error is returned. Notice: if you do not specify the PreviousCertId parameter when you bind a certificate, you must set the Force parameter to true.
+ PreviousCertId *string `xml:"PreviousCertId"`
+
+ // Specifies whether to overwrite the certificate. Valid values:- true: overwrites the certificate.- false: does not overwrite the certificate.
+ Force *bool `xml:"Force"`
+
+ // Specifies whether to delete the certificate. Valid values:- true: deletes the certificate.- false: does not delete the certificate.
+ DeleteCertificate *bool `xml:"DeleteCertificate"`
+}
+
+type BucketCnameConfiguration struct {
+ // The custom domain name.
+ Domain *string `xml:"Cname>Domain"`
+
+ // The container for which the certificate is configured.
+ CertificateConfiguration *CertificateConfiguration `xml:"Cname>CertificateConfiguration"`
+}
+
+type CnameCertificate struct {
+ // The time when the certificate was bound.
+ CreationDate *string `xml:"CreationDate"`
+
+ // The signature of the certificate.
+ Fingerprint *string `xml:"Fingerprint"`
+
+ // The time when the certificate takes effect.
+ ValidStartDate *string `xml:"ValidStartDate"`
+
+ // The time when the certificate expires.
+ ValidEndDate *string `xml:"ValidEndDate"`
+
+ // The source of the certificate. Valid values:* CAS * Upload
+ Type *string `xml:"Type"`
+
+ // The ID of the certificate.
+ CertId *string `xml:"CertId"`
+
+ // The status of the certificate. Valid values:* Enabled * Disabled
+ Status *string `xml:"Status"`
+}
+
+type CnameInfo struct {
+ // The custom domain name.
+ Domain *string `xml:"Domain"`
+
+ // The time when the custom domain name was mapped.
+ LastModified *string `xml:"LastModified"`
+
+ // The status of the domain name. Valid values:* Enabled * Disabled
+ Status *string `xml:"Status"`
+
+ // The container in which the certificate information is stored.
+ Certificate *CnameCertificate `xml:"Certificate"`
+}
+
+type CnameToken struct {
+ // The name of the bucket to which the CNAME record is mapped.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the CNAME record that is mapped to the bucket.
+ Cname *string `xml:"Cname"`
+
+ // The CNAME token that is returned by OSS.
+ Token *string `xml:"Token"`
+
+ // The time when the CNAME token expires.
+ ExpireTime *string `xml:"ExpireTime"`
+}
+
+type PutCnameRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The request body schema.
+ BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"`
+
+ RequestCommon
+}
+
+type PutCnameResult struct {
+ ResultCommon
+}
+
+// PutCname Maps a CNAME record to a bucket.
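+//
+// A minimal usage sketch (illustrative; "client", the bucket name, and the
+// domain are placeholders, not part of this file):
+//
+//	bucket, domain := "example-bucket", "www.example.com"
+//	_, err := client.PutCname(context.TODO(), &PutCnameRequest{
+//		Bucket:                   &bucket,
+//		BucketCnameConfiguration: &BucketCnameConfiguration{Domain: &domain},
+//	})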
+func (c *Client) PutCname(ctx context.Context, request *PutCnameRequest, optFns ...func(*Options)) (*PutCnameResult, error) { + var err error + if request == nil { + request = &PutCnameRequest{} + } + input := &OperationInput{ + OpName: "PutCname", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + "comp": "add", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListCnameRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListCnameResult struct { + // The container that is used to store the information about all CNAME records. + Cnames []CnameInfo `xml:"Cname"` + + // The name of the bucket to which the CNAME records you want to query are mapped. + Bucket *string `xml:"Bucket"` + + // The name of the bucket owner. + Owner *string `xml:"Owner"` + + ResultCommon +} + +// ListCname Queries all CNAME records that are mapped to a bucket. +func (c *Client) ListCname(ctx context.Context, request *ListCnameRequest, optFns ...func(*Options)) (*ListCnameResult, error) { + var err error + if request == nil { + request = &ListCnameRequest{} + } + input := &OperationInput{ + OpName: "ListCname", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteCnameRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"` + + RequestCommon +} + +type DeleteCnameResult struct { + ResultCommon +} + +// DeleteCname Deletes a CNAME record that is mapped to a bucket. 
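+//
+// A minimal usage sketch (illustrative; "client", the bucket name, and the
+// domain are placeholders):
+//
+//	bucket, domain := "example-bucket", "www.example.com"
+//	_, err := client.DeleteCname(context.TODO(), &DeleteCnameRequest{
+//		Bucket:                   &bucket,
+//		BucketCnameConfiguration: &BucketCnameConfiguration{Domain: &domain},
+//	})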
+func (c *Client) DeleteCname(ctx context.Context, request *DeleteCnameRequest, optFns ...func(*Options)) (*DeleteCnameResult, error) { + var err error + if request == nil { + request = &DeleteCnameRequest{} + } + input := &OperationInput{ + OpName: "DeleteCname", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + "comp": "delete", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cname", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetCnameTokenRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the CNAME record that is mapped to the bucket. + Cname *string `input:"query,cname,required"` + + RequestCommon +} + +type GetCnameTokenResult struct { + // The container in which the CNAME token is stored. + CnameToken *CnameToken `output:"body,CnameToken,xml"` + + ResultCommon +} + +// GetCnameToken Queries the created CNAME tokens. +func (c *Client) GetCnameToken(ctx context.Context, request *GetCnameTokenRequest, optFns ...func(*Options)) (*GetCnameTokenResult, error) { + var err error + if request == nil { + request = &GetCnameTokenRequest{} + } + input := &OperationInput{ + OpName: "GetCnameToken", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "token", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetCnameTokenResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateCnameTokenRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"` + + RequestCommon +} + +type CreateCnameTokenResult struct { + // The container in which the CNAME token is stored. + CnameToken *CnameToken `output:"body,CnameToken,xml"` + + ResultCommon +} + +// CreateCnameToken Creates a CNAME token to verify the ownership of a domain name. 
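+//
+// A minimal usage sketch (illustrative; "client", the bucket name, and the
+// domain are placeholders):
+//
+//	bucket, domain := "example-bucket", "www.example.com"
+//	result, err := client.CreateCnameToken(context.TODO(), &CreateCnameTokenRequest{
+//		Bucket:                   &bucket,
+//		BucketCnameConfiguration: &BucketCnameConfiguration{Domain: &domain},
+//	})
+//	// On success, result.CnameToken carries the token used to verify domain ownership.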
+func (c *Client) CreateCnameToken(ctx context.Context, request *CreateCnameTokenRequest, optFns ...func(*Options)) (*CreateCnameTokenResult, error) {
+ var err error
+ if request == nil {
+ request = &CreateCnameTokenRequest{}
+ }
+ input := &OperationInput{
+ OpName: "CreateCnameToken",
+ Method: "POST",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "cname": "",
+ "comp": "token",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"cname", "comp"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &CreateCnameTokenResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go
new file mode 100644
index 000000000..83b0107aa
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go
@@ -0,0 +1,250 @@
+package oss
+
+import (
+ "context"
+
+ "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type CORSConfiguration struct {
+ // The container that stores CORS rules. Up to 10 rules can be configured for a bucket.
+ CORSRules []CORSRule `xml:"CORSRule"`
+
+ // Indicates whether the Vary: Origin header was returned. Default value: false.- true: The Vary: Origin header is returned regardless of whether the request is a cross-origin request or whether the cross-origin request succeeds.- false: The Vary: Origin header is not returned.
+ ResponseVary *bool `xml:"ResponseVary"`
+}
+
+type CORSRule struct {
+ // The origins from which cross-origin requests are allowed.
+ AllowedOrigins []string `xml:"AllowedOrigin"`
+
+ // The methods that you can use in cross-origin requests.
+ AllowedMethods []string `xml:"AllowedMethod"`
+
+ // Specifies whether the headers specified by Access-Control-Request-Headers in the OPTIONS preflight request are allowed. Each header specified by Access-Control-Request-Headers must match the value of an AllowedHeader element. You can use only one asterisk (*) as the wildcard character.
+ AllowedHeaders []string `xml:"AllowedHeader"`
+
+ // The response headers for allowed access requests from applications, such as an XMLHttpRequest object in JavaScript. The asterisk (*) wildcard character is not supported.
+ ExposeHeaders []string `xml:"ExposeHeader"`
+
+ // The period of time within which the browser can cache the response to an OPTIONS preflight request for the specified resource. Unit: seconds. You can specify only one MaxAgeSeconds element in a CORS rule.
+ MaxAgeSeconds *int64 `xml:"MaxAgeSeconds"`
+}
+
+type PutBucketCorsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The request body schema.
+ CORSConfiguration *CORSConfiguration `input:"body,CORSConfiguration,xml,required"`
+
+ RequestCommon
+}
+
+type PutBucketCorsResult struct {
+ ResultCommon
+}
+
+// PutBucketCors Configures cross-origin resource sharing (CORS) rules for a bucket.
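+//
+// A minimal usage sketch (illustrative; "client" and all rule values are
+// placeholders chosen for the example):
+//
+//	bucket := "example-bucket"
+//	_, err := client.PutBucketCors(context.TODO(), &PutBucketCorsRequest{
+//		Bucket: &bucket,
+//		CORSConfiguration: &CORSConfiguration{
+//			CORSRules: []CORSRule{{
+//				AllowedOrigins: []string{"*"},
+//				AllowedMethods: []string{"GET", "HEAD"},
+//			}},
+//		},
+//	})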
+func (c *Client) PutBucketCors(ctx context.Context, request *PutBucketCorsRequest, optFns ...func(*Options)) (*PutBucketCorsResult, error) { + var err error + if request == nil { + request = &PutBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "PutBucketCors", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketCorsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketCorsResult struct { + // The container that stores CORS configuration. + CORSConfiguration *CORSConfiguration `output:"body,CORSConfiguration,xml"` + + ResultCommon +} + +// GetBucketCors Queries the cross-origin resource sharing (CORS) rules that are configured for a bucket. +func (c *Client) GetBucketCors(ctx context.Context, request *GetBucketCorsRequest, optFns ...func(*Options)) (*GetBucketCorsResult, error) { + var err error + if request == nil { + request = &GetBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "GetBucketCors", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketCorsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketCorsResult struct { + ResultCommon +} + +// DeleteBucketCors Disables the cross-origin resource sharing (CORS) feature and deletes all CORS rules for a bucket. 
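+//
+// A minimal usage sketch (illustrative; "client" and the bucket name are placeholders):
+//
+//	bucket := "example-bucket"
+//	_, err := client.DeleteBucketCors(context.TODO(), &DeleteBucketCorsRequest{Bucket: &bucket})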
+func (c *Client) DeleteBucketCors(ctx context.Context, request *DeleteBucketCorsRequest, optFns ...func(*Options)) (*DeleteBucketCorsResult, error) { + var err error + if request == nil { + request = &DeleteBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketCors", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type OptionObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The full path of the object. + Key *string `input:"path,key,required"` + + // The origin of the request. It is used to identify a cross-origin request. You can specify only one Origin header in a cross-origin request. By default, this header is left empty. + Origin *string `input:"header,Origin,required"` + + // The method to be used in the actual cross-origin request. You can specify only one Access-Control-Request-Method header in a cross-origin request. By default, this header is left empty. + AccessControlRequestMethod *string `input:"header,Access-Control-Request-Method,required"` + + // The custom headers to be sent in the actual cross-origin request. You can configure multiple custom headers in a cross-origin request. Custom headers are separated by commas (,). By default, this header is left empty. + AccessControlRequestHeaders *string `input:"header,Access-Control-Request-Headers"` + + RequestCommon +} + +type OptionObjectResult struct { + // The HTTP method of the request. If the request is denied, the response does not contain the header. + AccessControlAllowMethods *string `output:"header,Access-Control-Allow-Methods"` + + // The list of headers included in the request. If the request includes headers that are not allowed, the response does not contain the headers and the request is denied. + AccessControlAllowHeaders *string `output:"header,Access-Control-Allow-Headers"` + + // The list of headers that can be accessed by JavaScript applications on a client. + AccessControlExposeHeaders *string `output:"header,Access-Control-Expose-Headers"` + + // The maximum duration for the browser to cache preflight results. Unit: seconds. + AccessControlMaxAge *int64 `output:"header,Access-Control-Max-Age"` + + // The origin that is included in the request. If the request is denied, the response does not contain the header. + AccessControlAllowOrigin *string `output:"header,Access-Control-Allow-Origin"` + + ResultCommon +} + +// OptionObject Determines whether to send a cross-origin request. Before a cross-origin request is sent, the browser sends a preflight OPTIONS request that includes a specific origin, HTTP method, and header information to Object Storage Service (OSS) to determine whether to send the cross-origin request. 
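+//
+// A minimal usage sketch (illustrative; "client" and all literal values are placeholders):
+//
+//	bucket, key := "example-bucket", "example-object"
+//	origin, method := "http://www.example.com", "PUT"
+//	result, err := client.OptionObject(context.TODO(), &OptionObjectRequest{
+//		Bucket:                     &bucket,
+//		Key:                        &key,
+//		Origin:                     &origin,
+//		AccessControlRequestMethod: &method,
+//	})
+//	// On success, result.AccessControlAllowOrigin reports whether the origin is allowed.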
+func (c *Client) OptionObject(ctx context.Context, request *OptionObjectRequest, optFns ...func(*Options)) (*OptionObjectResult, error) { + var err error + if request == nil { + request = &OptionObjectRequest{} + } + input := &OperationInput{ + OpName: "OptionObject", + Method: "OPTIONS", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + Key: request.Key, + } + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &OptionObjectResult{} + + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go new file mode 100644 index 000000000..4af046ccf --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go @@ -0,0 +1,170 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ApplyServerSideEncryptionByDefault struct { + // The default server-side encryption method. Valid values: KMS, AES256, and SM4. You are charged when you call API operations to encrypt or decrypt data by using CMKs managed by KMS. For more information, see [Billing of KMS](~~52608~~). If the default server-side encryption method is configured for the destination bucket and ReplicaCMKID is configured in the CRR rule:* If objects in the source bucket are not encrypted, they are encrypted by using the default encryption method of the destination bucket after they are replicated.* If objects in the source bucket are encrypted by using SSE-KMS or SSE-OSS, they are encrypted by using the same method after they are replicated.For more information, see [Use data replication with server-side encryption](~~177216~~). + SSEAlgorithm *string `xml:"SSEAlgorithm"` + + // The CMK ID that is specified when SSEAlgorithm is set to KMS and a specified CMK is used for encryption. In other cases, leave this parameter empty. + KMSMasterKeyID *string `xml:"KMSMasterKeyID"` + + // The algorithm that is used to encrypt objects. If this parameter is not specified, objects are encrypted by using AES256. This parameter is valid only when SSEAlgorithm is set to KMS. Valid value: SM4. + KMSDataEncryption *string `xml:"KMSDataEncryption"` +} + +type ServerSideEncryptionRule struct { + // The container that stores the default server-side encryption method. + ApplyServerSideEncryptionByDefault *ApplyServerSideEncryptionByDefault `xml:"ApplyServerSideEncryptionByDefault"` +} + +type PutBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + ServerSideEncryptionRule *ServerSideEncryptionRule `input:"body,ServerSideEncryptionRule,xml,required"` + + RequestCommon +} + +type PutBucketEncryptionResult struct { + ResultCommon +} + +// PutBucketEncryption Configures encryption rules for a bucket. 
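+//
+// A minimal usage sketch (illustrative; "client" is a placeholder, and "AES256"
+// is one of the algorithm values documented above):
+//
+//	bucket, algorithm := "example-bucket", "AES256"
+//	_, err := client.PutBucketEncryption(context.TODO(), &PutBucketEncryptionRequest{
+//		Bucket: &bucket,
+//		ServerSideEncryptionRule: &ServerSideEncryptionRule{
+//			ApplyServerSideEncryptionByDefault: &ApplyServerSideEncryptionByDefault{SSEAlgorithm: &algorithm},
+//		},
+//	})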
+func (c *Client) PutBucketEncryption(ctx context.Context, request *PutBucketEncryptionRequest, optFns ...func(*Options)) (*PutBucketEncryptionResult, error) { + var err error + if request == nil { + request = &PutBucketEncryptionRequest{} + } + input := &OperationInput{ + OpName: "PutBucketEncryption", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "encryption": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"encryption"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketEncryptionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketEncryptionResult struct { + // The container that stores server-side encryption rules. + ServerSideEncryptionRule *ServerSideEncryptionRule `output:"body,ServerSideEncryptionRule,xml"` + + ResultCommon +} + +// GetBucketEncryption Queries the encryption rules configured for a bucket. +func (c *Client) GetBucketEncryption(ctx context.Context, request *GetBucketEncryptionRequest, optFns ...func(*Options)) (*GetBucketEncryptionResult, error) { + var err error + if request == nil { + request = &GetBucketEncryptionRequest{} + } + input := &OperationInput{ + OpName: "GetBucketEncryption", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "encryption": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"encryption"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketEncryptionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketEncryptionResult struct { + ResultCommon +} + +// DeleteBucketEncryption Deletes encryption rules for a bucket. 
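+//
+// A minimal usage sketch (illustrative; "client" and the bucket name are placeholders):
+//
+//	bucket := "example-bucket"
+//	_, err := client.DeleteBucketEncryption(context.TODO(), &DeleteBucketEncryptionRequest{Bucket: &bucket})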
+func (c *Client) DeleteBucketEncryption(ctx context.Context, request *DeleteBucketEncryptionRequest, optFns ...func(*Options)) (*DeleteBucketEncryptionResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteBucketEncryptionRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteBucketEncryption",
+ Method: "DELETE",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "encryption": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"encryption"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &DeleteBucketEncryptionResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go
new file mode 100644
index 000000000..08f4f9ab8
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go
@@ -0,0 +1,114 @@
+package oss
+
+import (
+ "context"
+
+ "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type TLS struct {
+ // Specifies whether to enable TLS version management for the bucket. Valid values:* true * false
+ Enable *bool `xml:"Enable"`
+
+ // The TLS versions.
+ TLSVersions []string `xml:"TLSVersion"`
+}
+
+type HttpsConfiguration struct {
+ // The container that stores TLS version configurations.
+ TLS *TLS `xml:"TLS"`
+}
+
+type GetBucketHttpsConfigRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ RequestCommon
+}
+
+type GetBucketHttpsConfigResult struct {
+ // The container that stores HTTPS configurations.
+ HttpsConfiguration *HttpsConfiguration `output:"body,HttpsConfiguration,xml"`
+
+ ResultCommon
+}
+
+// GetBucketHttpsConfig Queries the Transport Layer Security (TLS) version configurations of a bucket.
+func (c *Client) GetBucketHttpsConfig(ctx context.Context, request *GetBucketHttpsConfigRequest, optFns ...func(*Options)) (*GetBucketHttpsConfigResult, error) {
+ var err error
+ if request == nil {
+ request = &GetBucketHttpsConfigRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetBucketHttpsConfig",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "httpsConfig": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"httpsConfig"})
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &GetBucketHttpsConfigResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+type PutBucketHttpsConfigRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The request body schema.
+ HttpsConfiguration *HttpsConfiguration `input:"body,HttpsConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketHttpsConfigResult struct { + ResultCommon +} + +// PutBucketHttpsConfig Enables or disables Transport Layer Security (TLS) version management for a bucket. +func (c *Client) PutBucketHttpsConfig(ctx context.Context, request *PutBucketHttpsConfigRequest, optFns ...func(*Options)) (*PutBucketHttpsConfigResult, error) { + var err error + if request == nil { + request = &PutBucketHttpsConfigRequest{} + } + input := &OperationInput{ + OpName: "PutBucketHttpsConfig", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "httpsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"httpsConfig"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketHttpsConfigResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go new file mode 100644 index 000000000..54d83529f --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go @@ -0,0 +1,318 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type InventoryOSSBucketDestination struct { + // The format of exported inventory lists. The exported inventory lists are CSV objects compressed by using GZIP. + Format InventoryFormatType `xml:"Format"` + + // The ID of the account to which permissions are granted by the bucket owner. + AccountId *string `xml:"AccountId"` + + // The Alibaba Cloud Resource Name (ARN) of the role that has the permissions to read all objects from the source bucket and write objects to the destination bucket. Format: `acs:ram::uid:role/rolename`. + RoleArn *string `xml:"RoleArn"` + + // The name of the bucket in which exported inventory lists are stored. + Bucket *string `xml:"Bucket"` + + // The prefix of the path in which the exported inventory lists are stored. + Prefix *string `xml:"Prefix"` + + // The container that stores the encryption method of the exported inventory lists. + Encryption *InventoryEncryption `xml:"Encryption"` +} + +type InventoryDestination struct { + // The container that stores information about the bucket in which exported inventory lists are stored. + OSSBucketDestination *InventoryOSSBucketDestination `xml:"OSSBucketDestination"` +} + +type InventorySchedule struct { + // The frequency at which the inventory list is exported. Valid values:- Daily: The inventory list is exported on a daily basis. - Weekly: The inventory list is exported on a weekly basis. + Frequency InventoryFrequencyType `xml:"Frequency"` +} + +type InventoryFilter struct { + // The beginning of the time range during which the object was last modified. Unit: seconds.Valid values: [1262275200, 253402271999] + LastModifyBeginTimeStamp *int64 `xml:"LastModifyBeginTimeStamp"` + + // The end of the time range during which the object was last modified. 
Unit: seconds. Valid values: [1262275200, 253402271999]
+ LastModifyEndTimeStamp *int64 `xml:"LastModifyEndTimeStamp"`
+
+ // The minimum size of the specified object. Unit: B. Valid values: [0 B, 48.8 TB]
+ LowerSizeBound *int64 `xml:"LowerSizeBound"`
+
+ // The maximum size of the specified object. Unit: B. Valid values: (0 B, 48.8 TB]
+ UpperSizeBound *int64 `xml:"UpperSizeBound"`
+
+ // The storage class of the object. You can specify multiple storage classes. Valid values: Standard, IA, Archive, ColdArchive, All
+ StorageClass *string `xml:"StorageClass"`
+
+ // The prefix that is specified in the inventory.
+ Prefix *string `xml:"Prefix"`
+}
+
+type SSEKMS struct {
+ // The ID of the key that is managed by Key Management Service (KMS).
+ KeyId *string `xml:"KeyId"`
+}
+
+type InventoryEncryption struct {
+ // The container that stores information about the SSE-OSS encryption method.
+ SseOss *string `xml:"SSE-OSS"`
+
+ // The container that stores the customer master key (CMK) used for SSE-KMS encryption.
+ SseKms *SSEKMS `xml:"SSE-KMS"`
+}
+
+type InventoryConfiguration struct {
+ // The name of the inventory. The name must be unique in the bucket.
+ Id *string `xml:"Id"`
+
+ // Specifies whether to enable the bucket inventory feature. Valid values:* true* false
+ IsEnabled *bool `xml:"IsEnabled"`
+
+ // The container that stores the exported inventory lists.
+ Destination *InventoryDestination `xml:"Destination"`
+
+ // The container that stores information about the frequency at which inventory lists are exported.
+ Schedule *InventorySchedule `xml:"Schedule"`
+
+ // The container that stores the prefix used to filter objects. Only objects whose names contain the specified prefix are included in the inventory.
+ Filter *InventoryFilter `xml:"Filter"`
+
+ // Specifies whether to include the version information about the objects in inventory lists. Valid values:* All: The information about all versions of the objects is exported.* Current: Only the information about the current versions of the objects is exported.
+ IncludedObjectVersions *string `xml:"IncludedObjectVersions"`
+
+ // The container that stores the configuration fields in inventory lists.
+ OptionalFields *OptionalFields `xml:"OptionalFields"`
+}
+
+type ListInventoryConfigurationsResult struct {
+ // The container that stores inventory configurations.
+ InventoryConfigurations []InventoryConfiguration `xml:"InventoryConfiguration"`
+
+ // Indicates whether all inventory tasks configured for the bucket are listed. Valid values: true and false.- false: all inventory tasks configured for the bucket are listed.- true: not all inventory tasks configured for the bucket are listed. To list the next page of inventory configurations, set the continuation-token parameter in the next request to the value of the NextContinuationToken element in the response to the current request.
+ IsTruncated *bool `xml:"IsTruncated"`
+
+ // If the value of IsTruncated in the response is true and the value of this element is not null, set the continuation-token parameter in the next request to the value of this element.
+ NextContinuationToken *string `xml:"NextContinuationToken"`
+}
+
+type OptionalFields struct {
+ // The configuration fields that are included in inventory lists. Available configuration fields:* Size: the size of the object.* LastModifiedDate: the time when the object was last modified.* ETag: the ETag of the object.
It is used to identify the content of the object.* StorageClass: the storage class of the object.* IsMultipartUploaded: specifies whether the object is uploaded by using multipart upload.* EncryptionStatus: the encryption status of the object. + Fields []InventoryOptionalFieldType `xml:"Field"` +} + +type PutBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory. + InventoryId *string `input:"query,inventoryId,required"` + + // Request body schema. + InventoryConfiguration *InventoryConfiguration `input:"body,InventoryConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketInventoryResult struct { + ResultCommon +} + +// PutBucketInventory Configures an inventory for a bucket. +func (c *Client) PutBucketInventory(ctx context.Context, request *PutBucketInventoryRequest, optFns ...func(*Options)) (*PutBucketInventoryResult, error) { + var err error + if request == nil { + request = &PutBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "PutBucketInventory", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory to be queried. + InventoryId *string `input:"query,inventoryId,required"` + + RequestCommon +} + +type GetBucketInventoryResult struct { + // The inventory task configured for a bucket. + InventoryConfiguration *InventoryConfiguration `output:"body,InventoryConfiguration,xml"` + + ResultCommon +} + +// GetBucketInventory Queries the inventories that are configured for a bucket. +func (c *Client) GetBucketInventory(ctx context.Context, request *GetBucketInventoryRequest, optFns ...func(*Options)) (*GetBucketInventoryResult, error) { + var err error + if request == nil { + request = &GetBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "GetBucketInventory", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Specify the start position of the list operation. You can obtain this token from the NextContinuationToken field of last ListBucketInventory's result. 
+ ContinuationToken *string `input:"query,continuation-token"` + + RequestCommon +} + +type ListBucketInventoryResult struct { + // The container that stores inventory configuration list. + ListInventoryConfigurationsResult *ListInventoryConfigurationsResult `output:"body,ListInventoryConfigurationsResult,xml"` + + ResultCommon +} + +// ListBucketInventory Queries all inventories in a bucket at a time. +func (c *Client) ListBucketInventory(ctx context.Context, request *ListBucketInventoryRequest, optFns ...func(*Options)) (*ListBucketInventoryResult, error) { + var err error + if request == nil { + request = &ListBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "ListBucketInventory", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory that you want to delete. + InventoryId *string `input:"query,inventoryId,required"` + + RequestCommon +} + +type DeleteBucketInventoryResult struct { + ResultCommon +} + +// DeleteBucketInventory Deletes an inventory for a bucket. +func (c *Client) DeleteBucketInventory(ctx context.Context, request *DeleteBucketInventoryRequest, optFns ...func(*Options)) (*DeleteBucketInventoryResult, error) { + var err error + if request == nil { + request = &DeleteBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketInventory", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go new file mode 100644 index 000000000..66952e7a8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go @@ -0,0 +1,280 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type LifecycleRule struct { + // Specifies whether to enable the rule. Valid values:* Enabled: enables the rule. OSS periodically executes the rule.* Disabled: does not enable the rule. OSS ignores the rule. 
+ Status *string `xml:"Status"` + + // The delete operation that you want OSS to perform on the parts that are uploaded in incomplete multipart upload tasks when the parts expire. + AbortMultipartUpload *LifecycleRuleAbortMultipartUpload `xml:"AbortMultipartUpload"` + + // Timestamp for when access tracking was enabled. + AtimeBase *int64 `xml:"AtimeBase"` + + // The conversion of the storage class of previous versions of the objects that match the lifecycle rule when the previous versions expire. The storage class of the previous versions can be converted to IA or Archive. The period of time from when the previous versions expire to when the storage class of the previous versions is converted to Archive must be longer than the period of time from when the previous versions expire to when the storage class of the previous versions is converted to IA. + NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition"` + + // The container that stores the Not parameter that is used to filter objects. + Filter *LifecycleRuleFilter `xml:"Filter"` + + // The ID of the lifecycle rule. The ID can contain up to 255 characters. If you do not specify the ID, OSS automatically generates a unique ID for the lifecycle rule. + ID *string `xml:"ID"` + + // The prefix in the names of the objects to which the rule applies. The prefixes specified by different rules cannot overlap.* If Prefix is specified, this rule applies only to objects whose names contain the specified prefix in the bucket.* If Prefix is not specified, this rule applies to all objects in the bucket. + Prefix *string `xml:"Prefix"` + + // The delete operation to perform on objects based on the lifecycle rule. For an object in a versioning-enabled bucket, the delete operation specified by this parameter is performed only on the current version of the object.The period of time from when the objects expire to when the objects are deleted must be longer than the period of time from when the objects expire to when the storage class of the objects is converted to IA or Archive. + Expiration *LifecycleRuleExpiration `xml:"Expiration"` + + // The conversion of the storage class of objects that match the lifecycle rule when the objects expire. The storage class of the objects can be converted to IA, Archive, and ColdArchive. The storage class of Standard objects in a Standard bucket can be converted to IA, Archive, or Cold Archive. The period of time from when the objects expire to when the storage class of the objects is converted to Archive must be longer than the period of time from when the objects expire to when the storage class of the objects is converted to IA. For example, if the validity period is set to 30 for objects whose storage class is converted to IA after the validity period, the validity period must be set to a value greater than 30 for objects whose storage class is converted to Archive. Either Days or CreatedBeforeDate is required. + Transitions []LifecycleRuleTransition `xml:"Transition"` + + // The tag of the objects to which the lifecycle rule applies. You can specify multiple tags. + Tags []Tag `xml:"Tag"` + + // The delete operation that you want OSS to perform on the previous versions of the objects that match the lifecycle rule when the previous versions expire. 
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration"`
+}
+
+type LifecycleRuleAbortMultipartUpload struct {
+ // The number of days from when the objects were last modified to when the lifecycle rule takes effect.
+ Days *int32 `xml:"Days"`
+
+ // The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC.
+ CreatedBeforeDate *string `xml:"CreatedBeforeDate"`
+
+ // Deprecated: please use Days or CreatedBeforeDate.
+ // The date after which the lifecycle rule takes effect. If the specified time is earlier than the current moment, the rule takes effect immediately. (This field is NOT RECOMMENDED; please use Days or CreatedBeforeDate.)
+ Date *string `xml:"Date"`
+}
+
+type LifecycleRuleNot struct {
+ // The tag of the objects to which the lifecycle rule does not apply.
+ Tag *Tag `xml:"Tag"`
+
+ // The prefix in the names of the objects to which the lifecycle rule does not apply.
+ Prefix *string `xml:"Prefix"`
+}
+
+type LifecycleRuleFilter struct {
+ // The condition that is matched by objects to which the lifecycle rule does not apply.
+ Not *LifecycleRuleNot `xml:"Not"`
+
+ // This lifecycle rule only applies to files larger than this size.
+ ObjectSizeGreaterThan *int64 `xml:"ObjectSizeGreaterThan"`
+
+ // This lifecycle rule only applies to files smaller than this size.
+ ObjectSizeLessThan *int64 `xml:"ObjectSizeLessThan"`
+}
+
+type LifecycleRuleExpiration struct {
+ // The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. The value of this parameter is in the yyyy-MM-ddT00:00:00.000Z format. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC.
+ CreatedBeforeDate *string `xml:"CreatedBeforeDate"`
+
+ // The number of days from when the objects were last modified to when the lifecycle rule takes effect.
+ Days *int32 `xml:"Days"`
+
+ // Specifies whether to automatically remove expired delete markers.* true: Expired delete markers are automatically removed. If you set this parameter to true, you cannot specify the Days or CreatedBeforeDate parameter.* false: Expired delete markers are not automatically removed. If you set this parameter to false, you must specify the Days or CreatedBeforeDate parameter.
+ ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker"`
+
+ // Deprecated: please use Days or CreatedBeforeDate.
+ // The date after which the lifecycle rule takes effect. If the specified time is earlier than the current moment, the rule takes effect immediately. (This field is NOT RECOMMENDED; please use Days or CreatedBeforeDate.)
+ Date *string `xml:"Date"`
+}
+
+type NoncurrentVersionExpiration struct {
+ // The number of days from when the objects became previous versions to when the lifecycle rule takes effect.
+ NoncurrentDays *int32 `xml:"NoncurrentDays"`
+}
+
+type NoncurrentVersionTransition struct {
+ // Specifies whether the lifecycle rule applies to objects based on their last access time. Valid values:* true: The rule applies to objects based on their last access time.* false: The rule applies to objects based on their last modified time.
+ IsAccessTime *bool `xml:"IsAccessTime"`
+
+ // Specifies whether to convert the storage class of non-Standard objects back to Standard after the objects are accessed.
This parameter takes effect only when the IsAccessTime parameter is set to true. Valid values:* true: converts the storage class of the objects to Standard.* false: does not convert the storage class of the objects to Standard. + ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit"` + + // Specifies whether to convert the storage class of objects whose sizes are less than 64 KB to IA, Archive, or Cold Archive based on their last access time. Valid values:* true: converts the storage class of objects that are smaller than 64 KB to IA, Archive, or Cold Archive. Objects that are smaller than 64 KB are charged as 64 KB. Objects that are greater than or equal to 64 KB are charged based on their actual sizes. If you set this parameter to true, the storage fees may increase.* false: does not convert the storage class of an object that is smaller than 64 KB. + AllowSmallFile *bool `xml:"AllowSmallFile"` + + // The number of days from when the objects became previous versions to when the lifecycle rule takes effect. + NoncurrentDays *int32 `xml:"NoncurrentDays"` + + // The storage class to which objects are converted. Valid values:* IA* Archive* ColdArchive You can convert the storage class of objects in an IA bucket to only Archive or Cold Archive. + StorageClass StorageClassType `xml:"StorageClass"` +} + +type LifecycleRuleTransition struct { + // The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC. + CreatedBeforeDate *string `xml:"CreatedBeforeDate"` + + // The number of days from when the objects were last modified to when the lifecycle rule takes effect. + Days *int32 `xml:"Days"` + + // The storage class to which objects are converted. Valid values:* IA* Archive* ColdArchive You can convert the storage class of objects in an IA bucket to only Archive or Cold Archive. + StorageClass StorageClassType `xml:"StorageClass"` + + // Specifies whether the lifecycle rule applies to objects based on their last access time. Valid values:* true: The rule applies to objects based on their last access time.* false: The rule applies to objects based on their last modified time. + IsAccessTime *bool `xml:"IsAccessTime"` + + // Specifies whether to convert the storage class of non-Standard objects back to Standard after the objects are accessed. This parameter takes effect only when the IsAccessTime parameter is set to true. Valid values:* true: converts the storage class of the objects to Standard.* false: does not convert the storage class of the objects to Standard. + ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit"` + + // Specifies whether to convert the storage class of objects whose sizes are less than 64 KB to IA, Archive, or Cold Archive based on their last access time. Valid values:* true: converts the storage class of objects that are smaller than 64 KB to IA, Archive, or Cold Archive. Objects that are smaller than 64 KB are charged as 64 KB. Objects that are greater than or equal to 64 KB are charged based on their actual sizes. If you set this parameter to true, the storage fees may increase.* false: does not convert the storage class of an object that is smaller than 64 KB. + AllowSmallFile *bool `xml:"AllowSmallFile"` +} + +type LifecycleConfiguration struct { + // The container that stores the lifecycle rules. 
+ Rules []LifecycleRule `xml:"Rule"` +} + +type PutBucketLifecycleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Specifies whether to allow overlapped prefixes. Valid values:true: Overlapped prefixes are allowed.false: Overlapped prefixes are not allowed. + AllowSameActionOverlap *string `input:"header,x-oss-allow-same-action-overlap"` + + // The container of the request body. + LifecycleConfiguration *LifecycleConfiguration `input:"body,LifecycleConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketLifecycleResult struct { + ResultCommon +} + +// PutBucketLifecycle Configures a lifecycle rule for a bucket. After you configure a lifecycle rule for a bucket, Object Storage Service (OSS) automatically deletes the objects that match the rule or converts the storage type of the objects based on the point in time that is specified in the lifecycle rule. +func (c *Client) PutBucketLifecycle(ctx context.Context, request *PutBucketLifecycleRequest, optFns ...func(*Options)) (*PutBucketLifecycleResult, error) { + var err error + if request == nil { + request = &PutBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "PutBucketLifecycle", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketLifecycleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketLifecycleResult struct { + // The container that stores the lifecycle rules configured for the bucket. + LifecycleConfiguration *LifecycleConfiguration `output:"body,LifecycleConfiguration,xml"` + + ResultCommon +} + +// GetBucketLifecycle Queries the lifecycle rules configured for a bucket. Only the owner of a bucket has the permissions to query the lifecycle rules configured for the bucket. +func (c *Client) GetBucketLifecycle(ctx context.Context, request *GetBucketLifecycleRequest, optFns ...func(*Options)) (*GetBucketLifecycleResult, error) { + var err error + if request == nil { + request = &GetBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLifecycle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketLifecycleRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketLifecycleResult struct { + ResultCommon +} + +// DeleteBucketLifecycle Deletes the lifecycle rules of a bucket. +func (c *Client) DeleteBucketLifecycle(ctx context.Context, request *DeleteBucketLifecycleRequest, optFns ...func(*Options)) (*DeleteBucketLifecycleResult, error) { + var err error + if request == nil { + request = &DeleteBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketLifecycle", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go new file mode 100644 index 000000000..640d9d0c6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go @@ -0,0 +1,320 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type LoggingEnabled struct { + // The bucket that stores access logs. + TargetBucket *string `xml:"TargetBucket"` + + // The prefix of the log objects. This parameter can be left empty. + TargetPrefix *string `xml:"TargetPrefix"` +} + +type BucketLoggingStatus struct { + // Indicates the container used to store access logging information. This element is returned if it is enabled and is not returned if it is disabled. + LoggingEnabled *LoggingEnabled `xml:"LoggingEnabled"` +} + +type LoggingHeaderSet struct { + // The list of the custom request headers. + Headers []string `xml:"header"` +} + +type LoggingParamSet struct { + // The list of the custom URL parameters. + Parameters []string `xml:"parameter"` +} + +type UserDefinedLogFieldsConfiguration struct { + // The container that stores the configurations of custom request headers. + HeaderSet *LoggingHeaderSet `xml:"HeaderSet"` + + // The container that stores the configurations of custom URL parameters. + ParamSet *LoggingParamSet `xml:"ParamSet"` +} + +type PutBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketLoggingStatus *BucketLoggingStatus `input:"body,BucketLoggingStatus,xml,required"` + + RequestCommon +} + +type PutBucketLoggingResult struct { + ResultCommon +} + +// PutBucketLogging Enables logging for a bucket. After you enable logging for a bucket, Object Storage Service (OSS) generates logs every hour based on the defined naming rule and stores the logs as objects in the specified destination bucket. 
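+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK; the
+// bucket names are hypothetical, and client and ctx are assumed to be an
+// initialized *Client and a context.Context):
+//
+//	_, err := client.PutBucketLogging(ctx, &PutBucketLoggingRequest{
+//		Bucket: Ptr("example-bucket"),
+//		BucketLoggingStatus: &BucketLoggingStatus{
+//			LoggingEnabled: &LoggingEnabled{
+//				TargetBucket: Ptr("example-log-bucket"),
+//				TargetPrefix: Ptr("access-log/"),
+//			},
+//		},
+//	})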
+func (c *Client) PutBucketLogging(ctx context.Context, request *PutBucketLoggingRequest, optFns ...func(*Options)) (*PutBucketLoggingResult, error) { + var err error + if request == nil { + request = &PutBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "PutBucketLogging", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketLoggingResult struct { + // Indicates the container used to store access logging configuration of a bucket. + BucketLoggingStatus *BucketLoggingStatus `output:"body,BucketLoggingStatus,xml"` + + ResultCommon +} + +// GetBucketLogging Queries the configurations of access log collection of a bucket. Only the owner of a bucket can query the configurations of access log collection of the bucket. +func (c *Client) GetBucketLogging(ctx context.Context, request *GetBucketLoggingRequest, optFns ...func(*Options)) (*GetBucketLoggingResult, error) { + var err error + if request == nil { + request = &GetBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLogging", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketLoggingResult struct { + ResultCommon +} + +// DeleteBucketLogging Disables the logging feature for a bucket. 
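+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK;
+// "example-bucket" is a hypothetical name, and client and ctx are assumed to
+// be an initialized *Client and a context.Context):
+//
+//	_, err := client.DeleteBucketLogging(ctx, &DeleteBucketLoggingRequest{
+//		Bucket: Ptr("example-bucket"),
+//	})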
+func (c *Client) DeleteBucketLogging(ctx context.Context, request *DeleteBucketLoggingRequest, optFns ...func(*Options)) (*DeleteBucketLoggingResult, error) { + var err error + if request == nil { + request = &DeleteBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketLogging", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container that stores the specified log configurations. + UserDefinedLogFieldsConfiguration *UserDefinedLogFieldsConfiguration `input:"body,UserDefinedLogFieldsConfiguration,xml,required"` + + RequestCommon +} + +type PutUserDefinedLogFieldsConfigResult struct { + ResultCommon +} + +// PutUserDefinedLogFieldsConfig Customizes the user_defined_log_fields field in real-time logs by adding custom request headers or query parameters to the field for subsequent analysis of requests. +func (c *Client) PutUserDefinedLogFieldsConfig(ctx context.Context, request *PutUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*PutUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &PutUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "PutUserDefinedLogFieldsConfig", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetUserDefinedLogFieldsConfigResult struct { + // The container for the user-defined logging configuration. + UserDefinedLogFieldsConfiguration *UserDefinedLogFieldsConfiguration `output:"body,UserDefinedLogFieldsConfiguration,xml"` + + ResultCommon +} + +// GetUserDefinedLogFieldsConfig Queries the custom configurations of the user_defined_log_fields field in the real-time logs of a bucket. 
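+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK;
+// "example-bucket" is a hypothetical name, and client and ctx are assumed to
+// be an initialized *Client and a context.Context). Both containers may be
+// nil when nothing is configured, so they are checked before use:
+//
+//	res, err := client.GetUserDefinedLogFieldsConfig(ctx, &GetUserDefinedLogFieldsConfigRequest{
+//		Bucket: Ptr("example-bucket"),
+//	})
+//	if err == nil && res.UserDefinedLogFieldsConfiguration != nil {
+//		if hs := res.UserDefinedLogFieldsConfiguration.HeaderSet; hs != nil {
+//			fmt.Println(hs.Headers) // custom request headers recorded in real-time logs
+//		}
+//	}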
+func (c *Client) GetUserDefinedLogFieldsConfig(ctx context.Context, request *GetUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*GetUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &GetUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "GetUserDefinedLogFieldsConfig", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteUserDefinedLogFieldsConfigResult struct { + ResultCommon +} + +// DeleteUserDefinedLogFieldsConfig Deletes the custom configurations of the user_defined_log_fields field in the real-time logs of a bucket. +func (c *Client) DeleteUserDefinedLogFieldsConfig(ctx context.Context, request *DeleteUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*DeleteUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &DeleteUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "DeleteUserDefinedLogFieldsConfig", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go new file mode 100644 index 000000000..27f0df73c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go @@ -0,0 +1,534 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type MetaQueryAggregation struct { + // The field name. + Field *string `xml:"Field"` + + // The operator for aggregate operations.* min* max* average* sum* count* distinct* group + Operation *string `xml:"Operation"` + + // The result of the aggregate operation. + Value *float64 `xml:"Value"` + + // The grouped aggregations. + Groups *MetaQueryGroups `xml:"Groups"` +} + +type MetaQueryGroups struct { + // The grouped aggregations. + Groups []MetaQueryGroup `xml:"Group"` +} + +type MetaQueryGroup struct { + // The value for the grouped aggregation. 
+ Value *string `xml:"Value"` + + // The number of results in the grouped aggregation. + Count *int64 `xml:"Count"` +} + +type MetaQueryAggregations struct { + // The container that stores the information about a single aggregate operation. + Aggregations []MetaQueryAggregation `xml:"Aggregation"` +} + +type MetaQueryUserMeta struct { + // The key of the user metadata item. + Key *string `xml:"Key"` + + // The value of the user metadata item. + Value *string `xml:"Value"` +} + +type MetaQueryFile struct { + // The time when the object was last modified. + FileModifiedTime *string `xml:"FileModifiedTime"` + + // The type of the object.Valid values:* Multipart : The object is uploaded by using multipart upload .* Symlink : The object is a symbolic link that was created by calling the PutSymlink operation. * Appendable : The object is uploaded by using AppendObject .* Normal : The object is uploaded by using PutObject. + OSSObjectType *string `xml:"OSSObjectType"` + + // The ETag of the object. + ETag *string `xml:"ETag"` + + // The server-side encryption algorithm used when the object was created. + ServerSideEncryptionCustomerAlgorithm *string `xml:"ServerSideEncryptionCustomerAlgorithm"` + + // The number of the tags of the object. + OSSTaggingCount *int64 `xml:"OSSTaggingCount"` + + // The tags. + OSSTagging []MetaQueryTagging `xml:"OSSTagging>Tagging"` + + // The user metadata items. + OSSUserMeta []MetaQueryUserMeta `xml:"OSSUserMeta>UserMeta"` + + // The full path of the object. + Filename *string `xml:"Filename"` + + // The storage class of the object.Valid values:* Archive : the Archive storage class .* ColdArchive : the Cold Archive storage class .* IA : the Infrequent Access (IA) storage class .* Standard : The Standard storage class . + OSSStorageClass *string `xml:"OSSStorageClass"` + + // The access control list (ACL) of the object.Valid values:* default : the ACL of the bucket .* private : private .* public-read : public-read .* public-read-write : public-read-write . + ObjectACL *string `xml:"ObjectACL"` + + // The CRC-64 value of the object. + OSSCRC64 *string `xml:"OSSCRC64"` + + // The server-side encryption of the object. + ServerSideEncryption *string `xml:"ServerSideEncryption"` + + // The object size. + Size *int64 `xml:"Size"` + + // The list of audio streams. + AudioStreams []MetaQueryAudioStream `xml:"AudioStreams>AudioStream"` + + // The algorithm used to encrypt objects. + ServerSideDataEncryption *string `xml:"ServerSideDataEncryption"` + + // The cross-origin request methods that are allowed. + AccessControlRequestMethod *string `xml:"AccessControlRequestMethod"` + + // The artist. + Artist *string `xml:"Artist"` + + // The total duration of the video. Unit: seconds. + Duration *float64 `xml:"Duration"` + + // The longitude and latitude information. + LatLong *string `xml:"LatLong"` + + // The list of subtitle streams. + Subtitles []MetaQuerySubtitle `xml:"Subtitles>Subtitle"` + + // The time when the image or video was taken. + ProduceTime *string `xml:"ProduceTime"` + + // The origins allowed in cross-origin requests. + AccessControlAllowOrigin *string `xml:"AccessControlAllowOrigin"` + + // The name of the object when it is downloaded. + ContentDisposition *string `xml:"ContentDisposition"` + + // The player. + Performer *string `xml:"Performer"` + + // The album. + Album *string `xml:"Album"` + + // The addresses. + Addresses []MetaQueryAddress `xml:"Addresses>Address"` + + // The Multipurpose Internet Mail Extensions (MIME) type of the object. 
+ ContentType *string `xml:"ContentType"`
+
+ // The content encoding format of the object when the object is downloaded.
+ ContentEncoding *string `xml:"ContentEncoding"`
+
+ // The language of the object content.
+ ContentLanguage *string `xml:"ContentLanguage"`
+
+ // The height of the image. Unit: pixel.
+ ImageHeight *int64 `xml:"ImageHeight"`
+
+ // The type of multimedia.
+ MediaType *string `xml:"MediaType"`
+
+ // The time when the object expires.
+ OSSExpiration *string `xml:"OSSExpiration"`
+
+ // The width of the image. Unit: pixel.
+ ImageWidth *int64 `xml:"ImageWidth"`
+
+ // The width of the video image. Unit: pixel.
+ VideoWidth *int64 `xml:"VideoWidth"`
+
+ // The composer.
+ Composer *string `xml:"Composer"`
+
+ // The full path of the object.
+ URI *string `xml:"URI"`
+
+ // The height of the video image. Unit: pixel.
+ VideoHeight *int64 `xml:"VideoHeight"`
+
+ // The list of video streams.
+ VideoStreams []MetaQueryVideoStream `xml:"VideoStreams>VideoStream"`
+
+ // The web page caching behavior that is performed when the object is downloaded.
+ CacheControl *string `xml:"CacheControl"`
+
+ // The bitrate. Unit: bit/s.
+ Bitrate *int64 `xml:"Bitrate"`
+
+ // The singer.
+ AlbumArtist *string `xml:"AlbumArtist"`
+
+ // The title of the object.
+ Title *string `xml:"Title"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ ServerSideEncryptionKeyId *string `xml:"ServerSideEncryptionKeyId"`
+}
+
+type MetaQueryVideoStream struct {
+ // The bitrate. Unit: bit/s.
+ Bitrate *int64 `xml:"Bitrate"`
+
+ // The start time of the video stream in seconds.
+ StartTime *float64 `xml:"StartTime"`
+
+ // The duration of the video stream in seconds.
+ Duration *float64 `xml:"Duration"`
+
+ // The pixel format of the video stream.
+ PixelFormat *string `xml:"PixelFormat"`
+
+ // The image height of the video stream. Unit: pixel.
+ Height *int64 `xml:"Height"`
+
+ // The color space.
+ ColorSpace *string `xml:"ColorSpace"`
+
+ // The image width of the video stream. Unit: pixel.
+ Width *int64 `xml:"Width"`
+
+ // The abbreviated name of the codec.
+ CodecName *string `xml:"CodecName"`
+
+ // The language used in the video stream. The value follows the BCP 47 format.
+ Language *string `xml:"Language"`
+
+ // The frame rate of the video stream.
+ FrameRate *string `xml:"FrameRate"`
+
+ // The number of video frames.
+ FrameCount *int64 `xml:"FrameCount"`
+
+ // The bit depth.
+ BitDepth *int64 `xml:"BitDepth"`
+}
+
+type MetaQueryAddress struct {
+ // The country.
+ Country *string `xml:"Country"`
+
+ // The city.
+ City *string `xml:"City"`
+
+ // The district.
+ District *string `xml:"District"`
+
+ // The language of the address. The value follows the BCP 47 format.
+ Language *string `xml:"Language"`
+
+ // The province.
+ Province *string `xml:"Province"`
+
+ // The street.
+ Township *string `xml:"Township"`
+
+ // The full address.
+ AddressLine *string `xml:"AddressLine"`
+}
+
+type MetaQuerySubtitle struct {
+ // The start time of the subtitle stream in seconds.
+ StartTime *float64 `xml:"StartTime"`
+
+ // The duration of the subtitle stream in seconds.
+ Duration *float64 `xml:"Duration"`
+
+ // The abbreviated name of the codec.
+ CodecName *string `xml:"CodecName"`
+
+ // The language of the subtitle. The value follows the BCP 47 format.
+ Language *string `xml:"Language"`
+}
+
+type MetaQueryAudioStream struct {
+ // The sampling rate.
+ SampleRate *int64 `xml:"SampleRate"`
+
+ // The start time of the audio stream.
+ StartTime *float64 `xml:"StartTime"`
+
+ // The duration of the audio stream.
+ Duration *float64 `xml:"Duration"`
+
+ // The number of sound channels.
+ Channels *int64 `xml:"Channels"`
+
+ // The language used in the audio stream. The value follows the BCP 47 format.
+ Language *string `xml:"Language"`
+
+ // The abbreviated name of the codec.
+ CodecName *string `xml:"CodecName"`
+
+ // The bitrate. Unit: bit/s.
+ Bitrate *int64 `xml:"Bitrate"`
+}
+
+type MetaQuery struct {
+ // The maximum number of objects to return. Valid values: 0 to 100. If this parameter is not set or is set to 0, up to 100 objects are returned.
+ MaxResults *int64 `xml:"MaxResults"`
+
+ // The query conditions. A query condition includes the following elements:* Operation: the operator. Valid values: eq (equal to), gt (greater than), gte (greater than or equal to), lt (less than), lte (less than or equal to), match (fuzzy query), prefix (prefix query), and (AND), or (OR), and not (NOT).* Field: the field name.* Value: the field value.* SubQueries: the subquery conditions. Options that are included in this element are the same as those of simple query. You need to set subquery conditions only when Operation is set to and, or, or not.
+ Query *string `xml:"Query"`
+
+ // The field based on which the results are sorted.
+ Sort *string `xml:"Sort"`
+
+ // The sort order.
+ Order *MetaQueryOrderType `xml:"Order"`
+
+ // The container that stores the information about aggregate operations.
+ Aggregations *MetaQueryAggregations `xml:"Aggregations"`
+
+ // The pagination token used to obtain information in the next request. The object information is returned in alphabetical order starting from the value of NextToken.
+ NextToken *string `xml:"NextToken"`
+
+ // The type of multimedia that you want to query. Valid values: image, video, audio, document.
+ MediaType *string `xml:"MediaTypes>MediaType"`
+
+ // The query conditions.
+ SimpleQuery *string `xml:"SimpleQuery"`
+}
+
+type MetaQueryStatus struct {
+ // The time when the metadata index library was created. The value follows the RFC 3339 standard in the YYYY-MM-DDTHH:mm:ss+TIMEZONE format. YYYY-MM-DD indicates the year, month, and day. T indicates the beginning of the time element. HH:mm:ss indicates the hour, minute, and second. TIMEZONE indicates the time zone.
+ CreateTime *string `xml:"CreateTime"`
+
+ // The time when the metadata index library was updated. The value follows the RFC 3339 standard in the YYYY-MM-DDTHH:mm:ss+TIMEZONE format. YYYY-MM-DD indicates the year, month, and day. T indicates the beginning of the time element. HH:mm:ss indicates the hour, minute, and second. TIMEZONE indicates the time zone.
+ UpdateTime *string `xml:"UpdateTime"`
+
+ // The status of the metadata index library. Valid values:- Ready: The metadata index library is being prepared after it is created. In this case, the metadata index library cannot be used to query data.- Stop: The metadata index library is paused.- Running: The metadata index library is running.- Retrying: The metadata index library failed to be created and is being created again.- Failed: The metadata index library failed to be created.- Deleted: The metadata index library is deleted.
+ State *string `xml:"State"`
+
+ // The scan type. Valid values:- FullScanning: Full scanning is in progress.- IncrementalScanning: Incremental scanning is in progress.
+ Phase *string `xml:"Phase"`
+}
+
+type MetaQueryTagging struct {
+ // The tag key.
+ Key *string `xml:"Key"`
+
+ // The tag value.
+ Value *string `xml:"Value"` +} + +type GetMetaQueryStatusRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetMetaQueryStatusResult struct { + // The container that stores the metadata information. + MetaQueryStatus *MetaQueryStatus `output:"body,MetaQueryStatus,xml"` + + ResultCommon +} + +// GetMetaQueryStatus Queries the information about the metadata index library of a bucket. +func (c *Client) GetMetaQueryStatus(ctx context.Context, request *GetMetaQueryStatusRequest, optFns ...func(*Options)) (*GetMetaQueryStatusResult, error) { + var err error + if request == nil { + request = &GetMetaQueryStatusRequest{} + } + input := &OperationInput{ + OpName: "GetMetaQueryStatus", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "metaQuery": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"metaQuery"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetMetaQueryStatusResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CloseMetaQueryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type CloseMetaQueryResult struct { + ResultCommon +} + +// CloseMetaQuery Disables the metadata management feature for an Object Storage Service (OSS) bucket. After the metadata management feature is disabled for a bucket, OSS automatically deletes the metadata index library of the bucket and you cannot perform metadata indexing. +func (c *Client) CloseMetaQuery(ctx context.Context, request *CloseMetaQueryRequest, optFns ...func(*Options)) (*CloseMetaQueryResult, error) { + var err error + if request == nil { + request = &CloseMetaQueryRequest{} + } + input := &OperationInput{ + OpName: "CloseMetaQuery", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "delete", + "metaQuery": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CloseMetaQueryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DoMetaQueryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + Mode *string `input:"query,mode"` + + // The request body schema. + MetaQuery *MetaQuery `input:"body,MetaQuery,xml,required"` + + RequestCommon +} + +type DoMetaQueryResult struct { + // The token that is used for the next query when the total number of objects exceeds the value of MaxResults.The value of NextToken is used to return the unreturned results in the next query.This parameter has a value only when not all objects are returned. + NextToken *string `xml:"NextToken"` + + // The list of file information. 
+ Files []MetaQueryFile `xml:"Files>File"`
+
+ // The aggregation results.
+ Aggregations []MetaQueryAggregation `xml:"Aggregations>Aggregation"`
+
+ ResultCommon
+}
+
+// DoMetaQuery Queries the objects in a bucket that meet the specified conditions by using the data indexing feature. The information about the objects is listed based on the specified fields and sorting methods.
+func (c *Client) DoMetaQuery(ctx context.Context, request *DoMetaQueryRequest, optFns ...func(*Options)) (*DoMetaQueryResult, error) {
+ var err error
+ if request == nil {
+ request = &DoMetaQueryRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DoMetaQuery",
+ Method: "POST",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "comp": "query",
+ "metaQuery": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &DoMetaQueryResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type OpenMetaQueryRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ Mode *string `input:"query,mode"`
+
+ RequestCommon
+}
+
+type OpenMetaQueryResult struct {
+ ResultCommon
+}
+
+// OpenMetaQuery Enables metadata management for a bucket. After you enable the metadata management feature for a bucket, Object Storage Service (OSS) creates a metadata index library for the bucket and creates metadata indexes for all objects in the bucket. After the metadata index library is created, OSS continues to perform quasi-real-time scans on incremental objects in the bucket and creates metadata indexes for the incremental objects.
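+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK;
+// "example-bucket" is a hypothetical name, and client and ctx are assumed to
+// be an initialized *Client and a context.Context). Mode is optional and
+// omitted here:
+//
+//	_, err := client.OpenMetaQuery(ctx, &OpenMetaQueryRequest{
+//		Bucket: Ptr("example-bucket"),
+//	})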
+func (c *Client) OpenMetaQuery(ctx context.Context, request *OpenMetaQueryRequest, optFns ...func(*Options)) (*OpenMetaQueryResult, error) {
+ var err error
+ if request == nil {
+ request = &OpenMetaQueryRequest{}
+ }
+ input := &OperationInput{
+ OpName: "OpenMetaQuery",
+ Method: "POST",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "comp": "add",
+ "metaQuery": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &OpenMetaQueryResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go
new file mode 100644
index 000000000..8f162a35c
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go
@@ -0,0 +1,710 @@
+package oss
+
+import (
+ "context"
+ "io"
+
+ "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type AccessPointsForObjectProcess struct {
+ // The container that stores information about a single Object FC Access Point.
+ AccessPointForObjectProcesss []AccessPointForObjectProcess `xml:"AccessPointForObjectProcess"`
+}
+
+type TransformationConfiguration struct {
+ // The container that stores the operations.
+ Actions *AccessPointActions `xml:"Actions"`
+
+ // The container that stores the content of the transformation configurations.
+ ContentTransformation *ContentTransformation `xml:"ContentTransformation"`
+}
+
+type ObjectProcessConfiguration struct {
+ // Specifies that Function Compute supports Range GetObject requests.
+ AllowedFeatures []string `xml:"AllowedFeatures>AllowedFeature"`
+
+ // The container that stores the transformation configurations.
+ TransformationConfigurations []TransformationConfiguration `xml:"TransformationConfigurations>TransformationConfiguration"`
+}
+
+type CreateAccessPointForObjectProcessConfiguration struct {
+ // Specifies whether anonymous users are allowed to access this Object FC Access Point.
+ AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+ // The name of the access point.
+ AccessPointName *string `xml:"AccessPointName"`
+
+ // The container that stores the processing information about the Object FC Access Point.
+ ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+}
+
+type AccessPointEndpoints struct {
+ // The internal endpoint of the Object FC Access Point.
+ InternalEndpoint *string `xml:"InternalEndpoint"`
+
+ // The public endpoint of the Object FC Access Point.
+ PublicEndpoint *string `xml:"PublicEndpoint"`
+}
+
+type AccessPointForObjectProcess struct {
+ // The status of the Object FC Access Point. Valid values:enable: The Object FC Access Point is created.disable: The Object FC Access Point is disabled.creating: The Object FC Access Point is being created.deleting: The Object FC Access Point is deleted.
+ Status *string `xml:"Status"`
+
+ // Indicates whether anonymous users are allowed to access this Object FC Access Point.
+ AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+ // The name of the Object FC Access Point.
+ AccessPointNameForObjectProcess *string `xml:"AccessPointNameForObjectProcess"`
+
+ // The alias of the Object FC Access Point.
+ AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+ // The name of the access point.
+ AccessPointName *string `xml:"AccessPointName"`
+}
+
+type AccessPointActions struct {
+ // The supported OSS API operations. Only the GetObject operation is supported.
+ Actions []string `xml:"Action"`
+}
+
+type CustomForwardHeaders struct {
+ CustomForwardHeaders []string `xml:"CustomForwardHeader"`
+}
+
+type ContentTransformation struct {
+ // The Alibaba Cloud Resource Name (ARN) of the role that Function Compute uses to access your resources in other cloud services. The default role is AliyunFCDefaultRole.
+ FunctionAssumeRoleArn *string `xml:"FunctionCompute>FunctionAssumeRoleArn"`
+
+ // The ARN of the function.
+ FunctionArn *string `xml:"FunctionCompute>FunctionArn"`
+
+ //CustomForwardHeaders *CustomForwardHeaders `xml:"AdditionalFeatures>CustomForwardHeaders"`
+}
+
+type PutAccessPointConfigForObjectProcessConfiguration struct {
+ // Specifies whether anonymous users are allowed to access this Object FC Access Point.
+ AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+ // The container in which the Block Public Access configurations are stored.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"`
+
+ // The container that stores the processing information about the Object FC Access Point.
+ ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+}
+
+type CreateAccessPointForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ // The request body.
+ CreateAccessPointForObjectProcessConfiguration *CreateAccessPointForObjectProcessConfiguration `input:"body,CreateAccessPointForObjectProcessConfiguration,xml,required"`
+
+ RequestCommon
+}
+
+type CreateAccessPointForObjectProcessResult struct {
+ // The ARN of the Object FC Access Point.
+ AccessPointForObjectProcessArn *string `xml:"AccessPointForObjectProcessArn"`
+
+ // The alias of the Object FC Access Point.
+ AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+ ResultCommon
+}
+
+// CreateAccessPointForObjectProcess Creates an Object FC Access Point.
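+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK;
+// the bucket, access point, and ARN values are hypothetical placeholders, and
+// client and ctx are assumed to be an initialized *Client and a
+// context.Context):
+//
+//	_, err := client.CreateAccessPointForObjectProcess(ctx, &CreateAccessPointForObjectProcessRequest{
+//		Bucket:                          Ptr("example-bucket"),
+//		AccessPointForObjectProcessName: Ptr("example-fc-ap"),
+//		CreateAccessPointForObjectProcessConfiguration: &CreateAccessPointForObjectProcessConfiguration{
+//			AccessPointName: Ptr("example-access-point"),
+//			ObjectProcessConfiguration: &ObjectProcessConfiguration{
+//				TransformationConfigurations: []TransformationConfiguration{{
+//					Actions: &AccessPointActions{Actions: []string{"GetObject"}},
+//					ContentTransformation: &ContentTransformation{
+//						FunctionArn: Ptr("acs:fc:<region>:<account-id>:services/<service>/functions/<function>"),
+//					},
+//				}},
+//			},
+//		},
+//	})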
+func (c *Client) CreateAccessPointForObjectProcess(ctx context.Context, request *CreateAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*CreateAccessPointForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &CreateAccessPointForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "CreateAccessPointForObjectProcess",
+ Method: "PUT",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointForObjectProcess": "",
+ },
+ Bucket: request.Bucket,
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &CreateAccessPointForObjectProcessResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type GetAccessPointForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point. The name of an Object FC Access Point must meet the following requirements:The name cannot exceed 63 characters in length.The name can contain only lowercase letters, digits, and hyphens (-) and cannot start or end with a hyphen (-).The name must be unique in the current region.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ RequestCommon
+}
+
+type GetAccessPointForObjectProcessResult struct {
+ // Indicates whether anonymous users are allowed to access the Object FC Access Point.
+ AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+ // The container in which the Block Public Access configurations are stored.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"`
+
+ // The name of the Object FC Access Point.
+ AccessPointNameForObjectProcess *string `xml:"AccessPointNameForObjectProcess"`
+
+ // The ARN of the Object FC Access Point.
+ AccessPointForObjectProcessArn *string `xml:"AccessPointForObjectProcessArn"`
+
+ // The time when the Object FC Access Point was created. The value is a timestamp.
+ CreationDate *string `xml:"CreationDate"`
+
+ // The status of the Object FC Access Point. Valid values:enable: The Object FC Access Point is created.disable: The Object FC Access Point is disabled.creating: The Object FC Access Point is being created.deleting: The Object FC Access Point is deleted.
+ AccessPointForObjectProcessStatus *string `xml:"Status"`
+
+ // The container that stores the endpoints of the Object FC Access Point.
+ Endpoints *AccessPointEndpoints `xml:"Endpoints"`
+
+ // The alias of the Object FC Access Point.
+ AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+ // The name of the access point.
+ AccessPointName *string `xml:"AccessPointName"`
+
+ // The UID of the Alibaba Cloud account to which the Object FC Access Point belongs.
+ AccountId *string `xml:"AccountId"`
+
+ ResultCommon
+}
+
+// GetAccessPointForObjectProcess Queries basic information about an Object FC Access Point.
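+//
+// A minimal usage sketch (illustrative only, not part of the vendored SDK;
+// the names are hypothetical, and client and ctx are assumed to be an
+// initialized *Client and a context.Context):
+//
+//	res, err := client.GetAccessPointForObjectProcess(ctx, &GetAccessPointForObjectProcessRequest{
+//		Bucket:                          Ptr("example-bucket"),
+//		AccessPointForObjectProcessName: Ptr("example-fc-ap"),
+//	})
+//	if err == nil && res.Endpoints != nil && res.Endpoints.PublicEndpoint != nil {
+//		fmt.Println(*res.Endpoints.PublicEndpoint)
+//	}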
+func (c *Client) GetAccessPointForObjectProcess(ctx context.Context, request *GetAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &GetAccessPointForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetAccessPointForObjectProcess",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointForObjectProcess": "",
+ },
+ Bucket: request.Bucket,
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &GetAccessPointForObjectProcessResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type ListAccessPointsForObjectProcessRequest struct {
+ // The maximum number of Object FC Access Points to return. Valid values: 1 to 1000. If the list cannot be complete at a time due to the configurations of the max-keys element, the NextContinuationToken element is included in the response as the token for the next list.
+ MaxKeys int64 `input:"query,max-keys"`
+
+ // The token from which the list operation must start. You can obtain this token from the NextContinuationToken element in the returned result.
+ ContinuationToken *string `input:"query,continuation-token"`
+
+ RequestCommon
+}
+
+type ListAccessPointsForObjectProcessResult struct {
+ // The container that stores information about all Object FC Access Points.
+ AccessPointsForObjectProcess *AccessPointsForObjectProcess `xml:"AccessPointsForObjectProcess"`
+
+ // Indicates whether the returned results are truncated. Valid values:true: indicates that not all results are returned for the request.false: indicates that all results are returned for the request.
+ IsTruncated *bool `xml:"IsTruncated"`
+
+ // The token for the next list operation. This element is returned when the results are truncated.
+ NextContinuationToken *string `xml:"NextContinuationToken"`
+
+ // The UID of the Alibaba Cloud account to which the Object FC Access Points belong.
+ AccountId *string `xml:"AccountId"`
+
+ ResultCommon
+}
+
+// ListAccessPointsForObjectProcess Lists information about Object FC Access Points in an Alibaba Cloud account.
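+//
+// A minimal pagination sketch (illustrative only, not part of the vendored
+// SDK; client and ctx are assumed to be an initialized *Client and a
+// context.Context):
+//
+//	var token *string
+//	for {
+//		res, err := client.ListAccessPointsForObjectProcess(ctx, &ListAccessPointsForObjectProcessRequest{
+//			MaxKeys:           100,
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// consume res.AccessPointsForObjectProcess here
+//		if res.IsTruncated == nil || !*res.IsTruncated {
+//			break
+//		}
+//		token = res.NextContinuationToken
+//	}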
+func (c *Client) ListAccessPointsForObjectProcess(ctx context.Context, request *ListAccessPointsForObjectProcessRequest, optFns ...func(*Options)) (*ListAccessPointsForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &ListAccessPointsForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "ListAccessPointsForObjectProcess",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointForObjectProcess": "",
+ },
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &ListAccessPointsForObjectProcessResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type DeleteAccessPointForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ RequestCommon
+}
+
+type DeleteAccessPointForObjectProcessResult struct {
+ ResultCommon
+}
+
+// DeleteAccessPointForObjectProcess Deletes an Object FC Access Point.
+func (c *Client) DeleteAccessPointForObjectProcess(ctx context.Context, request *DeleteAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*DeleteAccessPointForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteAccessPointForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteAccessPointForObjectProcess",
+ Method: "DELETE",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointForObjectProcess": "",
+ },
+ Bucket: request.Bucket,
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &DeleteAccessPointForObjectProcessResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type GetAccessPointConfigForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ RequestCommon
+}
+
+type GetAccessPointConfigForObjectProcessResult struct {
+ // The container that stores the processing information about the Object FC Access Point.
+ ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+
+ // Indicates whether anonymous users are allowed to access this Object FC Access Point.
+ AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+ // The container in which the Block Public Access configurations are stored.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"` + + ResultCommon +} + +// GetAccessPointConfigForObjectProcess Queries the configurations of an Object FC Access Point. +func (c *Client) GetAccessPointConfigForObjectProcess(ctx context.Context, request *GetAccessPointConfigForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointConfigForObjectProcessResult, error) { + var err error + if request == nil { + request = &GetAccessPointConfigForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointConfigForObjectProcess", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointConfigForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointConfigForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointConfigForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointConfigForObjectProcessRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. The name of an Object FC Access Point must meet the following requirements:The name cannot exceed 63 characters in length.The name can contain only lowercase letters, digits, and hyphens (-) and cannot start or end with a hyphen (-).The name must be unique in the current region. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + // The request body. + PutAccessPointConfigForObjectProcessConfiguration *PutAccessPointConfigForObjectProcessConfiguration `input:"body,PutAccessPointConfigForObjectProcessConfiguration,xml,required"` + + RequestCommon +} + +type PutAccessPointConfigForObjectProcessResult struct { + ResultCommon +} + +// PutAccessPointConfigForObjectProcess Changes the configurations of an Object FC Access Point. +func (c *Client) PutAccessPointConfigForObjectProcess(ctx context.Context, request *PutAccessPointConfigForObjectProcessRequest, optFns ...func(*Options)) (*PutAccessPointConfigForObjectProcessResult, error) { + var err error + if request == nil { + request = &PutAccessPointConfigForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointConfigForObjectProcess", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointConfigForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointConfigForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointConfigForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPolicyForObjectProcessRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ // The permission policy for the Object FC Access Point, in JSON format.
+ Body io.Reader `input:"body,nop,required"`
+
+ RequestCommon
+}
+
+type PutAccessPointPolicyForObjectProcessResult struct {
+ ResultCommon
+}
+
+// PutAccessPointPolicyForObjectProcess Configures policies for an Object FC Access Point.
+func (c *Client) PutAccessPointPolicyForObjectProcess(ctx context.Context, request *PutAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*PutAccessPointPolicyForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &PutAccessPointPolicyForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "PutAccessPointPolicyForObjectProcess",
+ Method: "PUT",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointPolicyForObjectProcess": "",
+ },
+ Bucket: request.Bucket,
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &PutAccessPointPolicyForObjectProcessResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type GetAccessPointPolicyForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the Object FC Access Point.
+ AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+ RequestCommon
+}
+
+type GetAccessPointPolicyForObjectProcessResult struct {
+ // The policy configured for the Object FC Access Point.
+ Body string
+
+ ResultCommon
+}
+
+// GetAccessPointPolicyForObjectProcess Queries the policies of an Object FC Access Point.
+func (c *Client) GetAccessPointPolicyForObjectProcess(ctx context.Context, request *GetAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointPolicyForObjectProcessResult, error) {
+ var err error
+ if request == nil {
+ request = &GetAccessPointPolicyForObjectProcessRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetAccessPointPolicyForObjectProcess",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "accessPointPolicyForObjectProcess": "",
+ },
+ Bucket: request.Bucket,
+ }
+
+ input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ body, err := io.ReadAll(output.Body)
+ defer output.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ result := &GetAccessPointPolicyForObjectProcessResult{
+ Body: string(body),
+ }
+
+ if err = c.unmarshalOutput(result, output); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type DeleteAccessPointPolicyForObjectProcessRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + RequestCommon +} + +type DeleteAccessPointPolicyForObjectProcessResult struct { + ResultCommon +} + +// DeleteAccessPointPolicyForObjectProcess Deletes the policies of an Object FC Access Point. +func (c *Client) DeleteAccessPointPolicyForObjectProcess(ctx context.Context, request *DeleteAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*DeleteAccessPointPolicyForObjectProcessResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPolicyForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPolicyForObjectProcess", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicyForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPolicyForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type WriteGetObjectResponseRequest struct { + // The router forwarding address obtained from the event parameter of Function Compute. + RequestRoute *string `input:"header,x-oss-request-route,required"` + + // The unique forwarding token obtained from the event parameter of Function Compute. + RequestToken *string `input:"header,x-oss-request-token,required"` + + // The HTTP status code returned by the backend server. + FwdStatus *string `input:"header,x-oss-fwd-status,required"` + + // The HTTP response header returned by the backend server. It is used to specify the scope of the resources that you want to query. + FwdHeaderAcceptRanges *string `input:"header,x-oss-fwd-header-Accept-Ranges"` + + // The HTTP response header returned by the backend server. It is used to specify the resource cache method that the client uses. Valid values: no-cache, no-store, public, private, max-age + FwdHeaderCacheControl *string `input:"header,x-oss-fwd-header-Cache-Control"` + + FwdHeaderContentDisposition *string `input:"header,x-oss-fwd-header-Content-Disposition"` + + FwdHeaderContentEncoding *string `input:"header,x-oss-fwd-header-Content-Encoding"` + + FwdHeaderContentLanguage *string `input:"header,x-oss-fwd-header-Content-Language"` + + FwdHeaderContentRange *string `input:"header,x-oss-fwd-header-Content-Range"` + + // The HTTP response header returned by the backend server. It is used to specify the type of the received or sent data. + FwdHeaderContentType *string `input:"header,x-oss-fwd-header-Content-Type"` + + // The HTTP response header returned by the backend server. It uniquely identifies the object. + FwdHeaderEtag *string `input:"header,x-oss-fwd-header-ETag"` + + // The HTTP response header returned by the backend server. It specifies the absolute expiration time of the cache. + FwdHeaderExpires *string `input:"header,x-oss-fwd-header-Expires"` + + // The HTTP response header returned by the backend server. 
It specifies the time when the requested resource was last modified.
+	FwdHeaderLastModified *string `input:"header,x-oss-fwd-header-Last-Modified"`
+
+	Body io.Reader `input:"body,nop"`
+
+	RequestCommon
+}
+
+type WriteGetObjectResponseResult struct {
+	ResultCommon
+}
+
+// WriteGetObjectResponse Customizes the data and the response headers that are returned to the caller.
+func (c *Client) WriteGetObjectResponse(ctx context.Context, request *WriteGetObjectResponseRequest, optFns ...func(*Options)) (*WriteGetObjectResponseResult, error) {
+	var err error
+	if request == nil {
+		request = &WriteGetObjectResponseRequest{}
+	}
+	input := &OperationInput{
+		OpName: "WriteGetObjectResponse",
+		Method: "POST",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"x-oss-write-get-object-response": "",
+		},
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"x-oss-write-get-object-response"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &WriteGetObjectResponseResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go
new file mode 100644
index 000000000..9d80157ec
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go
@@ -0,0 +1,209 @@
+package oss
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type PolicyStatus struct {
+	// Indicates whether the current bucket policy allows public access. Valid values: true, false
+	IsPublic *bool `xml:"IsPublic"`
+}
+
+type PutBucketPolicyRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The request body, which contains the bucket policy in JSON format.
+	Body io.Reader `input:"body,nop,required"`
+
+	RequestCommon
+}
+
+type PutBucketPolicyResult struct {
+	ResultCommon
+}
+
+// PutBucketPolicy Configures a policy for a bucket.
+func (c *Client) PutBucketPolicy(ctx context.Context, request *PutBucketPolicyRequest, optFns ...func(*Options)) (*PutBucketPolicyResult, error) {
+	var err error
+	if request == nil {
+		request = &PutBucketPolicyRequest{}
+	}
+	input := &OperationInput{
+		OpName: "PutBucketPolicy",
+		Method: "PUT",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"policy": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"policy"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &PutBucketPolicyResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+	return result, err
+}
+
+type GetBucketPolicyRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	RequestCommon
+}
+
+type GetBucketPolicyResult struct {
+	// The configurations of the bucket policy.
+	Body string
+
+	ResultCommon
+}
+
+// GetBucketPolicy Queries the policies configured for a bucket.
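+//
+// A minimal caller-side sketch (illustrative only, not part of the upstream
+// SDK; it assumes an initialized *Client named "client" and a hypothetical
+// bucket "examplebucket"):
+//
+//	result, err := client.GetBucketPolicy(context.TODO(), &GetBucketPolicyRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = result.Body // the bucket policy document as a JSON string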
+func (c *Client) GetBucketPolicy(ctx context.Context, request *GetBucketPolicyRequest, optFns ...func(*Options)) (*GetBucketPolicyResult, error) { + var err error + if request == nil { + request = &GetBucketPolicyRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPolicy", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(output.Body) + defer output.Body.Close() + if err != nil { + return nil, err + } + result := &GetBucketPolicyResult{ + Body: string(body), + } + if err = c.unmarshalOutput(result, output); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketPolicyResult struct { + ResultCommon +} + +// DeleteBucketPolicy Deletes a policy for a bucket. +func (c *Client) DeleteBucketPolicy(ctx context.Context, request *DeleteBucketPolicyRequest, optFns ...func(*Options)) (*DeleteBucketPolicyResult, error) { + var err error + if request == nil { + request = &DeleteBucketPolicyRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketPolicy", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketPolicyResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketPolicyStatusRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketPolicyStatusResult struct { + // The container that stores public access information. + PolicyStatus *PolicyStatus `output:"body,PolicyStatus,xml"` + + ResultCommon +} + +// GetBucketPolicyStatus Checks whether the current bucket policy allows public access. 
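+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client" and a hypothetical bucket "examplebucket"):
+//
+//	result, err := client.GetBucketPolicyStatus(context.TODO(), &GetBucketPolicyStatusRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err == nil && result.PolicyStatus != nil && result.PolicyStatus.IsPublic != nil {
+//		_ = *result.PolicyStatus.IsPublic // true if the policy allows public access
+//	}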
+func (c *Client) GetBucketPolicyStatus(ctx context.Context, request *GetBucketPolicyStatusRequest, optFns ...func(*Options)) (*GetBucketPolicyStatusResult, error) { + var err error + if request == nil { + request = &GetBucketPolicyStatusRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPolicyStatus", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policyStatus": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policyStatus"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketPolicyStatusResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go new file mode 100644 index 000000000..e9aedd44e --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go @@ -0,0 +1,154 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type GetBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetBucketPublicAccessBlock Queries the Block Public Access configurations of a bucket. +func (c *Client) GetBucketPublicAccessBlock(ctx context.Context, request *GetBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*GetBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Request body. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketPublicAccessBlockResult struct { + ResultCommon +} + +// PutBucketPublicAccessBlock Enables or disables Block Public Access for a bucket. 
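+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client", a hypothetical bucket "examplebucket", and the
+// BlockPublicAccess *bool field that PublicAccessBlockConfiguration defines
+// elsewhere in this package):
+//
+//	result, err := client.PutBucketPublicAccessBlock(context.TODO(), &PutBucketPublicAccessBlockRequest{
+//		Bucket: Ptr("examplebucket"),
+//		PublicAccessBlockConfiguration: &PublicAccessBlockConfiguration{
+//			BlockPublicAccess: Ptr(true), // assumed field name; see api_op_publicaccessblock.go
+//		},
+//	})
+//	_ = result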
+func (c *Client) PutBucketPublicAccessBlock(ctx context.Context, request *PutBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*PutBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutBucketPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketPublicAccessBlockResult struct { + ResultCommon +} + +// DeleteBucketPublicAccessBlock Deletes the Block Public Access configurations of a bucket. +func (c *Client) DeleteBucketPublicAccessBlock(ctx context.Context, request *DeleteBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*DeleteBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeleteBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketPublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go new file mode 100644 index 000000000..d35c0cdc4 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go @@ -0,0 +1,310 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type BucketDataRedundancyTransition struct { + // The progress of the redundancy type change task in percentage. Valid values: 0 to 100. This element is available when the task is in the Processing or Finished state. + ProcessPercentage *int32 `xml:"ProcessPercentage"` + + // The estimated period of time that is required for the redundancy type change task. Unit: hours. This element is available when the task is in the Processing or Finished state. + EstimatedRemainingTime *int64 `xml:"EstimatedRemainingTime"` + + // The name of the bucket. + Bucket *string `xml:"Bucket"` + + // The ID of the redundancy type change task. 
+ TaskId *string `xml:"TaskId"` + + // The state of the redundancy type change task. Valid values:QueueingProcessingFinished + Status *string `xml:"Status"` + + // The time when the redundancy type change task was created. + CreateTime *string `xml:"CreateTime"` + + // The time when the redundancy type change task was performed. This element is available when the task is in the Processing or Finished state. + StartTime *string `xml:"StartTime"` + + // The time when the redundancy type change task was finished. This element is available when the task is in the Finished state. + EndTime *string `xml:"EndTime"` +} + +type ListBucketDataRedundancyTransition struct { + // Indicates that this ListUserDataRedundancyTransition request contains subsequent results. + // You must set NextContinuationToken to continuation-token to continue obtaining the results. + NextContinuationToken *string `xml:"NextContinuationToken"` + + // The container in which the redundancy type conversion task is stored. + BucketDataRedundancyTransitions []BucketDataRedundancyTransition `xml:"BucketDataRedundancyTransition"` + + // Indicates whether the returned results are truncated. + // Valid values:true: indicates that not all results are returned for the request. + // false: indicates that all results are returned for the request. + IsTruncated *bool `xml:"IsTruncated"` +} + +type ListBucketDataRedundancyTransitionRequest struct { + // The name of the bucket + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListBucketDataRedundancyTransitionResult struct { + // The container for listed redundancy type change tasks. + ListBucketDataRedundancyTransition *ListBucketDataRedundancyTransition `output:"body,ListBucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// ListBucketDataRedundancyTransition Lists all redundancy type conversion tasks of a bucket. +func (c *Client) ListBucketDataRedundancyTransition(ctx context.Context, request *ListBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*ListBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &ListBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "ListBucketDataRedundancyTransition", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the redundancy change task. + RedundancyTransitionTaskid *string `input:"query,x-oss-redundancy-transition-taskid,required"` + + RequestCommon +} + +type GetBucketDataRedundancyTransitionResult struct { + // The container for a specific redundancy type change task. 
+ BucketDataRedundancyTransition *BucketDataRedundancyTransition `output:"body,BucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// GetBucketDataRedundancyTransition Queries the redundancy type conversion tasks of a bucket. +func (c *Client) GetBucketDataRedundancyTransition(ctx context.Context, request *GetBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*GetBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &GetBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "GetBucketDataRedundancyTransition", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The redundancy type to which you want to convert the bucket. You can only convert the redundancy type of a bucket from LRS to ZRS. + TargetRedundancyType *string `input:"query,x-oss-target-redundancy-type,required"` + + RequestCommon +} + +type CreateBucketDataRedundancyTransitionResult struct { + // The container in which the redundancy type conversion task is stored. + BucketDataRedundancyTransition *BucketDataRedundancyTransition `output:"body,BucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// CreateBucketDataRedundancyTransition Creates a redundancy type conversion task for a bucket. +func (c *Client) CreateBucketDataRedundancyTransition(ctx context.Context, request *CreateBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*CreateBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &CreateBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "CreateBucketDataRedundancyTransition", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CreateBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the redundancy type change task. 
+ RedundancyTransitionTaskid *string `input:"query,x-oss-redundancy-transition-taskid,required"` + + RequestCommon +} + +type DeleteBucketDataRedundancyTransitionResult struct { + ResultCommon +} + +// DeleteBucketDataRedundancyTransition Deletes a redundancy type conversion task of a bucket. +func (c *Client) DeleteBucketDataRedundancyTransition(ctx context.Context, request *DeleteBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*DeleteBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &DeleteBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketDataRedundancyTransition", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListUserDataRedundancyTransitionRequest struct { + // The token from which the list operation must start. + ContinuationToken *string `input:"query,continuation-token"` + + // The maximum number of redundancy type conversion tasks that can be returned. Valid values: 1 to 100. + MaxKeys int32 `input:"query,max-keys"` + + RequestCommon +} + +type ListUserDataRedundancyTransitionResult struct { + // The container in which the listed redundancy type conversion tasks are stored. 
+	ListBucketDataRedundancyTransition *ListBucketDataRedundancyTransition `output:"body,ListBucketDataRedundancyTransition,xml"`
+
+	ResultCommon
+}
+
+// ListUserDataRedundancyTransition Lists all data redundancy type conversion tasks that belong to the requester.
+func (c *Client) ListUserDataRedundancyTransition(ctx context.Context, request *ListUserDataRedundancyTransitionRequest, optFns ...func(*Options)) (*ListUserDataRedundancyTransitionResult, error) {
+	var err error
+	if request == nil {
+		request = &ListUserDataRedundancyTransitionRequest{}
+	}
+	input := &OperationInput{
+		OpName: "ListUserDataRedundancyTransition",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"redundancyTransition": "",
+		},
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &ListUserDataRedundancyTransitionResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go
new file mode 100644
index 000000000..e907e410d
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go
@@ -0,0 +1,133 @@
+package oss
+
+import (
+	"context"
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type RefererList struct {
+	// The addresses in the Referer whitelist.
+	Referers []string `xml:"Referer"`
+}
+
+type RefererBlacklist struct {
+	// The addresses in the Referer blacklist.
+	Referers []string `xml:"Referer"`
+}
+
+type RefererConfiguration struct {
+	// Specifies whether to allow a request whose Referer field is empty. Valid values: true (default), false
+	AllowEmptyReferer *bool `xml:"AllowEmptyReferer"`
+
+	// Specifies whether to truncate the query string in the URL when the Referer is matched. Valid values: true (default), false
+	AllowTruncateQueryString *bool `xml:"AllowTruncateQueryString"`
+
+	// Specifies whether to truncate the path and parts that follow the path in the URL when the Referer is matched. Valid values: true, false
+	TruncatePath *bool `xml:"TruncatePath"`
+
+	// The container that stores the Referer whitelist. The PutBucketReferer operation overwrites the existing Referer whitelist with the Referer whitelist specified in RefererList. If RefererList is not specified in the request (that is, no Referer elements are included), the operation clears the existing Referer whitelist.
+	RefererList *RefererList `xml:"RefererList"`
+
+	// The container that stores the Referer blacklist.
+	RefererBlacklist *RefererBlacklist `xml:"RefererBlacklist"`
+}
+
+type PutBucketRefererRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The request body schema.
+	RefererConfiguration *RefererConfiguration `input:"body,RefererConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type PutBucketRefererResult struct {
+	ResultCommon
+}
+
+// PutBucketReferer Configures a Referer whitelist for an Object Storage Service (OSS) bucket. You can specify whether to allow the requests whose Referer field is empty or whose query strings are truncated.
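+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client" and a hypothetical bucket "examplebucket"). It
+// rejects empty-Referer requests and allows a single whitelisted domain:
+//
+//	result, err := client.PutBucketReferer(context.TODO(), &PutBucketRefererRequest{
+//		Bucket: Ptr("examplebucket"),
+//		RefererConfiguration: &RefererConfiguration{
+//			AllowEmptyReferer: Ptr(false),
+//			RefererList: &RefererList{
+//				Referers: []string{"https://www.example.com"},
+//			},
+//		},
+//	})
+//	_ = result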
+func (c *Client) PutBucketReferer(ctx context.Context, request *PutBucketRefererRequest, optFns ...func(*Options)) (*PutBucketRefererResult, error) { + var err error + if request == nil { + request = &PutBucketRefererRequest{} + } + input := &OperationInput{ + OpName: "PutBucketReferer", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "referer": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"referer"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketRefererResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketRefererRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketRefererResult struct { + // The container that stores the hotlink protection configurations. + RefererConfiguration *RefererConfiguration `output:"body,RefererConfiguration,xml"` + + ResultCommon +} + +// GetBucketReferer Queries the hotlink protection configurations for a bucket. +func (c *Client) GetBucketReferer(ctx context.Context, request *GetBucketRefererRequest, optFns ...func(*Options)) (*GetBucketRefererResult, error) { + var err error + if request == nil { + request = &GetBucketRefererRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReferer", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "referer": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"referer"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketRefererResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go new file mode 100644 index 000000000..50df77fc3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go @@ -0,0 +1,469 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type RtcConfiguration struct { + // The container that stores the status of RTC. + RTC *ReplicationTimeControl `xml:"RTC"` + + // The ID of the data replication rule for which you want to configure RTC. + ID *string `xml:"ID"` +} + +type ReplicationSourceSelectionCriteria struct { + // The container that is used to filter the source objects that are encrypted by using SSE-KMS. This parameter must be specified if the SourceSelectionCriteria parameter is specified in the data replication rule. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `xml:"SseKmsEncryptedObjects"` +} + +type ReplicationPrefixSet struct { + // The prefix that is used to specify the object that you want to replicate. 
Only objects whose names contain the specified prefix are replicated to the destination bucket.* The value of the Prefix parameter can be up to 1,023 characters in length.* If you specify the Prefix parameter in a data replication rule, OSS synchronizes new data and historical data based on the value of the Prefix parameter. + Prefixs []string `xml:"Prefix"` +} + +type ReplicationProgressRule struct { + // The container that stores the information about the destination bucket. + Destination *ReplicationDestination `xml:"Destination"` + + // The status of the data replication task. Valid values:* starting: OSS creates a data replication task after a data replication rule is configured.* doing: The replication rule is effective and the replication task is in progress.* closing: OSS clears a data replication task after the corresponding data replication rule is deleted. + Status *string `xml:"Status"` + + // Specifies whether to replicate historical data that exists before data replication is enabled from the source bucket to the destination bucket.* enabled (default): replicates historical data to the destination bucket.* disabled: ignores historical data and replicates only data uploaded to the source bucket after data replication is enabled for the source bucket. + HistoricalObjectReplication *string `xml:"HistoricalObjectReplication"` + + // The container that stores the progress of the data replication task. This parameter is returned only when the data replication task is in the doing state. + Progress *ReplicationProgressInformation `xml:"Progress"` + + // The ID of the data replication rule. + ID *string `xml:"ID"` + + // The container that stores prefixes. You can specify up to 10 prefixes in each data replication rule. + PrefixSet *ReplicationPrefixSet `xml:"PrefixSet"` + + // The operations that are synchronized to the destination bucket.* ALL: PUT, DELETE, and ABORT operations are synchronized to the destination bucket.* PUT: Write operations are synchronized to the destination bucket, including PutObject, PostObject, AppendObject, CopyObject, PutObjectACL, InitiateMultipartUpload, UploadPart, UploadPartCopy, and CompleteMultipartUpload. + Action *string `xml:"Action"` +} + +type ReplicationDestination struct { + // The destination bucket to which data is replicated. + Bucket *string `xml:"Bucket"` + + // The region in which the destination bucket is located. + Location *string `xml:"Location"` + + // The link that is used to transfer data during data replication. Valid values:* internal (default): the default data transfer link used in OSS.* oss_acc: the transfer acceleration link. You can set TransferType to oss_acc only when you create CRR rules. + TransferType TransferTypeType `xml:"TransferType"` +} + +type SseKmsEncryptedObjects struct { + // Specifies whether to replicate objects that are encrypted by using SSE-KMS. Valid values:* Enabled* Disabled + Status StatusType `xml:"Status"` +} + +type LocationTransferType struct { + // The regions in which the destination bucket can be located. + Location *string `xml:"Location"` + + // The container that stores the transfer type. + TransferTypes *TransferTypes `xml:"TransferTypes"` +} + +type ReplicationTimeControl struct { + // Specifies whether to enable RTC.Valid values:* disabled * enabled + Status *string `xml:"Status"` +} + +type ReplicationRule struct { + // The container that stores the information about the destination bucket. 
+ Destination *ReplicationDestination `xml:"Destination"` + + // The role that you want to authorize OSS to use to replicate data. If you want to use SSE-KMS to encrypt the objects that are replicated to the destination bucket, you must specify this parameter. + SyncRole *string `xml:"SyncRole"` + + // The container that specifies other conditions used to filter the source objects that you want to replicate. Filter conditions can be specified only for source objects encrypted by using SSE-KMS. + SourceSelectionCriteria *ReplicationSourceSelectionCriteria `xml:"SourceSelectionCriteria"` + + // The encryption configuration for the objects replicated to the destination bucket. If the Status parameter is set to Enabled, you must specify this parameter. + EncryptionConfiguration *ReplicationEncryptionConfiguration `xml:"EncryptionConfiguration"` + + // Specifies whether to replicate historical data that exists before data replication is enabled from the source bucket to the destination bucket. Valid values:* enabled (default): replicates historical data to the destination bucket.* disabled: does not replicate historical data to the destination bucket. Only data uploaded to the source bucket after data replication is enabled for the source bucket is replicated. + HistoricalObjectReplication HistoricalObjectReplicationType `xml:"HistoricalObjectReplication"` + + // The container that stores the status of the RTC feature. + RTC *ReplicationTimeControl `xml:"RTC"` + + // The ID of the rule. + ID *string `xml:"ID"` + + // The container that stores prefixes. You can specify up to 10 prefixes in each data replication rule. + PrefixSet *ReplicationPrefixSet `xml:"PrefixSet"` + + // The operations that can be synchronized to the destination bucket. If you configure Action in a data replication rule, OSS synchronizes new data and historical data based on the specified value of Action. You can set Action to one or more of the following operation types. Valid values:* ALL (default): PUT, DELETE, and ABORT operations are synchronized to the destination bucket.* PUT: Write operations are synchronized to the destination bucket, including PutObject, PostObject, AppendObject, CopyObject, PutObjectACL, InitiateMultipartUpload, UploadPart, UploadPartCopy, and CompleteMultipartUpload. + Action *string `xml:"Action"` + + // The status of the data replication task. Valid values:* starting: OSS creates a data replication task after a data replication rule is configured.* doing: The replication rule is effective and the replication task is in progress.* closing: OSS clears a data replication task after the corresponding data replication rule is deleted. + Status *string `xml:"Status"` +} + +type ReplicationConfiguration struct { + // The container that stores the data replication rules. + Rules []ReplicationRule `xml:"Rule"` +} + +type LocationTransferTypeConstraint struct { + // The container that stores regions in which the destination bucket can be located with the TransferType information. + LocationTransferTypes []LocationTransferType `xml:"LocationTransferType"` +} + +type LocationRTCConstraint struct { + // The regions where RTC is supported. + Locations []string `xml:"Location"` +} + +type ReplicationLocation struct { + // The regions in which the destination bucket can be located. + Locations []string `xml:"Location"` + + // The container that stores regions in which the destination bucket can be located with TransferType specified. 
+ LocationTransferTypeConstraint *LocationTransferTypeConstraint `xml:"LocationTransferTypeConstraint"` + + // The container that stores regions in which the RTC can be enabled. + LocationRTCConstraint *LocationRTCConstraint `xml:"LocationRTCConstraint"` +} + +type ReplicationProgress struct { + // The container that stores the progress of the data replication task corresponding to each data replication rule. + Rules []ReplicationProgressRule `xml:"Rule"` +} + +type ReplicationEncryptionConfiguration struct { + ReplicaKmsKeyID *string `xml:"ReplicaKmsKeyID"` +} + +type TransferTypes struct { + // The data transfer type that is used to transfer data in data replication. Valid values:* internal (default): the default data transfer link used in OSS.* oss_acc: the link in which data transmission is accelerated. You can set TransferType to oss_acc only when you create CRR rules. + Types []string `xml:"Type"` +} + +type ReplicationProgressInformation struct { + // The percentage of the replicated historical data. This parameter is valid only when HistoricalObjectReplication is set to enabled. + HistoricalObject *string `xml:"HistoricalObject"` + + // The time used to determine whether data is replicated to the destination bucket. Data that is written to the source bucket before the time is replicated to the destination bucket. The value of this parameter is in the GMT format. Example: Thu, 24 Sep 2015 15:39:18 GMT. + NewObject *string `xml:"NewObject"` +} + +type PutBucketRtcRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + RtcConfiguration *RtcConfiguration `input:"body,ReplicationRule,xml,required"` + + RequestCommon +} + +type PutBucketRtcResult struct { + ResultCommon +} + +// PutBucketRtc Enables or disables the Replication Time Control (RTC) feature for existing cross-region replication (CRR) rules. +func (c *Client) PutBucketRtc(ctx context.Context, request *PutBucketRtcRequest, optFns ...func(*Options)) (*PutBucketRtcResult, error) { + var err error + if request == nil { + request = &PutBucketRtcRequest{} + } + input := &OperationInput{ + OpName: "PutBucketRtc", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "rtc": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"rtc"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketRtcResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + ReplicationConfiguration *ReplicationConfiguration `input:"body,ReplicationConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketReplicationResult struct { + ReplicationRuleId *string `output:"header,x-oss-replication-rule-id"` + + ResultCommon +} + +// PutBucketReplication Configures data replication rules for a bucket. Object Storage Service (OSS) supports cross-region replication (CRR) and same-region replication (SRR). 
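+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client" plus hypothetical source and destination buckets;
+// the region value is an example):
+//
+//	result, err := client.PutBucketReplication(context.TODO(), &PutBucketReplicationRequest{
+//		Bucket: Ptr("src-bucket"),
+//		ReplicationConfiguration: &ReplicationConfiguration{
+//			Rules: []ReplicationRule{{
+//				Destination: &ReplicationDestination{
+//					Bucket:   Ptr("dst-bucket"),
+//					Location: Ptr("oss-cn-hangzhou"),
+//				},
+//			}},
+//		},
+//	})
+//	// On success, result.ReplicationRuleId carries the ID that OSS assigned
+//	// to the new rule.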
+func (c *Client) PutBucketReplication(ctx context.Context, request *PutBucketReplicationRequest, optFns ...func(*Options)) (*PutBucketReplicationResult, error) { + var err error + if request == nil { + request = &PutBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "PutBucketReplication", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "add", + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replication", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketReplicationResult struct { + // The container that stores data replication configurations. + ReplicationConfiguration *ReplicationConfiguration `output:"body,ReplicationConfiguration,xml"` + + ResultCommon +} + +// GetBucketReplication Queries the data replication rules configured for a bucket. +func (c *Client) GetBucketReplication(ctx context.Context, request *GetBucketReplicationRequest, optFns ...func(*Options)) (*GetBucketReplicationResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplication", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replication"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationLocationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketReplicationLocationResult struct { + // The container that stores the region in which the destination bucket can be located. + ReplicationLocation *ReplicationLocation `output:"body,ReplicationLocation,xml"` + + ResultCommon +} + +// GetBucketReplicationLocation Queries the regions in which available destination buckets reside. You can determine the region of the destination bucket to which the data in the source bucket are replicated based on the returned response. 
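+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client" and a hypothetical bucket "examplebucket"):
+//
+//	result, err := client.GetBucketReplicationLocation(context.TODO(), &GetBucketReplicationLocationRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err == nil && result.ReplicationLocation != nil {
+//		_ = result.ReplicationLocation.Locations // regions that can host the destination bucket
+//	}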
+func (c *Client) GetBucketReplicationLocation(ctx context.Context, request *GetBucketReplicationLocationRequest, optFns ...func(*Options)) (*GetBucketReplicationLocationResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationLocationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplicationLocation", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replicationLocation": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replicationLocation"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationLocationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationProgressRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the data replication rule. You can call the GetBucketReplication operation to query the ID. + RuleId *string `input:"query,rule-id,required"` + + RequestCommon +} + +type GetBucketReplicationProgressResult struct { + // The container that is used to store the progress of data replication tasks. + ReplicationProgress *ReplicationProgress `output:"body,ReplicationProgress,xml"` + + ResultCommon +} + +// GetBucketReplicationProgress Queries the information about the data replication process of a bucket. +func (c *Client) GetBucketReplicationProgress(ctx context.Context, request *GetBucketReplicationProgressRequest, optFns ...func(*Options)) (*GetBucketReplicationProgressResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationProgressRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplicationProgress", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replicationProgress": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replicationProgress"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationProgressResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ReplicationRules struct { + // The ID of data replication rules that you want to delete. You can call the GetBucketReplication operation to obtain the ID. + IDs []string `xml:"ID"` +} + +type DeleteBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + ReplicationRules *ReplicationRules `input:"body,ReplicationRules,xml,required"` + + RequestCommon +} + +type DeleteBucketReplicationResult struct { + ResultCommon +} + +// DeleteBucketReplication Disables data replication for a bucket and deletes the data replication rule configured for the bucket. After you call this operation, all operations performed on the source bucket are not synchronized to the destination bucket. 
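+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client", a hypothetical bucket "examplebucket", and a rule
+// ID previously returned by GetBucketReplication):
+//
+//	result, err := client.DeleteBucketReplication(context.TODO(), &DeleteBucketReplicationRequest{
+//		Bucket: Ptr("examplebucket"),
+//		ReplicationRules: &ReplicationRules{
+//			IDs: []string{"replication-rule-id"},
+//		},
+//	})
+//	_ = result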
+func (c *Client) DeleteBucketReplication(ctx context.Context, request *DeleteBucketReplicationRequest, optFns ...func(*Options)) (*DeleteBucketReplicationResult, error) { + var err error + if request == nil { + request = &DeleteBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketReplication", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "delete", + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "replication"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go new file mode 100644 index 000000000..8abc9ec28 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go @@ -0,0 +1,104 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type BucketResourceGroupConfiguration struct { + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` +} + +type GetBucketResourceGroupRequest struct { + // The name of the bucket that you want to query. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketResourceGroupResult struct { + // The container that stores the ID of the resource group. + BucketResourceGroupConfiguration *BucketResourceGroupConfiguration `output:"body,BucketResourceGroupConfiguration,xml"` + + ResultCommon +} + +// GetBucketResourceGroup Queries the ID of the resource group to which a bucket belongs. +func (c *Client) GetBucketResourceGroup(ctx context.Context, request *GetBucketResourceGroupRequest, optFns ...func(*Options)) (*GetBucketResourceGroupResult, error) { + var err error + if request == nil { + request = &GetBucketResourceGroupRequest{} + } + input := &OperationInput{ + OpName: "GetBucketResourceGroup", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "resourceGroup": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"resourceGroup"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketResourceGroupResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketResourceGroupRequest struct { + // The bucket for which you want to modify the ID of the resource group. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. 
+	BucketResourceGroupConfiguration *BucketResourceGroupConfiguration `input:"body,BucketResourceGroupConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type PutBucketResourceGroupResult struct {
+	ResultCommon
+}
+
+// PutBucketResourceGroup Modifies the ID of the resource group to which a bucket belongs.
+func (c *Client) PutBucketResourceGroup(ctx context.Context, request *PutBucketResourceGroupRequest, optFns ...func(*Options)) (*PutBucketResourceGroupResult, error) {
+	var err error
+	if request == nil {
+		request = &PutBucketResourceGroupRequest{}
+	}
+	input := &OperationInput{
+		OpName: "PutBucketResourceGroup",
+		Method: "PUT",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"resourceGroup": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"resourceGroup"})
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+	result := &PutBucketResourceGroupResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go
new file mode 100644
index 000000000..e0fa5e811
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go
@@ -0,0 +1,244 @@
+package oss
+
+import (
+	"context"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type StyleList struct {
+	// The list of styles.
+	Styles []StyleInfo `xml:"Style"`
+}
+
+type StyleInfo struct {
+	// The time when the style was created.
+	CreateTime *string `xml:"CreateTime"`
+
+	// The time when the style was last modified.
+	LastModifyTime *string `xml:"LastModifyTime"`
+
+	// The category of the style. Valid values: image, document, video.
+	Category *string `xml:"Category"`
+
+	// The style name.
+	Name *string `xml:"Name"`
+
+	// The content of the style.
+	Content *string `xml:"Content"`
+}
+
+type StyleContent struct {
+	// The content of the style.
+	Content *string `xml:"Content"`
+}
+
+type PutStyleRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the image style.
+	StyleName *string `input:"query,styleName,required"`
+
+	// The category of the style.
+	Category *string `input:"query,category"`
+
+	// The container that stores the content information about the image style.
+	Style *StyleContent `input:"body,Style,xml,required"`
+
+	RequestCommon
+}
+
+type PutStyleResult struct {
+	ResultCommon
+}
+
+// PutStyle Adds an image style to a bucket. An image style contains one or more image processing parameters.
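+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client" and a hypothetical bucket "examplebucket"; the
+// style content is an example image-processing expression):
+//
+//	result, err := client.PutStyle(context.TODO(), &PutStyleRequest{
+//		Bucket:    Ptr("examplebucket"),
+//		StyleName: Ptr("small"),
+//		Style: &StyleContent{
+//			Content: Ptr("image/resize,p_50"),
+//		},
+//	})
+//	_ = result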
+func (c *Client) PutStyle(ctx context.Context, request *PutStyleRequest, optFns ...func(*Options)) (*PutStyleResult, error) { + var err error + if request == nil { + request = &PutStyleRequest{} + } + input := &OperationInput{ + OpName: "PutStyle", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListStyleResult struct { + + // The container that was used to query the information about image styles. + StyleList *StyleList `output:"body,StyleList,xml"` + + ResultCommon +} + +// ListStyle Queries all image styles that are created for a bucket. +func (c *Client) ListStyle(ctx context.Context, request *ListStyleRequest, optFns ...func(*Options)) (*ListStyleResult, error) { + var err error + if request == nil { + request = &ListStyleRequest{} + } + input := &OperationInput{ + OpName: "ListStyle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the image style. + StyleName *string `input:"query,styleName,required"` + + RequestCommon +} + +type GetStyleResult struct { + // The container that stores the information about the image style. + Style *StyleInfo `output:"body,Style,xml"` + + ResultCommon +} + +// GetStyle Queries the information about an image style of a bucket. 
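+//
+// A minimal caller-side sketch (illustrative only; it assumes an initialized
+// *Client named "client", a hypothetical bucket "examplebucket", and a style
+// named "small" created beforehand):
+//
+//	result, err := client.GetStyle(context.TODO(), &GetStyleRequest{
+//		Bucket:    Ptr("examplebucket"),
+//		StyleName: Ptr("small"),
+//	})
+//	if err == nil && result.Style != nil {
+//		_ = result.Style.Content // the style definition, e.g. "image/resize,p_50"
+//	}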
+func (c *Client) GetStyle(ctx context.Context, request *GetStyleRequest, optFns ...func(*Options)) (*GetStyleResult, error) { + var err error + if request == nil { + request = &GetStyleRequest{} + } + input := &OperationInput{ + OpName: "GetStyle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the image style. + StyleName *string `input:"query,styleName,required"` + + RequestCommon +} + +type DeleteStyleResult struct { + ResultCommon +} + +// DeleteStyle Deletes an image style from a bucket. +func (c *Client) DeleteStyle(ctx context.Context, request *DeleteStyleRequest, optFns ...func(*Options)) (*DeleteStyleResult, error) { + var err error + if request == nil { + request = &DeleteStyleRequest{} + } + input := &OperationInput{ + OpName: "DeleteStyle", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go new file mode 100644 index 000000000..17c6ade86 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go @@ -0,0 +1,146 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + Tagging *Tagging `input:"body,Tagging,xml,required"` + + RequestCommon +} + +type PutBucketTagsResult struct { + ResultCommon +} + +// PutBucketTags Adds tags to or modifies the existing tags of a bucket. 
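+//
+// A minimal usage sketch (illustrative; assumes the Tagging, TagSet, and Tag
+// XML helper types defined elsewhere in this package, an already-constructed
+// *Client named client, and placeholder tag values):
+//
+//	_, err := client.PutBucketTags(context.TODO(), &PutBucketTagsRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Tagging: &Tagging{TagSet: &TagSet{Tags: []Tag{
+//			{Key: Ptr("owner"), Value: Ptr("data-team")},
+//		}}},
+//	})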
+func (c *Client) PutBucketTags(ctx context.Context, request *PutBucketTagsRequest, optFns ...func(*Options)) (*PutBucketTagsResult, error) { + var err error + if request == nil { + request = &PutBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "PutBucketTags", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketTagsResult struct { + // The container that stores the returned tags of the bucket. If no tags are configured for the bucket, an XML message body is returned in which the Tagging element is empty. + Tagging *Tagging `output:"body,Tagging,xml"` + + ResultCommon +} + +// GetBucketTags Queries the tags of a bucket. +func (c *Client) GetBucketTags(ctx context.Context, request *GetBucketTagsRequest, optFns ...func(*Options)) (*GetBucketTagsResult, error) { + var err error + if request == nil { + request = &GetBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "GetBucketTags", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + Tagging *string `input:"query,tagging"` + + RequestCommon +} + +type DeleteBucketTagsResult struct { + ResultCommon +} + +// DeleteBucketTags Deletes tags configured for a bucket. 
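+//
+// A minimal usage sketch (illustrative; client and the bucket name are placeholders):
+//
+//	_, err := client.DeleteBucketTags(context.TODO(), &DeleteBucketTagsRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})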
+func (c *Client) DeleteBucketTags(ctx context.Context, request *DeleteBucketTagsRequest, optFns ...func(*Options)) (*DeleteBucketTagsResult, error) { + var err error + if request == nil { + request = &DeleteBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketTags", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &DeleteBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go new file mode 100644 index 000000000..e2fdb2d33 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go @@ -0,0 +1,113 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type TransferAccelerationConfiguration struct { + // Whether the transfer acceleration is enabled for this bucket. + Enabled *bool `xml:"Enabled"` +} + +type PutBucketTransferAccelerationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + TransferAccelerationConfiguration *TransferAccelerationConfiguration `input:"body,TransferAccelerationConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketTransferAccelerationResult struct { + ResultCommon +} + +// PutBucketTransferAcceleration Configures transfer acceleration for a bucket. After you enable transfer acceleration for a bucket, the object access speed is accelerated for users worldwide. The transfer acceleration feature is applicable to scenarios where data needs to be transferred over long geographical distances. This feature can also be used to download or upload objects that are gigabytes or terabytes in size. +func (c *Client) PutBucketTransferAcceleration(ctx context.Context, request *PutBucketTransferAccelerationRequest, optFns ...func(*Options)) (*PutBucketTransferAccelerationResult, error) { + var err error + if request == nil { + request = &PutBucketTransferAccelerationRequest{} + } + input := &OperationInput{ + OpName: "PutBucketTransferAcceleration", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "transferAcceleration": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"transferAcceleration"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketTransferAccelerationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketTransferAccelerationRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"`
+
+ RequestCommon
+}
+
+type GetBucketTransferAccelerationResult struct {
+
+ // The container that stores the transfer acceleration configurations.
+ TransferAccelerationConfiguration *TransferAccelerationConfiguration `output:"body,TransferAccelerationConfiguration,xml"`
+
+ ResultCommon
+}
+
+// GetBucketTransferAcceleration Queries the transfer acceleration configurations of a bucket.
+func (c *Client) GetBucketTransferAcceleration(ctx context.Context, request *GetBucketTransferAccelerationRequest, optFns ...func(*Options)) (*GetBucketTransferAccelerationResult, error) {
+ var err error
+ if request == nil {
+ request = &GetBucketTransferAccelerationRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetBucketTransferAcceleration",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "transferAcceleration": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"transferAcceleration"})
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &GetBucketTransferAccelerationResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go
new file mode 100644
index 000000000..4a80ae9f5
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go
@@ -0,0 +1,280 @@
+package oss
+
+import (
+ "context"
+
+ "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type MirrorHeaders struct {
+ // Specifies whether to pass through all request headers other than the following headers to the origin. This parameter takes effect only when the value of RedirectType is Mirror.
+ // * Headers such as content-length, authorization2, authorization, range, and date
+ // * Headers that start with oss-, x-oss-, and x-drs-
+ // Default value: false. Valid values: true and false.
+ PassAll *bool `xml:"PassAll"`
+
+ // The headers to pass through to the origin. This parameter takes effect only when the value of RedirectType is Mirror. Each specified header can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). You can specify up to 10 headers.
+ Passs []string `xml:"Pass"`
+
+ // The headers that are not allowed to pass through to the origin. This parameter takes effect only when the value of RedirectType is Mirror. Each header can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). You can specify up to 10 headers. This parameter is used together with PassAll.
+ Removes []string `xml:"Remove"`
+
+ // The headers that are sent to the origin. The specified headers are configured in the data returned by the origin regardless of whether the headers are contained in the request. This parameter takes effect only when the value of RedirectType is Mirror. You can specify up to 10 headers.
+ Sets []MirrorHeadersSet `xml:"Set"`
+}
+
+type RoutingRule struct {
+ // The sequence number that is used to match and run the redirection rules. OSS matches redirection rules based on this parameter.
If a match succeeds, only the rule is run and the subsequent rules are not run. This parameter must be specified if RoutingRule is specified. + RuleNumber *int64 `xml:"RuleNumber"` + + // The matching condition. If all of the specified conditions are met, the rule is run. A rule is considered matched only when the rule meets the conditions that are specified by all nodes in Condition. This parameter must be specified if RoutingRule is specified. + Condition *RoutingRuleCondition `xml:"Condition"` + + // The operation to perform after the rule is matched. This parameter must be specified if RoutingRule is specified. + Redirect *RoutingRuleRedirect `xml:"Redirect"` +} + +type WebsiteConfiguration struct { + // The container that stores the default homepage. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + IndexDocument *IndexDocument `xml:"IndexDocument"` + + // The container that stores the default 404 page. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + ErrorDocument *ErrorDocument `xml:"ErrorDocument"` + + // The container that stores the redirection rules. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + RoutingRules *RoutingRules `xml:"RoutingRules"` +} + +type IndexDocument struct { + // The default homepage. + Suffix *string `xml:"Suffix"` + + // Specifies whether to redirect the access to the default homepage of the subdirectory when the subdirectory is accessed. Valid values:* **true**: The access is redirected to the default homepage of the subdirectory.* **false** (default): The access is redirected to the default homepage of the root directory.For example, the default homepage is set to index.html, and `bucket.oss-cn-hangzhou.aliyuncs.com/subdir/` is the site that you want to access. If **SupportSubDir** is set to false, the access is redirected to `bucket.oss-cn-hangzhou.aliyuncs.com/index.html`. If **SupportSubDir** is set to true, the access is redirected to `bucket.oss-cn-hangzhou.aliyuncs.com/subdir/index.html`. + SupportSubDir *bool `xml:"SupportSubDir"` + + // The operation to perform when the default homepage is set, the name of the accessed object does not end with a forward slash (/), and the object does not exist. This parameter takes effect only when **SupportSubDir** is set to true. It takes effect after RoutingRule but before ErrorFile. For example, the default homepage is set to index.html, `bucket.oss-cn-hangzhou.aliyuncs.com/abc` is the site that you want to access, and the abc object does not exist. In this case, different operations are performed based on the value of **Type**.* **0** (default): OSS checks whether the object named abc/index.html, which is in the `Object + Forward slash (/) + Homepage` format, exists. If the object exists, OSS returns HTTP status code 302 and the Location header value that contains URL-encoded `/abc/`. The URL-encoded /abc/ is in the `Forward slash (/) + Object + Forward slash (/)` format. If the object does not exist, OSS returns HTTP status code 404 and continues to check ErrorFile.* **1**: OSS returns HTTP status code 404 and the NoSuchKey error code and continues to check ErrorFile.* **2**: OSS checks whether abc/index.html exists. If abc/index.html exists, the content of the object is returned. If abc/index.html does not exist, OSS returns HTTP status code 404 and continues to check ErrorFile. 
+ Type *int64 `xml:"Type"`
+}
+
+type ErrorDocument struct {
+ // The error page.
+ Key *string `xml:"Key"`
+
+ // The HTTP status code returned with the error page.
+ HttpStatus *int64 `xml:"HttpStatus"`
+}
+
+type RoutingRuleIncludeHeader struct {
+ // The key of the header. The rule is matched only when the specified header is included in the request and the header value equals the value specified by Equals.
+ Key *string `xml:"Key"`
+
+ // The value of the header. The rule is matched only when the header specified by Key is included in the request and the header value equals the specified value.
+ Equals *string `xml:"Equals"`
+}
+
+type RoutingRuleCondition struct {
+ // The prefix of object names. Only objects whose names contain the specified prefix match the rule.
+ KeyPrefixEquals *string `xml:"KeyPrefixEquals"`
+
+ // The suffix of object names. Only objects whose names contain the specified suffix match the rule.
+ KeySuffixEquals *string `xml:"KeySuffixEquals"`
+
+ // The HTTP status code. The rule is matched only when the specified object is accessed and the specified HTTP status code is returned. If the redirection rule is the mirroring-based back-to-origin rule, the value of this parameter is 404.
+ HttpErrorCodeReturnedEquals *int64 `xml:"HttpErrorCodeReturnedEquals"`
+
+ // The rule is matched only when the request contains the specified header and the header value equals the specified value. You can specify up to 10 headers in this container.
+ IncludeHeaders []RoutingRuleIncludeHeader `xml:"IncludeHeader"`
+}
+
+type MirrorHeadersSet struct {
+ // The key of the header. The key can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). This parameter takes effect only when the value of RedirectType is Mirror. This parameter must be specified if Set is specified.
+ Key *string `xml:"Key"`
+
+ // The value of the header. The value can be up to 1,024 bytes in length and cannot contain `\r\n`. This parameter takes effect only when the value of RedirectType is Mirror. This parameter must be specified if Set is specified.
+ Value *string `xml:"Value"`
+}
+
+type RoutingRuleRedirect struct {
+ // The origin URL for mirroring-based back-to-origin. This parameter takes effect only when the value of RedirectType is Mirror. The origin URL must start with **http://** or **https://** and end with a forward slash (/). OSS adds an object name to the end of the URL to generate a back-to-origin URL. For example, the name of the object to access is myobject. If MirrorURL is set to `http://example.com/`, the back-to-origin URL is `http://example.com/myobject`. If MirrorURL is set to `http://example.com/dir1/`, the back-to-origin URL is `http://example.com/dir1/myobject`. This parameter must be specified if RedirectType is set to Mirror.
+ MirrorURL *string `xml:"MirrorURL"`
+
+ // Specifies whether to redirect the access to the address specified by Location if the origin returns an HTTP 3xx status code. This parameter takes effect only when the value of RedirectType is Mirror. For example, when a mirroring-based back-to-origin request is initiated, the origin returns 302 and Location is specified. * If you set MirrorFollowRedirect to true, OSS continues requesting the resource at the address specified by Location. The access can be redirected up to 10 times. If the access is redirected more than 10 times, the mirroring-based back-to-origin request fails. * If you set MirrorFollowRedirect to false, OSS returns 302 and passes through Location. Default value: true.
+ MirrorFollowRedirect *bool `xml:"MirrorFollowRedirect"`
+
+ // If this parameter is set to true, the prefix of the object names is replaced with the value specified by ReplaceKeyPrefixWith. If this parameter is not specified or empty, the prefix of object names is truncated. When the ReplaceKeyWith parameter is not empty, the EnableReplacePrefix parameter cannot be set to true. Default value: false.
+ EnableReplacePrefix *bool `xml:"EnableReplacePrefix"`
+
+ // The string that is used to replace the requested object name when the request is redirected. This parameter can be set to the ${key} variable, which indicates the object name in the request. For example, if ReplaceKeyWith is set to `prefix/${key}.suffix` and the object to access is test, the value of the Location header is `http://example.com/prefix/test.suffix`.
+ ReplaceKeyWith *string `xml:"ReplaceKeyWith"`
+
+ // The domain name used for redirection. The domain name must comply with the domain naming rules. For example, if you access an object named test, Protocol is set to https, and Hostname is set to `example.com`, the value of the Location header is `https://example.com/test`.
+ HostName *string `xml:"HostName"`
+
+ // Specifies whether to include parameters of the original request in the redirection request when the system runs the redirection rule or mirroring-based back-to-origin rule. For example, if the **PassQueryString** parameter is set to true, the `?a=b&c=d` parameter string is included in a request sent to OSS, and the redirection mode is 302, this parameter is added to the Location header. For example, if the request is `Location:example.com?a=b&c=d` and the redirection type is mirroring-based back-to-origin, the ?a=b&c=d parameter string is also included in the back-to-origin request. Valid values: true and false (default).
+ PassQueryString *bool `xml:"PassQueryString"`
+
+ // The headers contained in the response that is returned when you use mirroring-based back-to-origin. This parameter takes effect only when the value of RedirectType is Mirror.
+ MirrorHeaders *MirrorHeaders `xml:"MirrorHeaders"`
+
+ // The string that is used to replace the prefix of the object name during redirection. If the prefix of an object name is empty, the string precedes the object name. You can specify only one of the ReplaceKeyWith and ReplaceKeyPrefixWith parameters in a rule. For example, if you access an object named abc/test.txt, KeyPrefixEquals is set to abc/, and ReplaceKeyPrefixWith is set to def/, the value of the Location header is `http://example.com/def/test.txt`.
+ ReplaceKeyPrefixWith *string `xml:"ReplaceKeyPrefixWith"`
+
+ // The redirection type. Valid values: * **Mirror**: mirroring-based back-to-origin. * **External**: external redirection. OSS returns an HTTP 3xx status code and returns an address for you to redirect to. * **AliCDN**: redirection based on Alibaba Cloud CDN. Compared with external redirection, OSS adds an additional header to the request. After Alibaba Cloud CDN identifies the header, Alibaba Cloud CDN redirects the access to the specified address and returns the obtained data instead of the HTTP 3xx status code that redirects the access to another address. This parameter must be specified if Redirect is specified.
+ RedirectType *string `xml:"RedirectType"`
+
+ // Specifies whether to transparently pass SNI (Server Name Indication) through to the origin for mirroring-based back-to-origin.
+ MirrorSNI *bool `xml:"MirrorSNI"`
+
+ // The protocol used for redirection. This parameter takes effect only when RedirectType is set to External or AliCDN.
For example, if you access an object named test, Protocol is set to https, and Hostname is set to `example.com`, the value of the Location header is `https://example.com/test`. Valid values: **http** and **https**.
+ Protocol *string `xml:"Protocol"`
+
+ // Specifies whether to check the MD5 hash of the body of the response returned by the origin. This parameter takes effect only when the value of RedirectType is Mirror. When **MirrorCheckMd5** is set to true and the response returned by the origin includes the Content-Md5 header, OSS checks whether the MD5 hash of the obtained data matches the header value. If the MD5 hash of the obtained data does not match the header value, the obtained data is not stored in OSS. Default value: false.
+ MirrorCheckMd5 *bool `xml:"MirrorCheckMd5"`
+
+ // The HTTP redirect code in the response. This parameter takes effect only when RedirectType is set to External or AliCDN. Valid values: 301, 302, and 307.
+ HttpRedirectCode *int64 `xml:"HttpRedirectCode"`
+
+ // Specifies whether to transparently pass the forward slashes (/) in the request URI through to the origin for mirroring-based back-to-origin.
+ MirrorPassOriginalSlashes *bool `xml:"MirrorPassOriginalSlashes"`
+
+ // This parameter plays the same role as PassQueryString and has a higher priority than PassQueryString. This parameter takes effect only when the value of RedirectType is Mirror. Default value: false. Valid values: true and false.
+ MirrorPassQueryString *bool `xml:"MirrorPassQueryString"`
+}
+
+type RoutingRules struct {
+ // The specified redirection rule or mirroring-based back-to-origin rule. You can specify up to 20 rules.
+ RoutingRules []RoutingRule `xml:"RoutingRule"`
+}
+
+type GetBucketWebsiteRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ RequestCommon
+}
+
+type GetBucketWebsiteResult struct {
+ // The containers of the website configuration.
+ WebsiteConfiguration *WebsiteConfiguration `output:"body,WebsiteConfiguration,xml"`
+
+ ResultCommon
+}
+
+// GetBucketWebsite Queries the static website hosting status and redirection rules configured for a bucket.
+func (c *Client) GetBucketWebsite(ctx context.Context, request *GetBucketWebsiteRequest, optFns ...func(*Options)) (*GetBucketWebsiteResult, error) {
+ var err error
+ if request == nil {
+ request = &GetBucketWebsiteRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetBucketWebsite",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "website": "",
+ },
+ Bucket: request.Bucket,
+ }
+ input.OpMetadata.Set(signer.SubResource, []string{"website"})
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &GetBucketWebsiteResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type PutBucketWebsiteRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The request body schema.
+ WebsiteConfiguration *WebsiteConfiguration `input:"body,WebsiteConfiguration,xml,required"`
+
+ RequestCommon
+}
+
+type PutBucketWebsiteResult struct {
+ ResultCommon
+}
+
+// PutBucketWebsite Enables the static website hosting mode for a bucket and configures redirection rules for the bucket.
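+//
+// A minimal usage sketch (illustrative; client and the page names are placeholders,
+// and Ptr is this package's generic pointer helper):
+//
+//	_, err := client.PutBucketWebsite(context.TODO(), &PutBucketWebsiteRequest{
+//		Bucket: Ptr("examplebucket"),
+//		WebsiteConfiguration: &WebsiteConfiguration{
+//			IndexDocument: &IndexDocument{Suffix: Ptr("index.html")},
+//			ErrorDocument: &ErrorDocument{Key: Ptr("error.html"), HttpStatus: Ptr(int64(404))},
+//		},
+//	})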
+func (c *Client) PutBucketWebsite(ctx context.Context, request *PutBucketWebsiteRequest, optFns ...func(*Options)) (*PutBucketWebsiteResult, error) { + var err error + if request == nil { + request = &PutBucketWebsiteRequest{} + } + input := &OperationInput{ + OpName: "PutBucketWebsite", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "website": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"website"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketWebsiteResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketWebsiteRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketWebsiteResult struct { + ResultCommon +} + +// DeleteBucketWebsite Disables the static website hosting mode and deletes the redirection rules for a bucket. +func (c *Client) DeleteBucketWebsite(ctx context.Context, request *DeleteBucketWebsiteRequest, optFns ...func(*Options)) (*DeleteBucketWebsiteResult, error) { + var err error + if request == nil { + request = &DeleteBucketWebsiteRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketWebsite", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "website": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"website"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &DeleteBucketWebsiteResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go new file mode 100644 index 000000000..48f40e21a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go @@ -0,0 +1,273 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type InitiateWormConfiguration struct { + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` +} + +type ExtendWormConfiguration struct { + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` +} + +type WormConfiguration struct { + // The ID of the retention policy.>Note If the specified retention policy ID that is used to query the retention policy configurations of the bucket does not exist, OSS returns the 404 error code. + WormId *string `xml:"WormId"` + + // The status of the retention policy. Valid values:- InProgress: indicates that the retention policy is in the InProgress state. By default, a retention policy is in the InProgress state after it is created. 
The policy remains in this state for 24 hours.- Locked: indicates that the retention policy is in the Locked state. + State BucketWormStateType `xml:"State"` + + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` + + // The time at which the retention policy was created. + CreationDate *string `xml:"CreationDate"` + + // The time at which the retention policy will be expired. + //ExpirationDate *string `xml:"ExpirationDate"` +} + +type InitiateBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + InitiateWormConfiguration *InitiateWormConfiguration `input:"body,InitiateWormConfiguration,xml,required"` + + RequestCommon +} + +type InitiateBucketWormResult struct { + // The ID of the retention policy. + WormId *string `output:"header,x-oss-worm-id"` + + ResultCommon +} + +// InitiateBucketWorm Creates a retention policy. +func (c *Client) InitiateBucketWorm(ctx context.Context, request *InitiateBucketWormRequest, optFns ...func(*Options)) (*InitiateBucketWormResult, error) { + var err error + if request == nil { + request = &InitiateBucketWormRequest{} + } + input := &OperationInput{ + OpName: "InitiateBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &InitiateBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type AbortBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type AbortBucketWormResult struct { + ResultCommon +} + +// AbortBucketWorm Deletes an unlocked retention policy for a bucket. +func (c *Client) AbortBucketWorm(ctx context.Context, request *AbortBucketWormRequest, optFns ...func(*Options)) (*AbortBucketWormResult, error) { + var err error + if request == nil { + request = &AbortBucketWormRequest{} + } + input := &OperationInput{ + OpName: "AbortBucketWorm", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AbortBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type CompleteBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the retention policy. + WormId *string `input:"query,wormId,required"` + + RequestCommon +} + +type CompleteBucketWormResult struct { + ResultCommon +} + +// CompleteBucketWorm Locks a retention policy. 
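+//
+// A minimal usage sketch (illustrative; client and the bucket name are placeholders,
+// and the worm ID comes from a prior InitiateBucketWorm call):
+//
+//	initResult, err := client.InitiateBucketWorm(context.TODO(), &InitiateBucketWormRequest{
+//		Bucket: Ptr("examplebucket"),
+//		InitiateWormConfiguration: &InitiateWormConfiguration{
+//			RetentionPeriodInDays: Ptr(int32(365)),
+//		},
+//	})
+//	if err == nil {
+//		_, err = client.CompleteBucketWorm(context.TODO(), &CompleteBucketWormRequest{
+//			Bucket: Ptr("examplebucket"),
+//			WormId: initResult.WormId,
+//		})
+//	}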
+func (c *Client) CompleteBucketWorm(ctx context.Context, request *CompleteBucketWormRequest, optFns ...func(*Options)) (*CompleteBucketWormResult, error) { + var err error + if request == nil { + request = &CompleteBucketWormRequest{} + } + input := &OperationInput{ + OpName: "CompleteBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"wormId"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CompleteBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type ExtendBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the retention policy.> If the ID of the retention policy that specifies the number of days for which objects can be retained does not exist, the HTTP status code 404 is returned. + WormId *string `input:"query,wormId,required"` + + // The container of the request body. + ExtendWormConfiguration *ExtendWormConfiguration `input:"body,ExtendWormConfiguration,xml,required"` + + RequestCommon +} + +type ExtendBucketWormResult struct { + ResultCommon +} + +// ExtendBucketWorm Extends the retention period of objects in a bucket for which a retention policy is locked. +func (c *Client) ExtendBucketWorm(ctx context.Context, request *ExtendBucketWormRequest, optFns ...func(*Options)) (*ExtendBucketWormResult, error) { + var err error + if request == nil { + request = &ExtendBucketWormRequest{} + } + input := &OperationInput{ + OpName: "ExtendBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "wormExtend": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"wormExtend", "wormId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ExtendBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketWormResult struct { + // The container that stores the information about retention policies of the bucket. + WormConfiguration *WormConfiguration `output:"body,WormConfiguration,xml"` + + ResultCommon +} + +// GetBucketWorm Queries the retention policy configured for a bucket. 
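+//
+// A minimal usage sketch (illustrative; client and the bucket name are placeholders):
+//
+//	result, err := client.GetBucketWorm(context.TODO(), &GetBucketWormRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err == nil && result.WormConfiguration != nil {
+//		fmt.Println(ToString(result.WormConfiguration.WormId))
+//	}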
+func (c *Client) GetBucketWorm(ctx context.Context, request *GetBucketWormRequest, optFns ...func(*Options)) (*GetBucketWormResult, error) { + var err error + if request == nil { + request = &GetBucketWormRequest{} + } + input := &OperationInput{ + OpName: "GetBucketWorm", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go new file mode 100644 index 000000000..3ca4a62d2 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go @@ -0,0 +1,89 @@ +package oss + +import ( + "context" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ListCloudBoxesRequest struct { + // The name of the bucket from which the list operation begins. + Marker *string `input:"query,marker"` + + // The maximum number of buckets that can be returned in the single query. + // Valid values: 1 to 1000. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of returned buckets must contain. + Prefix *string `input:"query,prefix"` + + RequestCommon +} + +type ListCloudBoxesResult struct { + // The prefix contained in the names of the returned bucket. + Prefix *string `xml:"Prefix"` + + // The name of the bucket after which the ListBuckets operation starts. + Marker *string `xml:"Marker"` // The marker filter. + + // The maximum number of buckets that can be returned for the request. + MaxKeys int32 `xml:"MaxKeys"` + + // Indicates whether all results are returned. + // true: Only part of the results are returned for the request. + // false: All results are returned for the request. + IsTruncated bool `xml:"IsTruncated"` + + // The marker for the next ListBuckets request, which can be used to return the remaining results. + NextMarker *string `xml:"NextMarker"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The container that stores information about cloud box bucket. + CloudBoxes []CloudBoxProperties `xml:"CloudBoxes>CloudBox"` + + ResultCommon +} + +type CloudBoxProperties struct { + ID *string `xml:"ID"` + Name *string `xml:"Name"` + Region *string `xml:"Region"` + ControlEndpoint *string `xml:"ControlEndpoint"` + DataEndpoint *string `xml:"DataEndpoint"` +} + +// ListCloudBoxes Lists cloud box buckets that belong to the current account. 
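+//
+// A minimal usage sketch (illustrative; client is an already-constructed *Client):
+//
+//	result, err := client.ListCloudBoxes(context.TODO(), &ListCloudBoxesRequest{})
+//	if err == nil {
+//		for _, box := range result.CloudBoxes {
+//			fmt.Println(ToString(box.Name), ToString(box.Region))
+//		}
+//	}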
+func (c *Client) ListCloudBoxes(ctx context.Context, request *ListCloudBoxesRequest, optFns ...func(*Options)) (*ListCloudBoxesResult, error) { + var err error + if request == nil { + request = &ListCloudBoxesRequest{} + } + input := &OperationInput{ + OpName: "ListCloudBoxes", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cloudboxes": "", + }, + } + + input.OpMetadata.Set(signer.SubResource, []string{"cloudboxes"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListCloudBoxesResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go new file mode 100644 index 000000000..e38bc8e53 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go @@ -0,0 +1,12 @@ +package oss + +import ( + "context" +) + +func (c *Client) InvokeOperation(ctx context.Context, input *OperationInput, optFns ...func(*Options)) (*OperationOutput, error) { + if err := validateInput(input); err != nil { + return nil, err + } + return c.invokeOperation(ctx, input, optFns) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go new file mode 100644 index 000000000..52ce1cc23 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go @@ -0,0 +1,2572 @@ +package oss + +import ( + "context" + "fmt" + "hash" + "io" + "sort" + "strconv" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The caching behavior of the web page when the object is downloaded. + CacheControl *string `input:"header,Cache-Control"` + + // The method that is used to access the object. + ContentDisposition *string `input:"header,Content-Disposition"` + + // The method that is used to encode the object. + ContentEncoding *string `input:"header,Content-Encoding"` + + // The size of the data in the HTTP message body. Unit: bytes. + ContentLength *int64 `input:"header,Content-Length"` + + // The MD5 hash of the object that you want to upload. + ContentMD5 *string `input:"header,Content-MD5"` + + // A standard MIME type describing the format of the contents. + ContentType *string `input:"header,Content-Type"` + + // The expiration time of the cache in UTC. + Expires *string `input:"header,Expires"` + + // Specifies whether the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name. + // When versioning is enabled or suspended for the bucket to which you want to upload the object, the x-oss-forbid-overwrite header does not take effect. In this case, the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name. Default value: false. 
+ // If you do not specify the x-oss-forbid-overwrite header or you set the x-oss-forbid-overwrite header to false, the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name.
+ // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name cannot be overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The encryption method on the server side when an object is created. Valid values: AES256, KMS, SM4.
+ // If you specify the header, the header is returned in the response.
+ // OSS uses the method that is specified by this header to encrypt the uploaded object.
+ // When you download the encrypted object, the x-oss-server-side-encryption header is included in the response and the header value is set to the algorithm that is used to encrypt the object.
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // Specify the encryption algorithm for the object. Valid value: SM4.
+ // If this option is not specified, the object is encrypted with the AES256 algorithm.
+ // This option is valid only when x-oss-server-side-encryption is set to KMS.
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // A callback parameter is a Base64-encoded string that contains multiple fields in the JSON format.
+ Callback *string `input:"header,x-oss-callback"`
+
+ // Configure custom parameters by using the callback-var parameter.
+ CallbackVar *string `input:"header,x-oss-callback-var"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Object data.
+ Body io.Reader `input:"body,nop"`
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type PutObjectResult struct {
+ // Content-Md5 for the uploaded object.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `output:"header,ETag"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ CallbackResult map[string]any
+
+ ResultCommon
+}
+
+// PutObject Uploads an object.
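+//
+// A minimal usage sketch (illustrative; client, bucket, and key are placeholders,
+// and strings.NewReader stands in for any io.Reader):
+//
+//	_, err := client.PutObject(context.TODO(), &PutObjectRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("exampledir/exampleobject.txt"),
+//		Body:   strings.NewReader("hello oss"),
+//	})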
+func (c *Client) PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) { + var err error + if request == nil { + request = &PutObjectRequest{} + } + input := &OperationInput{ + OpName: "PutObject", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + } + + marshalFns := []func(any, *OperationInput) error{ + addProgress, + c.updateContentType, + c.addCrcCheck, + } + unmarshalFns := []func(result any, output *OperationOutput) error{ + unmarshalHeader, + } + + if request.Callback != nil { + marshalFns = append(marshalFns, addCallback) + unmarshalFns = append(unmarshalFns, unmarshalCallbackBody) + } else { + unmarshalFns = append(unmarshalFns, discardBody) + } + + if err = c.marshalInput(request, input, marshalFns...); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type HTTPRange struct { + Offset int64 + Count int64 +} + +func (r HTTPRange) FormatHTTPRange() *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} + +type HTTPContentRange struct { + Offset int64 + Count int64 + Total int64 +} + +func (r HTTPContentRange) FormatHTTPContentRange() *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes %v-%s/%s", r.Offset, endOffset, strconv.FormatInt(r.Total, 10)) + return &dataRange +} + +type GetObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // If the ETag specified in the request matches the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + IfMatch *string `input:"header,If-Match"` + + // If the ETag specified in the request does not match the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + IfNoneMatch *string `input:"header,If-None-Match"` + + // If the time specified in this header is earlier than the object modified time or is invalid, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfModifiedSince *string `input:"header,If-Modified-Since"` + + // If the time specified in this header is the same as or later than the object modified time, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfUnmodifiedSince *string `input:"header,If-Unmodified-Since"` + + // The content range of the object to be returned. + // If the value of Range is valid, the total size of the object and the content range are returned. 
+ // For example, Content-Range: bytes 0-9/44 indicates that the total size of the object is 44 bytes,
+ // and the range of data returned is the first 10 bytes.
+ // However, if the value of Range is invalid, the entire object is returned,
+ // and the response does not include the Content-Range parameter.
+ Range *string `input:"header,Range"`
+
+ // Specifies standard behaviors for downloading data by range.
+ // If the value is "standard", the download behavior is modified when the specified range is not within the valid range.
+ // For an object whose size is 1,000 bytes:
+ // 1) If you set Range: bytes to 500-2000, the value at the end of the range is invalid.
+ // In this case, OSS returns HTTP status code 206 and the data that is within the range of byte 500 to byte 999.
+ // 2) If you set Range: bytes to 1000-2000, the value at the start of the range is invalid.
+ // In this case, OSS returns HTTP status code 416 and the InvalidRange error code.
+ RangeBehavior *string `input:"header,x-oss-range-behavior"`
+
+ // The cache-control header to be returned in the response.
+ ResponseCacheControl *string `input:"query,response-cache-control"`
+
+ // The content-disposition header to be returned in the response.
+ ResponseContentDisposition *string `input:"query,response-content-disposition"`
+
+ // The content-encoding header to be returned in the response.
+ ResponseContentEncoding *string `input:"query,response-content-encoding"`
+
+ // The content-language header to be returned in the response.
+ ResponseContentLanguage *string `input:"query,response-content-language"`
+
+ // The content-type header to be returned in the response.
+ ResponseContentType *string `input:"query,response-content-type"`
+
+ // The expires header to be returned in the response.
+ ResponseExpires *string `input:"query,response-expires"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `input:"query,versionId"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // Image processing parameters
+ Process *string `input:"query,x-oss-process"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type GetObjectResult struct {
+ // Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+ ContentLength int64 `output:"header,Content-Length"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `output:"header,Content-Range"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `output:"header,Content-Type"`
+
+ // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+ ETag *string `output:"header,ETag"`
+
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `output:"header,Last-Modified,time"`
+
+ // The storage class of the object.
+ StorageClass *string `output:"header,x-oss-storage-class"`
+
+ // Content-Md5 for the uploaded object.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // A map of metadata to store with the object.
+ Metadata map[string]string `output:"header,x-oss-meta-,usermeta"`
+
+ // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding,
+ // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request.
+ // The x-oss-server-side-encryption header is included in the response to indicate
+ // the encryption algorithm used to encrypt the object on the server.
+ ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm.
+ ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The type of the object.
+ ObjectType *string `output:"header,x-oss-object-type"`
+
+ // The position for the next append operation.
+ // If the type of the object is Appendable, this header is included in the response.
+ NextAppendPosition *string `output:"header,x-oss-next-append-position"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The lifecycle information about the object.
+ // If lifecycle rules are configured for the object, this header is included in the response.
+ // This header contains the following parameters: expiry-date that indicates the expiration time of the object,
+ // and rule-id that indicates the ID of the matched lifecycle rule.
+ Expiration *string `output:"header,x-oss-expiration"`
+
+ // The status of the object when you restore an object.
+ // If the storage class of the bucket is Archive and a RestoreObject request has been submitted,
+ // this header is included in the response to indicate the restoration status of the object.
+ Restore *string `output:"header,x-oss-restore"`
+
+ // The result of an event notification that is triggered for the object.
+ ProcessStatus *string `output:"header,x-oss-process-status"`
+
+ // The number of tags added to the object.
+ // This header is included in the response only when you have read permissions on tags.
+ TaggingCount int32 `output:"header,x-oss-tagging-count"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete Marker.
+ DeleteMarker bool `output:"header,x-oss-delete-marker"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // Object data.
+ Body io.ReadCloser + + ResultCommon +} + +func (c *Client) GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) { + var err error + if request == nil { + request = &GetObjectRequest{} + } + input := &OperationInput{ + OpName: "GetObject", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Body: output.Body, + } + if err = c.unmarshalOutput(result, output, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CopyObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The name of the source bucket. + SourceBucket *string `input:"nop,bucket"` + + // The path of the source object. + SourceKey *string `input:"nop,key,required"` + + // The version ID of the source object. + SourceVersionId *string `input:"nop,versionId"` + + // Specifies whether the CopyObject operation overwrites objects with the same name. The x-oss-forbid-overwrite request header does not take effect when versioning is enabled or suspended for the destination bucket. In this case, the CopyObject operation overwrites the existing object that has the same name as the destination object. + // If you do not specify the x-oss-forbid-overwrite header or set the header to false, an existing object that has the same name as the object that you want to copy is overwritten. + // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name as the object that you want to copy is not overwritten. + ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"` + + // If the ETag specified in the request matches the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + IfMatch *string `input:"header,x-oss-copy-source-if-match"` + + // If the ETag specified in the request does not match the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + IfNoneMatch *string `input:"header,x-oss-copy-source-if-none-match"` + + // If the time specified in this header is earlier than the object modified time or is invalid, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfModifiedSince *string `input:"header,x-oss-copy-source-if-modified-since"` + + // If the time specified in this header is the same as or later than the object modified time, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfUnmodifiedSince *string `input:"header,x-oss-copy-source-if-unmodified-since"` + + // The method that is used to configure the metadata of the destination object. + // COPY (default): The metadata of the source object is copied to the destination object. + // The configurations of the x-oss-server-side-encryption + // header of the source object are not copied to the destination object. 
+ // The x-oss-server-side-encryption header in the CopyObject request specifies
+ // the method used to encrypt the destination object.
+ // REPLACE: The metadata specified in the request is used as the metadata of the destination object.
+ MetadataDirective *string `input:"header,x-oss-metadata-directive"`
+
+ // The entropy coding-based encryption algorithm that OSS uses to encrypt an object when you create the object.
+ // Valid values: AES256, KMS, SM4
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm. Valid value: SM4
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `input:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `input:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `input:"header,Content-Encoding"`
+
+ // A standard MIME type describing the format of the contents.
+ ContentType *string `input:"header,Content-Type"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `input:"header,Expires"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // The method that is used to configure tags for the destination object.
+ // Valid values: Copy (default): The tags of the source object are copied to the destination object.
+ // Replace: The tags specified in the request are configured for the destination object.
+ TaggingDirective *string `input:"header,x-oss-tagging-directive"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Progress callback function; it works in Copier.Copy only.
+ ProgressFn ProgressFunc
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type CopyObjectResult struct {
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The version ID of the source object.
+ SourceVersionId *string `output:"header,x-oss-copy-source-version-id"` + + // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding, + // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request. + // The x-oss-server-side-encryption header is included in the response to indicate + // the encryption algorithm used to encrypt the object on the server. + ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"` + + // The server side data encryption algorithm. + ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"` + + // Deprecated: Please use ServerSideEncryptionKeyId + SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS). + // This header is valid only when the x-oss-server-side-encryption header is set to KMS. + ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object. + ETag *string `xml:"ETag"` + + ResultCommon +} + +// CopyObject Copies objects within a bucket or between buckets in the same region +func (c *Client) CopyObject(ctx context.Context, request *CopyObjectRequest, optFns ...func(*Options)) (*CopyObjectResult, error) { + var err error + if request == nil { + request = &CopyObjectRequest{} + } + + input := &OperationInput{ + OpName: "CopyObject", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + "x-oss-copy-source": encodeSourceObject(request), + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CopyObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type AppendObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The position from which the AppendObject operation starts. + // Each time an AppendObject operation succeeds, the x-oss-next-append-position header is included in + // the response to specify the position from which the next AppendObject operation starts. + Position *int64 `input:"query,position,required"` + + // The caching behavior of the web page when the object is downloaded. + CacheControl *string `input:"header,Cache-Control"` + + // The method that is used to access the object. + ContentDisposition *string `input:"header,Content-Disposition"` + + // The method that is used to encode the object. + ContentEncoding *string `input:"header,Content-Encoding"` + + // The size of the data in the HTTP message body. Unit: bytes. + ContentLength *int64 `input:"header,Content-Length"` + + // The MD5 hash of the object that you want to upload. + ContentMD5 *string `input:"header,Content-MD5"` + + // The expiration time of the cache in UTC. + Expires *string `input:"header,Expires"` + + // A standard MIME type describing the format of the contents. 
+ ContentType *string `input:"header,Content-Type"`
+
+ // Specifies whether the AppendObject operation overwrites objects with the same name. The x-oss-forbid-overwrite request header does not take effect when versioning is enabled or suspended for the destination bucket. In this case, the AppendObject operation overwrites the existing object that has the same name as the destination object.
+ // If you do not specify the x-oss-forbid-overwrite header or set the header to false, an existing object that has the same name as the object that you want to append is overwritten.
+ // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name as the object that you want to append is not overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The method used to encrypt objects on the specified OSS server. Valid values: AES256, KMS, SM4
+ // AES256: Keys managed by OSS are used for encryption and decryption (SSE-OSS).
+ // KMS: Keys managed by Key Management Service (KMS) are used for encryption and decryption.
+ // SM4: The SM4 block cipher algorithm is used for encryption and decryption.
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // Specify the encryption algorithm for the object. Valid values: SM4.
+ // If this option is not specified, it indicates that the Object uses AES256 encryption algorithm.
+ // This option is only valid when x-oss-server-side-encryption is KMS.
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Object data.
+ Body io.Reader `input:"body,nop"`
+
+ // Specify the initial value of CRC64. If not set, the crc check is ignored.
+ InitHashCRC64 *string
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type AppendObjectResult struct {
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The position that must be provided in the next request, which is the current length of the object.
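+ // A hedged append-loop sketch from a caller's perspective ("chunks" and the
+ // bucket/key names are placeholders): feed the returned NextPosition back
+ // into the Position of the following AppendObject call:
+ //
+ //	pos := int64(0)
+ //	for _, chunk := range chunks {
+ //		res, err := client.AppendObject(ctx, &oss.AppendObjectRequest{
+ //			Bucket:   oss.Ptr("examplebucket"),
+ //			Key:      oss.Ptr("exampleobject"),
+ //			Position: oss.Ptr(pos),
+ //			Body:     bytes.NewReader(chunk),
+ //		})
+ //		if err != nil {
+ //			break // handle error
+ //		}
+ //		pos = res.NextPosition
+ //	}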
+ NextPosition int64 `output:"header,x-oss-next-append-position"` + + // The encryption method on the server side when an object is created. + // Valid values: AES256, KMS, SM4 + ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"` + + // The server side data encryption algorithm. + ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"` + + // Deprecated: Please use ServerSideEncryptionKeyId + SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS). + // This header is valid only when the x-oss-server-side-encryption header is set to KMS. + ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + ResultCommon +} + +// AppendObject Uploads an object by appending the object to an existing object. +// Objects created by using the AppendObject operation are appendable objects. +func (c *Client) AppendObject(ctx context.Context, request *AppendObjectRequest, optFns ...func(*Options)) (*AppendObjectResult, error) { + var err error + if request == nil { + request = &AppendObjectRequest{} + } + input := &OperationInput{ + OpName: "AppendObject", + Method: "POST", + Parameters: map[string]string{"append": ""}, + Bucket: request.Bucket, + Key: request.Key, + } + + marshalFns := []func(any, *OperationInput) error{ + addProgress, + c.updateContentType, + } + + unmarshalFns := []func(any, *OperationOutput) error{ + discardBody, + unmarshalHeader, + } + + // AppendObject is not idempotent, and cannot be retried + if c.hasFeature(FeatureEnableCRC64CheckUpload) && request.InitHashCRC64 != nil { + var init uint64 + init, err = strconv.ParseUint(ToString(request.InitHashCRC64), 10, 64) + if err != nil { + return nil, NewErrParamInvalid("request.InitHashCRC64") + } + var w io.Writer = NewCRC64(init) + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + unmarshalFns = append(unmarshalFns, func(result any, output *OperationOutput) error { + return checkResponseHeaderCRC64(fmt.Sprint(w.(hash.Hash64).Sum64()), output.Headers) + }) + } + + if err = c.marshalInput(request, input, marshalFns...); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AppendObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type DeleteObjectResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. + DeleteMarker bool `output:"header,x-oss-delete-marker"` + + ResultCommon +} + +// DeleteObject Deletes an object. 
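+// A minimal sketch from a caller's perspective (bucket and key names here
+// are placeholders):
+//
+//	_, err := client.DeleteObject(ctx, &oss.DeleteObjectRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("exampleobject"),
+//	})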
+func (c *Client) DeleteObject(ctx context.Context, request *DeleteObjectRequest, optFns ...func(*Options)) (*DeleteObjectResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteObjectRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteObject",
+ Method: "DELETE",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &DeleteObjectResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type DeleteMultipleObjectsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The size of the data in the HTTP message body. Unit: bytes.
+ ContentLength int64 `input:"header,Content-Length"`
+
+ // The container that stores information about the objects that you want to delete.
+ Objects []DeleteObject `input:"nop,objects,required"`
+
+ // Specifies whether to enable the Quiet return mode.
+ // The DeleteMultipleObjects operation provides two return modes. Valid values: true, false
+ Quiet bool
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type DeleteObject struct {
+ // The name of the object that you want to delete.
+ Key *string `xml:"Key"`
+
+ // The version ID of the object that you want to delete.
+ VersionId *string `xml:"VersionId"`
+}
+
+type DeleteMultipleObjectsResult struct {
+ // The container that stores information about the deleted objects.
+ DeletedObjects []DeletedInfo `xml:"Deleted"`
+
+ // The encoding type of the name of the deleted object in the response.
+ // If encoding-type is specified in the request, the object name is encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ ResultCommon
+}
+
+type DeletedInfo struct {
+ // The name of the deleted object.
+ Key *string `xml:"Key"`
+
+ // The version ID of the object that you deleted.
+ VersionId *string `xml:"VersionId"`
+
+ // Indicates whether the deleted version is a delete marker.
+ DeleteMarker bool `xml:"DeleteMarker"`
+
+ // The version ID of the delete marker.
+ DeleteMarkerVersionId *string `xml:"DeleteMarkerVersionId"`
+}
+
+// DeleteMultipleObjects Deletes multiple objects from a bucket.
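+// A minimal sketch from a caller's perspective (names are placeholders);
+// a single request can delete at most 1000 objects:
+//
+//	res, err := client.DeleteMultipleObjects(ctx, &oss.DeleteMultipleObjectsRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Objects: []oss.DeleteObject{
+//			{Key: oss.Ptr("object-1")},
+//			{Key: oss.Ptr("object-2")},
+//		},
+//	})
+//	if err == nil {
+//		for _, d := range res.DeletedObjects {
+//			_ = d // inspect Key, VersionId, DeleteMarker
+//		}
+//	}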
+func (c *Client) DeleteMultipleObjects(ctx context.Context, request *DeleteMultipleObjectsRequest, optFns ...func(*Options)) (*DeleteMultipleObjectsResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteMultipleObjectsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteMultipleObjects",
+ Method: "POST",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "delete": "",
+ "encoding-type": "url",
+ },
+ Bucket: request.Bucket,
+ }
+ if err = c.marshalInput(request, input, marshalDeleteObjects, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &DeleteMultipleObjectsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+type HeadObjectRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The version ID of the source object.
+ VersionId *string `input:"query,versionId"`
+
+ // If the ETag specified in the request matches the ETag value of the object,
+ // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned.
+ IfMatch *string `input:"header,If-Match"`
+
+ // If the ETag specified in the request does not match the ETag value of the object,
+ // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned.
+ IfNoneMatch *string `input:"header,If-None-Match"`
+
+ // If the time specified in this header is earlier than the object modified time or is invalid,
+ // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfModifiedSince *string `input:"header,If-Modified-Since"`
+
+ // If the time specified in this header is the same as or later than the object modified time,
+ // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfUnmodifiedSince *string `input:"header,If-Unmodified-Since"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type HeadObjectResult struct {
+ // Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+ ContentLength int64 `output:"header,Content-Length"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `output:"header,Content-Type"`
+
+ // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+ ETag *string `output:"header,ETag"`
+
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `output:"header,Last-Modified,time"`
+
+ // The storage class of the object.
+ StorageClass *string `output:"header,x-oss-storage-class"`
+
+ // Content-Md5 for the uploaded object.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // A map of metadata to store with the object.
+ Metadata map[string]string `output:"header,x-oss-meta-,usermeta"`
+
+ // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding,
+ // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request.
+ // The x-oss-server-side-encryption header is included in the response to indicate
+ // the encryption algorithm used to encrypt the object on the server.
+ ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm.
+ ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The type of the object.
+ ObjectType *string `output:"header,x-oss-object-type"`
+
+ // The position for the next append operation.
+ // If the type of the object is Appendable, this header is included in the response.
+ NextAppendPosition *string `output:"header,x-oss-next-append-position"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The lifecycle information about the object.
+ // If lifecycle rules are configured for the object, this header is included in the response.
+ // This header contains the following parameters: expiry-date that indicates the expiration time of the object,
+ // and rule-id that indicates the ID of the matched lifecycle rule.
+ Expiration *string `output:"header,x-oss-expiration"`
+
+ // The status of the object when you restore an object.
+ // If the storage class of the bucket is Archive and a RestoreObject request is submitted,
+ // this header is included in the response to indicate the restoration status of the object.
+ Restore *string `output:"header,x-oss-restore"`
+
+ // The result of an event notification that is triggered for the object.
+ ProcessStatus *string `output:"header,x-oss-process-status"`
+
+ // The requester. This header is included in the response if the pay-by-requester mode
+ // is enabled for the bucket and the requester is not the bucket owner. The value of this header is requester
+ RequestCharged *string `output:"header,x-oss-request-charged"`
+
+ // The number of tags added to the object.
+ // This header is included in the response only when you have read permissions on tags.
+ TaggingCount int32 `output:"header,x-oss-tagging-count"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The origins allowed for cross-origin resource sharing (CORS).
+ // If a CORS rule is configured for the bucket that stores the object and the Origin header
+ // in the request meets the CORS rule, this header is included in the response.
+ AllowOrigin *string `output:"header,Access-Control-Allow-Origin"`
+
+ // The methods allowed for CORS. If a CORS rule is configured for the bucket that stores the object
+ // and the Access-Control-Request-Method header in the request meets the CORS rule, this header is included in the response.
+ AllowMethods *string `output:"header,Access-Control-Allow-Methods"`
+
+ // The maximum caching period for CORS. If a CORS rule is configured for the bucket that stores
+ // the object and the request meets the CORS rule, this header is included in the response.
+ AllowAge *string `output:"header,Access-Control-Allow-Age"`
+
+ // The headers allowed for CORS. If a CORS rule is configured for the bucket that stores
+ // the object and the request meets the CORS rule, this header is included in the response
+ AllowHeaders *string `output:"header,Access-Control-Allow-Headers"`
+
+ // The headers that can be accessed by JavaScript applications on the client.
+ // If a CORS rule is configured for the bucket that stores the object and the request meets
+ // the CORS rule, this header is included in the response
+ ExposeHeaders *string `output:"header,Access-Control-Expose-Headers"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `output:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `output:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `output:"header,Content-Encoding"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `output:"header,Expires"`
+
+ // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules.
+ TransitionTime *time.Time `output:"header,x-oss-transition-time,time"`
+
+ ResultCommon
+}
+
+// HeadObject Queries the metadata of an object in a bucket.
+// Only the metadata, not the content, of the object is returned.
+func (c *Client) HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) {
+ var err error
+ if request == nil {
+ request = &HeadObjectRequest{}
+ }
+ input := &OperationInput{
+ OpName: "HeadObject",
+ Method: "HEAD",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &HeadObjectResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type GetObjectMetaRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The version ID of the source object.
+ VersionId *string `input:"query,versionId"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type GetObjectMetaResult struct {
+ // Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+ ContentLength int64 `output:"header,Content-Length"`
+
+ // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+ ETag *string `output:"header,ETag"`
+
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `output:"header,Last-Modified,time"`
+
+ // The time when the object was last accessed.
+ LastAccessTime *time.Time `output:"header,x-oss-last-access-time,time"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules.
+ TransitionTime *time.Time `output:"header,x-oss-transition-time,time"`
+
+ ResultCommon
+}
+
+// GetObjectMeta Queries the metadata of an object, including ETag, Size, and LastModified.
+// The content of the object is not returned.
+func (c *Client) GetObjectMeta(ctx context.Context, request *GetObjectMetaRequest, optFns ...func(*Options)) (*GetObjectMetaResult, error) {
+ var err error
+ if request == nil {
+ request = &GetObjectMetaRequest{}
+ }
+ input := &OperationInput{
+ OpName: "GetObjectMeta",
+ Method: "HEAD",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "objectMeta": "",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &GetObjectMetaResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type RestoreObjectRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The version ID of the source object.
+ VersionId *string `input:"query,versionId"`
+
+ // The container that stores information about the RestoreObject request.
+ RestoreRequest *RestoreRequest `input:"body,RestoreRequest,xml"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type RestoreRequest struct {
+ // The duration within which the restored object remains in the restored state.
+ Days int32 `xml:"Days"`
+
+ // The restoration priority of Cold Archive or Deep Cold Archive objects. Valid values: Expedited, Standard, Bulk
+ Tier *string `xml:"JobParameters>Tier"`
+}
+
+type RestoreObjectResult struct {
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The restoration priority.
+ // This header is displayed only for the Cold Archive or Deep Cold Archive object in the restored state.
+ RestorePriority *string `output:"header,x-oss-object-restore-priority"`
+
+ ResultCommon
+}
+
+// RestoreObject Restores Archive, Cold Archive, or Deep Cold Archive objects.
+func (c *Client) RestoreObject(ctx context.Context, request *RestoreObjectRequest, optFns ...func(*Options)) (*RestoreObjectResult, error) {
+ var err error
+ if request == nil {
+ request = &RestoreObjectRequest{}
+ }
+ input := &OperationInput{
+ OpName: "RestoreObject",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "restore": "",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &RestoreObjectResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type PutObjectAclRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The access control list (ACL) of the object. + Acl ObjectACLType `input:"header,x-oss-object-acl,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type PutObjectAclResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutObjectAcl You can call this operation to modify the access control list (ACL) of an object. +func (c *Client) PutObjectAcl(ctx context.Context, request *PutObjectAclRequest, optFns ...func(*Options)) (*PutObjectAclResult, error) { + var err error + if request == nil { + request = &PutObjectAclRequest{} + } + input := &OperationInput{ + OpName: "PutObjectAcl", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "acl": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectAclResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetObjectAclRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetObjectAclResult struct { + // The ACL of the object. Default value: default. + ACL *string `xml:"AccessControlList>Grant"` + + // The container that stores information about the object owner. + Owner *Owner `xml:"Owner"` + + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// GetObjectAcl Queries the access control list (ACL) of an object in a bucket. +func (c *Client) GetObjectAcl(ctx context.Context, request *GetObjectAclRequest, optFns ...func(*Options)) (*GetObjectAclResult, error) { + var err error + if request == nil { + request = &GetObjectAclRequest{} + } + input := &OperationInput{ + OpName: "GetObjectAcl", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "acl": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type InitiateMultipartUploadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The encoding type of the object names in the response. 
+ // Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `input:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `input:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `input:"header,Content-Encoding"`
+
+ // A standard MIME type describing the format of the contents.
+ ContentType *string `input:"header,Content-Type"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `input:"header,Expires"`
+
+ // Specifies whether the InitiateMultipartUpload operation overwrites the existing object that has the same name as the object that you want to upload. If versioning is enabled or suspended for the bucket to which you want to upload the object, the x-oss-forbid-overwrite header does not take effect. As a result, the object that is uploaded by calling the InitiateMultipartUpload operation overwrites the existing object that has the same name.
+ // If you do not specify the x-oss-forbid-overwrite header or you set the x-oss-forbid-overwrite header to false, the operation overwrites an existing object that has the same name.
+ // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name cannot be overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The server-side encryption method that is used to encrypt each part of the object that you want to upload. Valid values: AES256, KMS, SM4.
+ // If you specify this header in the request, this header is included in the response.
+ // OSS uses the method specified by this header to encrypt each uploaded part.
+ // When you download the object, the x-oss-server-side-encryption header is included in the response and the header value is set to the method that is used to encrypt the object.
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm. Valid values: SM4
+ // If this option is not specified, it indicates that the Object uses AES256 encryption algorithm.
+ // This option is only valid when x-oss-server-side-encryption is KMS.
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // The total size when using client side encryption, only valid in EncryptionClient
+ CSEDataSize *int64
+
+ // The part size when using client side encryption, only valid in EncryptionClient
+ // CSEPartSize must be aligned to the secret iv length
+ CSEPartSize *int64
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ // To disable the feature that Content-Type is automatically added based on the object name if not specified.
+ DisableAutoDetectMimeType bool
+
+ RequestCommon
+}
+
+type InitiateMultipartUploadResult struct {
+ // The name of the bucket to which the object is uploaded by the multipart upload task.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the object that is uploaded by the multipart upload task.
+ Key *string `xml:"Key"`
+
+ // The upload ID that uniquely identifies the multipart upload task.
+ UploadId *string `xml:"UploadId"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `xml:"EncodingType"`
+
+ // The encryption context for multipart upload when using client side encryption, only valid in EncryptionClient
+ CSEMultiPartContext *EncryptionMultiPartContext
+
+ ResultCommon
+}
+
+// InitiateMultipartUpload Initiates a multipart upload task before you can upload data in parts to Object Storage Service (OSS).
+func (c *Client) InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &InitiateMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "InitiateMultipartUpload",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "uploads": "",
+ "encoding-type": "url",
+ },
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ updateContentMd5,
+ }
+ if !request.DisableAutoDetectMimeType {
+ marshalFns = append(marshalFns, c.updateContentType)
+ }
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &InitiateMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type UploadPartRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // Each uploaded part is identified by a number.
+ // Value: 1-10000
+ // The size limit of a single part is between 100 KB and 5 GB.
+ PartNumber int32 `input:"query,partNumber,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The MD5 hash of the object that you want to upload.
+ ContentMD5 *string `input:"header,Content-MD5"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Object data.
+ Body io.Reader `input:"body,nop"`
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // The size of the data in the HTTP message body. Unit: bytes.
+ ContentLength *int64 `input:"header,Content-Length"`
+
+ // The encryption context for multipart upload when using client side encryption, only valid in EncryptionClient
+ CSEMultiPartContext *EncryptionMultiPartContext
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type UploadPartResult struct {
+ // Entity tag for the uploaded part.
+ ETag *string `output:"header,ETag"`
+
+ // The MD5 hash of the part that you want to upload.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // The 64-bit CRC value of the part.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ ResultCommon
+}
+
+// UploadPart Uploads data in parts (blocks) based on the specified object name and uploadId.
+func (c *Client) UploadPart(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+ var err error
+ if request == nil {
+ request = &UploadPartRequest{}
+ }
+ input := &OperationInput{
+ OpName: "UploadPart",
+ Method: "PUT",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ addProgress,
+ c.addCrcCheck,
+ }
+
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &UploadPartResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type UploadPartCopyRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // Each uploaded part is identified by a number.
+ // Value: 1-10000
+ // The size limit of a single part is between 100 KB and 5 GB.
+ PartNumber int32 `input:"query,partNumber,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The name of the source bucket.
+ SourceBucket *string `input:"nop,bucket"`
+
+ // The path of the source object.
+ SourceKey *string `input:"nop,key,required"`
+
+ // The version ID of the source object.
+ SourceVersionId *string `input:"nop,versionId"`
+
+ // The range of bytes to copy data from the source object.
+ Range *string `input:"header,x-oss-copy-source-range"`
+
+ // The copy operation condition. If the ETag value of the source object is
+ // the same as the ETag value provided by the user, OSS copies data. Otherwise,
+ // OSS returns 412 Precondition Failed.
+ IfMatch *string `input:"header,x-oss-copy-source-if-match"`
+
+ // The object transfer condition. If the input ETag value does not match the ETag value of the object,
+ // the system transfers the object normally and returns 200 OK. Otherwise, OSS returns 304 Not Modified.
+ IfNoneMatch *string `input:"header,x-oss-copy-source-if-none-match"`
+
+ // The object transfer condition. If the specified time is earlier than the actual modified time of the object,
+ // the system transfers the object normally and returns 200 OK. Otherwise, OSS returns 304 Not Modified.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfModifiedSince *string `input:"header,x-oss-copy-source-if-modified-since"`
+
+ // The object transfer condition. If the specified time is the same as or later than the actual modified time of the object,
+ // OSS transfers the object normally and returns 200 OK. Otherwise, OSS returns 412 Precondition Failed.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfUnmodifiedSince *string `input:"header,x-oss-copy-source-if-unmodified-since"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type UploadPartCopyResult struct {
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `xml:"LastModified"`
+
+ // Entity tag for the uploaded part.
+ ETag *string `xml:"ETag"`
+
+ // The version ID of the source object.
+ VersionId *string `output:"header,x-oss-copy-source-version-id"`
+
+ ResultCommon
+}
+
+// UploadPartCopy You can call this operation to copy data from an existing object to upload a part by adding an x-oss-copy-source header to UploadPart.
+func (c *Client) UploadPartCopy(ctx context.Context, request *UploadPartCopyRequest, optFns ...func(*Options)) (*UploadPartCopyResult, error) {
+ var err error
+ if request == nil {
+ request = &UploadPartCopyRequest{}
+ }
+ input := &OperationInput{
+ OpName: "UploadPartCopy",
+ Method: "PUT",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Headers: map[string]string{
+ "x-oss-copy-source": encodeSourceObject(request),
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &UploadPartCopyResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type CompleteMultipartUploadRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // Specifies whether the object with the same object name is overwritten when you call the CompleteMultipartUpload operation.
+ // If x-oss-forbid-overwrite is not specified or set to false, existing objects can be overwritten by objects that have the same names.
+ // If x-oss-forbid-overwrite is set to true, existing objects cannot be overwritten by objects that have the same names.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // Specifies whether to list all parts that are uploaded by using the current upload ID. Valid value: yes
+ CompleteAll *string `input:"header,x-oss-complete-all"`
+
+ // The container that stores the content of the CompleteMultipartUpload
+ CompleteMultipartUpload *CompleteMultipartUpload `input:"body,CompleteMultipartUpload,xml"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // A callback parameter is a Base64-encoded string that contains multiple fields in the JSON format.
+ Callback *string `input:"header,x-oss-callback"`
+
+ // Configure custom parameters by using the callback-var parameter.
+ CallbackVar *string `input:"header,x-oss-callback-var"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type UploadPart struct {
+ // The number that identifies a part.
+ PartNumber int32 `xml:"PartNumber"`
+
+ // The ETag values that are returned by OSS after parts are uploaded.
+ ETag *string `xml:"ETag"`
+}
+
+type CompleteMultipartUpload struct {
+ Parts []UploadPart `xml:"Part"`
+}
+type UploadParts []UploadPart
+
+func (slice UploadParts) Len() int {
+ return len(slice)
+}
+func (slice UploadParts) Less(i, j int) bool {
+ return slice[i].PartNumber < slice[j].PartNumber
+}
+func (slice UploadParts) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+type CompleteMultipartUploadResult struct {
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The encoding type of the name of the object in the response.
+ // If encoding-type is specified in the request, the object name is encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ // The URL that is used to access the uploaded object.
+ Location *string `xml:"Location"`
+
+ // The name of the bucket.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the uploaded object.
+ Key *string `xml:"Key"`
+
+ // The ETag that is generated when an object is created.
+ // ETags are used to identify the content of objects.
+ ETag *string `xml:"ETag"`
+
+ CallbackResult map[string]any
+
+ ResultCommon
+}
+
+// CompleteMultipartUpload Completes the multipart upload task of an object after all parts of the object are uploaded.
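+// A hedged end-to-end sketch from a caller's perspective (placeholder names,
+// a single part, error handling elided):
+//
+//	init, _ := client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("exampleobject"),
+//	})
+//	up, _ := client.UploadPart(ctx, &oss.UploadPartRequest{
+//		Bucket:     init.Bucket,
+//		Key:        init.Key,
+//		UploadId:   init.UploadId,
+//		PartNumber: 1,
+//		Body:       bytes.NewReader(data),
+//	})
+//	_, err := client.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
+//		Bucket:   init.Bucket,
+//		Key:      init.Key,
+//		UploadId: init.UploadId,
+//		CompleteMultipartUpload: &oss.CompleteMultipartUpload{
+//			Parts: []oss.UploadPart{{PartNumber: 1, ETag: up.ETag}},
+//		},
+//	})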
+func (c *Client) CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &CompleteMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "CompleteMultipartUpload",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ },
+ }
+
+ if request.CompleteMultipartUpload != nil && len(request.CompleteMultipartUpload.Parts) > 0 {
+ sort.Sort(UploadParts(request.CompleteMultipartUpload.Parts))
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ updateContentMd5,
+ }
+ unmarshalFns := []func(result any, output *OperationOutput) error{
+ unmarshalHeader,
+ }
+
+ if request.Callback != nil {
+ marshalFns = append(marshalFns, addCallback)
+ unmarshalFns = append(unmarshalFns, unmarshalCallbackBody)
+ } else {
+ unmarshalFns = append(unmarshalFns, unmarshalBodyXml, unmarshalEncodeType)
+ }
+
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &CompleteMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+type AbortMultipartUploadRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type AbortMultipartUploadResult struct {
+ ResultCommon
+}
+
+// AbortMultipartUpload Cancels a multipart upload task and deletes the parts uploaded in the task.
+func (c *Client) AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &AbortMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "AbortMultipartUpload",
+ Method: "DELETE",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &AbortMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, discardBody); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type ListMultipartUploadsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The character that is used to group objects by name. If you specify the delimiter parameter in the request,
+ // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from
+ // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes.
+ Delimiter *string `input:"query,delimiter"`
+
+ // The encoding type of the content in the response.
+ // Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // This parameter is used together with the upload-id-marker parameter to specify
+ // the position from which the next list begins.
+ KeyMarker *string `input:"query,key-marker"`
+
+ // The maximum number of multipart upload tasks that can be returned for the current request.
+ // Default value: 1000. Maximum value: 1000.
+ MaxUploads int32 `input:"query,max-uploads"`
+
+ // The prefix that the names of the returned objects must contain.
+ Prefix *string `input:"query,prefix"`
+
+ // The upload ID of the multipart upload task after which the list begins.
+ // This parameter is used together with the key-marker parameter.
+ UploadIdMarker *string `input:"query,upload-id-marker"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type ListMultipartUploadsResult struct {
+ // The method used to encode the object name in the response.
+ // If encoding-type is specified in the request, values of those elements including
+ // Delimiter, KeyMarker, Prefix, NextKeyMarker, and Key are encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ // The name of the bucket.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the object that corresponds to the multipart upload task after which the list begins.
+ KeyMarker *string `xml:"KeyMarker"`
+
+ // The upload ID of the multipart upload task after which the list begins.
+ UploadIdMarker *string `xml:"UploadIdMarker"`
+
+ // The NextKeyMarker value that is used for the KeyMarker value in
+ // the next request if the response does not contain all required results.
+ NextKeyMarker *string `xml:"NextKeyMarker"`
+
+ // The NextUploadIdMarker value that is used for the UploadIdMarker value in
+ // the next request if the response does not contain all required results.
+ NextUploadIdMarker *string `xml:"NextUploadIdMarker"`
+
+ // The character that is used to group objects by name.
+ Delimiter *string `xml:"Delimiter"`
+
+ // The prefix contained in the returned object names.
+ Prefix *string `xml:"Prefix"`
+
+ // The maximum number of multipart upload tasks returned by OSS.
+ MaxUploads int32 `xml:"MaxUploads"`
+
+ // Indicates whether the list of multipart upload tasks returned in the response is truncated.
+ // true: Only part of the results are returned this time.
+ // false: All results are returned.
+ IsTruncated bool `xml:"IsTruncated"`
+
+ Uploads []Upload `xml:"Upload"`
+
+ ResultCommon
+}
+
+type Upload struct {
+ // The name of the object for which a multipart upload task was initiated.
+ Key *string `xml:"Key"`
+
+ // The ID of the multipart upload task
+ UploadId *string `xml:"UploadId"`
+
+ // The time when the multipart upload task was initialized.
+ Initiated *time.Time `xml:"Initiated"`
+}
+
+// ListMultipartUploads Lists all multipart upload tasks in progress. The tasks are not completed or canceled.
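+// A hedged pagination sketch (placeholder bucket name): pass the returned
+// NextKeyMarker and NextUploadIdMarker back into the request until
+// IsTruncated is false:
+//
+//	req := &oss.ListMultipartUploadsRequest{Bucket: oss.Ptr("examplebucket")}
+//	for {
+//		page, err := client.ListMultipartUploads(ctx, req)
+//		if err != nil {
+//			break // handle error
+//		}
+//		for _, u := range page.Uploads {
+//			_ = u // inspect u.Key, u.UploadId, u.Initiated
+//		}
+//		if !page.IsTruncated {
+//			break
+//		}
+//		req.KeyMarker = page.NextKeyMarker
+//		req.UploadIdMarker = page.NextUploadIdMarker
+//	}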
+func (c *Client) ListMultipartUploads(ctx context.Context, request *ListMultipartUploadsRequest, optFns ...func(*Options)) (*ListMultipartUploadsResult, error) {
+ var err error
+ if request == nil {
+ request = &ListMultipartUploadsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "ListMultipartUploads",
+ Method: "GET",
+ Bucket: request.Bucket,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ "uploads": "",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &ListMultipartUploadsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type ListPartsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The encoding type of the content in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The maximum number of parts that can be returned by OSS.
+ // Default value: 1000. Maximum value: 1000.
+ MaxParts int32 `input:"query,max-parts"`
+
+ // The position from which the list starts.
+ // All parts whose part numbers are greater than the value of this parameter are listed.
+ PartNumberMarker int32 `input:"query,part-number-marker"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type ListPartsResult struct {
+ // The method used to encode the object name in the response.
+ // If encoding-type is specified in the request, values of those elements including
+ // Delimiter, KeyMarker, Prefix, NextKeyMarker, and Key are encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ // The name of the bucket.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the object that corresponds to the multipart upload task after which the list begins.
+ Key *string `xml:"Key"`
+
+ // The ID of the upload task.
+ UploadId *string `xml:"UploadId"`
+
+ // The position from which the list starts.
+ // All parts whose part numbers are greater than the value of this parameter are listed.
+ PartNumberMarker int32 `xml:"PartNumberMarker"`
+
+ // The NextPartNumberMarker value that is used for the PartNumberMarker value in a subsequent
+ // request when the response does not contain all required results.
+ NextPartNumberMarker int32 `xml:"NextPartNumberMarker"`
+
+ // The maximum number of parts in the response.
+ MaxParts int32 `xml:"MaxParts"`
+
+ // Indicates whether the list of parts returned in the response has been truncated.
+ // true: Only part of the results are returned this time.
+ // false: All results are returned.
+ IsTruncated bool `xml:"IsTruncated"`
+
+ // The storage class of the object.
+ StorageClass *string `xml:"StorageClass"`
+
+ // The encrypted data key.
+ // The encrypted data key is a string encrypted by a customer master key and encoded in Base64.
+ // Only available in client-side encryption
+ ClientEncryptionKey *string `xml:"ClientEncryptionKey"`
+
+ // The initial value that is randomly generated for data encryption.
+ // The initial value is a string encrypted by a customer master key and encoded in Base64.
+ // Only available in client-side encryption
+ ClientEncryptionStart *string `xml:"ClientEncryptionStart"`
+
+ // The algorithm used to encrypt data.
+ // Only available in client-side encryption
+ ClientEncryptionCekAlg *string `xml:"ClientEncryptionCekAlg"`
+
+ // The algorithm used to encrypt the data key.
+ // Only available in client-side encryption
+ ClientEncryptionWrapAlg *string `xml:"ClientEncryptionWrapAlg"`
+
+ // The total size of the data to encrypt for multipart upload when init_multipart is called.
+ // Only available in client-side encryption
+ ClientEncryptionDataSize *int64 `xml:"ClientEncryptionDataSize"`
+
+ // The size of each part to encrypt for multipart upload when init_multipart is called.
+ // Only available in client-side encryption
+ ClientEncryptionPartSize *int64 `xml:"ClientEncryptionPartSize"`
+
+ Parts []Part `xml:"Part"`
+
+ ResultCommon
+}
+
+type Part struct {
+ // The number that identifies a part.
+ PartNumber int32 `xml:"PartNumber"`
+
+ // The ETag value of the content of the uploaded part.
+ ETag *string `xml:"ETag"`
+
+ // The time when the part was uploaded.
+ LastModified *time.Time `xml:"LastModified"`
+
+ // The size of the uploaded part.
+ Size int64 `xml:"Size"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `xml:"HashCrc64ecma"`
+}
+
+// ListParts Lists all parts that are uploaded by using a specified upload ID.
+func (c *Client) ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) {
+ var err error
+ if request == nil {
+ request = &ListPartsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "ListParts",
+ Method: "GET",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &ListPartsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type PutSymlinkRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The destination object to which the symbolic link points.
+ Target *string `input:"header,x-oss-symlink-target,required"`
+
+ // Specifies whether the PutSymlink operation overwrites the object that has the same name.
+ // If you do not specify the x-oss-forbid-overwrite header or if you set the x-oss-forbid-overwrite header to false, the object that has the same name is overwritten.
+ // If you set the x-oss-forbid-overwrite header to true, the object that has the same name cannot be overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The ACL of the object. Default value: default.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to symlink.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type PutSymlinkResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutSymlink Creates a symbolic link that points to a destination object. You can use the symbolic link to access the destination object. +func (c *Client) PutSymlink(ctx context.Context, request *PutSymlinkRequest, optFns ...func(*Options)) (*PutSymlinkResult, error) { + var err error + if request == nil { + request = &PutSymlinkRequest{} + } + input := &OperationInput{ + OpName: "PutSymlink", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "symlink": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutSymlinkResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetSymlinkRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetSymlinkResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // Indicates the target object that the symbol link directs to. + Target *string `output:"header,x-oss-symlink-target"` + + // Entity tag for the uploaded object. + ETag *string `output:"header,ETag"` + + // The metadata of the object that you want to symlink. + Metadata map[string]string `output:"header,x-oss-meta-,usermeta"` + + ResultCommon +} + +// GetSymlink Obtains a symbol link. To perform GetSymlink operations, you must have the read permission on the symbol link. +func (c *Client) GetSymlink(ctx context.Context, request *GetSymlinkRequest, optFns ...func(*Options)) (*GetSymlinkResult, error) { + var err error + if request == nil { + request = &GetSymlinkRequest{} + } + input := &OperationInput{ + OpName: "GetSymlink", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "symlink": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetSymlinkResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. 
+ VersionId *string `input:"query,versionId"` + + Tagging *Tagging `input:"body,Tagging,xml,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type Tagging struct { + // The container used to store a set of Tags. + TagSet *TagSet `xml:"TagSet"` +} + +type TagSet struct { + // The tags. + Tags []Tag `xml:"Tag"` +} + +type Tag struct { + // The key of a tag. + // * A tag key can be up to 64 bytes in length. + // * A tag key cannot start with `http://`, `https://`, or `Aliyun`. + // * A tag key must be UTF-8 encoded. + // * A tag key cannot be left empty. + Key *string `xml:"Key"` + + // The value of the tag that you want to add or modify. + // * A tag value can be up to 128 bytes in length. + // * A tag value must be UTF-8 encoded. + // * The tag value can be left empty. + Value *string `xml:"Value"` +} + +type PutObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutObjectTagging Adds tags to an object or updates the tags added to the object. Each tag added to an object is a key-value pair. +func (c *Client) PutObjectTagging(ctx context.Context, request *PutObjectTaggingRequest, optFns ...func(*Options)) (*PutObjectTaggingResult, error) { + var err error + if request == nil { + request = &PutObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "PutObjectTagging", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // The container used to store the collection of tags. + Tags []Tag `xml:"TagSet>Tag"` + + ResultCommon +} + +// GetObjectTagging You can call this operation to query the tags of an object. 
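+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// bucket and object names are placeholders, c is a configured *Client):
+//
+//	result, err := c.GetObjectTagging(context.TODO(), &GetObjectTaggingRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("exampleobject"),
+//	})
+//	if err == nil {
+//		for _, tag := range result.Tags {
+//			fmt.Printf("%v=%v\n", ToString(tag.Key), ToString(tag.Value))
+//		}
+//	}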
+func (c *Client) GetObjectTagging(ctx context.Context, request *GetObjectTaggingRequest, optFns ...func(*Options)) (*GetObjectTaggingResult, error) { + var err error + if request == nil { + request = &GetObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "GetObjectTagging", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type DeleteObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// DeleteObjectTagging You can call this operation to delete the tags of a specified object. +func (c *Client) DeleteObjectTagging(ctx context.Context, request *DeleteObjectTaggingRequest, optFns ...func(*Options)) (*DeleteObjectTaggingResult, error) { + var err error + if request == nil { + request = &DeleteObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "DeleteObjectTagging", + Method: "DELETE", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ProcessObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Image processing parameters + Process *string `input:"x-oss-process,nop,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type ProcessObjectResult struct { + Bucket string `json:"bucket"` + FileSize int `json:"fileSize"` + Object string `json:"object"` + ProcessStatus string `json:"status"` + ResultCommon +} + +// ProcessObject apply process on the specified image file. 
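+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// the bucket, keys, and resize style are placeholders, and the sys/saveas
+// target is assumed to be URL-safe Base64-encoded per the OSS image-processing
+// convention):
+//
+//	target := base64.URLEncoding.EncodeToString([]byte("dest-image.jpg"))
+//	result, err := c.ProcessObject(context.TODO(), &ProcessObjectRequest{
+//		Bucket:  Ptr("examplebucket"),
+//		Key:     Ptr("src-image.jpg"),
+//		Process: Ptr("image/resize,w_100|sys/saveas,o_" + target),
+//	})
+//	// On success, result.Object and result.ProcessStatus describe the saved output.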
+func (c *Client) ProcessObject(ctx context.Context, request *ProcessObjectRequest, optFns ...func(*Options)) (*ProcessObjectResult, error) { + var err error + if request == nil { + request = &ProcessObjectRequest{} + } + input := &OperationInput{ + OpName: "ProcessObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "x-oss-process": "", + }, + } + if err = c.marshalInput(request, input, addProcess, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ProcessObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyDefault, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type AsyncProcessObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Image async processing parameters + AsyncProcess *string `input:"x-async-oss-process,nop,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type AsyncProcessObjectResult struct { + EventId string `json:"EventId"` + RequestId string `json:"RequestId"` + TaskId string `json:"TaskId"` + ResultCommon +} + +// AsyncProcessObject apply async process on the specified image file. +func (c *Client) AsyncProcessObject(ctx context.Context, request *AsyncProcessObjectRequest, optFns ...func(*Options)) (*AsyncProcessObjectResult, error) { + var err error + if request == nil { + request = &AsyncProcessObjectRequest{} + } + input := &OperationInput{ + OpName: "AsyncProcessObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "x-oss-async-process": "", + }, + } + if err = c.marshalInput(request, input, addProcess, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AsyncProcessObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyDefault, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CleanRestoredObjectRequest struct { + // The name of the bucket + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type CleanRestoredObjectResult struct { + ResultCommon +} + +// CleanRestoredObject You can call this operation to clean an object restored from Archive or Cold Archive state. After that, the restored object returns to the frozen state. 
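+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// names are placeholders and error handling is elided):
+//
+//	_, err := c.CleanRestoredObject(context.TODO(), &CleanRestoredObjectRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("exampleobject"),
+//	})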
+func (c *Client) CleanRestoredObject(ctx context.Context, request *CleanRestoredObjectRequest, optFns ...func(*Options)) (*CleanRestoredObjectResult, error) { + var err error + if request == nil { + request = &CleanRestoredObjectRequest{} + } + input := &OperationInput{ + OpName: "CleanRestoredObject", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cleanRestoredObject": "", + }, + Bucket: request.Bucket, + Key: request.Key, + } + + input.OpMetadata.Set(signer.SubResource, []string{"cleanRestoredObject"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CleanRestoredObjectResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go new file mode 100644 index 000000000..40710bc5c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go @@ -0,0 +1,147 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PublicAccessBlockConfiguration struct { + // Specifies whether to enable Block Public Access.true: enables Block Public Access.false (default): disables Block Public Access. + BlockPublicAccess *bool `xml:"BlockPublicAccess"` +} + +type GetPublicAccessBlockRequest struct { + RequestCommon +} + +type GetPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetPublicAccessBlock Queries the Block Public Access configurations of OSS resources. +func (c *Client) GetPublicAccessBlock(ctx context.Context, request *GetPublicAccessBlockRequest, optFns ...func(*Options)) (*GetPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutPublicAccessBlockRequest struct { + // Request body. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutPublicAccessBlockResult struct { + ResultCommon +} + +// PutPublicAccessBlock Enables or disables Block Public Access for Object Storage Service (OSS) resources. 
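+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// error handling is elided):
+//
+//	_, err := c.PutPublicAccessBlock(context.TODO(), &PutPublicAccessBlockRequest{
+//		PublicAccessBlockConfiguration: &PublicAccessBlockConfiguration{
+//			BlockPublicAccess: Ptr(true),
+//		},
+//	})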
+func (c *Client) PutPublicAccessBlock(ctx context.Context, request *PutPublicAccessBlockRequest, optFns ...func(*Options)) (*PutPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeletePublicAccessBlockRequest struct { + RequestCommon +} + +type DeletePublicAccessBlockResult struct { + ResultCommon +} + +// DeletePublicAccessBlock Deletes the Block Public Access configurations of OSS resources. +func (c *Client) DeletePublicAccessBlock(ctx context.Context, request *DeletePublicAccessBlockRequest, optFns ...func(*Options)) (*DeletePublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeletePublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeletePublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeletePublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go new file mode 100644 index 000000000..aefb1f1b2 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go @@ -0,0 +1,72 @@ +package oss + +import ( + "context" +) + +type RegionInfo struct { + // The region ID. + Region *string `xml:"Region"` + + // The public endpoint of the region. + InternetEndpoint *string `xml:"InternetEndpoint"` + + // The internal endpoint of the region. + InternalEndpoint *string `xml:"InternalEndpoint"` + + // The acceleration endpoint of the region. The value is always oss-accelerate.aliyuncs.com. + AccelerateEndpoint *string `xml:"AccelerateEndpoint"` +} + +type RegionInfoList struct { + // The information about the regions. + RegionInfos []RegionInfo `xml:"RegionInfo"` +} + +type DescribeRegionsRequest struct { + // The region ID of the request. + Regions *string `input:"query,regions"` + + RequestCommon +} + +type DescribeRegionsResult struct { + // The information about the regions. + RegionInfoList *RegionInfoList `output:"body,RegionInfoList,xml"` + + ResultCommon +} + +// DescribeRegions Queries the endpoints of all supported regions or the endpoints of a specific region. 
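+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// the region ID is a placeholder, and leaving Regions unset lists all regions):
+//
+//	result, err := c.DescribeRegions(context.TODO(), &DescribeRegionsRequest{
+//		Regions: Ptr("oss-cn-hangzhou"),
+//	})
+//	if err == nil && result.RegionInfoList != nil {
+//		for _, info := range result.RegionInfoList.RegionInfos {
+//			fmt.Println(ToString(info.Region), ToString(info.InternetEndpoint))
+//		}
+//	}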
+func (c *Client) DescribeRegions(ctx context.Context, request *DescribeRegionsRequest, optFns ...func(*Options)) (*DescribeRegionsResult, error) { + var err error + if request == nil { + request = &DescribeRegionsRequest{} + } + input := &OperationInput{ + OpName: "DescribeRegions", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "regions": "", + }, + } + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DescribeRegionsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go new file mode 100644 index 000000000..24056e662 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go @@ -0,0 +1,740 @@ +package oss + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "encoding/xml" + "fmt" + "hash" + "hash/crc32" + "io" + "strconv" + "strings" +) + +// FrameType +const ( + DataFrameType = 8388609 + ContinuousFrameType = 8388612 + EndFrameType = 8388613 + MetaEndFrameCSVType = 8388614 + MetaEndFrameJSONType = 8388615 +) + +type CreateSelectObjectMetaRequest struct { + // The name of the bucket. + Bucket *string `input:"host,uploadId,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + MetaRequest any `input:"nop,meta-request,required"` + + RequestCommon +} + +type JsonMetaRequest struct { + InputSerialization *InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists"` +} + +type CsvMetaRequest struct { + InputSerialization *InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists"` +} + +type InputSerialization struct { + CSV *InputSerializationCSV `xml:"CSV"` + JSON *InputSerializationJSON `xml:"JSON"` + CompressionType *string `xml:"CompressionType"` +} + +type InputSerializationCSV struct { + RecordDelimiter *string `xml:"RecordDelimiter"` + FieldDelimiter *string `xml:"FieldDelimiter"` + QuoteCharacter *string `xml:"QuoteCharacter"` +} + +type InputSerializationJSON struct { + JSONType *string `xml:"Type"` +} + +type CreateSelectObjectMetaResult struct { + TotalScanned int64 + MetaStatus int + SplitsCount int32 + RowsCount int64 + ColumnsCount int32 + ErrorMsg string + ResultCommon +} + +type ReadFlagInfo struct { + OpenLine bool + ConsumedBytesLength int32 + EnablePayloadCrc bool + OutputRawData bool +} + +// CreateSelectObjectMeta You can call the CreateSelectObjectMeta operation to obtain information about an object, such as the total number of rows and the number of splits. 
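+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// names are placeholders and error handling is elided):
+//
+//	result, err := c.CreateSelectObjectMeta(context.TODO(), &CreateSelectObjectMetaRequest{
+//		Bucket:      Ptr("examplebucket"),
+//		Key:         Ptr("example.csv"),
+//		MetaRequest: &CsvMetaRequest{},
+//	})
+//	// result.RowsCount and result.SplitsCount describe the scanned CSV object.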
+func (c *Client) CreateSelectObjectMeta(ctx context.Context, request *CreateSelectObjectMetaRequest, optFns ...func(*Options)) (*CreateSelectObjectMetaResult, error) { + var err error + if request == nil { + request = &CreateSelectObjectMetaRequest{} + } + input := &OperationInput{ + OpName: "CreateSelectObjectMeta", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + } + if err = c.marshalInput(request, input, marshalMetaRequest, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &CreateSelectObjectMetaResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyCreateSelectObjectMeta); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +func marshalMetaRequest(request any, input *OperationInput) error { + var builder strings.Builder + var process string + switch r := request.(*CreateSelectObjectMetaRequest).MetaRequest.(type) { + case *JsonMetaRequest: + process = "json/meta" + builder.WriteString("") + if r.InputSerialization != nil { + bs, err := xml.Marshal(r.InputSerialization) + if err != nil { + return err + } + builder.WriteString(string(bs)) + } + if r.OverwriteIfExists != nil { + builder.WriteString("") + builder.WriteString(strconv.FormatBool(*r.OverwriteIfExists)) + builder.WriteString("") + } + builder.WriteString("") + case *CsvMetaRequest: + r.encodeBase64() + process = "csv/meta" + builder.WriteString("") + if r.InputSerialization != nil { + bs, err := xml.Marshal(r.InputSerialization) + if err != nil { + return err + } + builder.WriteString(string(bs)) + } + if r.OverwriteIfExists != nil { + builder.WriteString("") + builder.WriteString(strconv.FormatBool(*r.OverwriteIfExists)) + builder.WriteString("") + } + builder.WriteString("") + default: + return NewErrParamInvalid("MetaRequest") + } + input.Body = strings.NewReader(builder.String()) + if input.Parameters == nil { + input.Parameters = map[string]string{} + } + input.Parameters["x-oss-process"] = process + return nil +} + +func unmarshalBodyCreateSelectObjectMeta(result any, output *OperationOutput) error { + var err error + if output.Body != nil { + defer output.Body.Close() + readerWrapper := &ReaderWrapper{ + Body: output.Body, + WriterForCheckCrc32: crc32.NewIEEE(), + } + if _, err = io.ReadAll(readerWrapper); err != nil { + return err + } + result.(*CreateSelectObjectMetaResult).TotalScanned = readerWrapper.TotalScanned + result.(*CreateSelectObjectMetaResult).MetaStatus = int(readerWrapper.Status) + result.(*CreateSelectObjectMetaResult).SplitsCount = readerWrapper.SplitsCount + result.(*CreateSelectObjectMetaResult).RowsCount = readerWrapper.RowsCount + result.(*CreateSelectObjectMetaResult).ColumnsCount = readerWrapper.ColumnsCount + result.(*CreateSelectObjectMetaResult).ErrorMsg = readerWrapper.ErrorMsg + } + return err +} + +type SelectObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,uploadId,required"` + + // The name of the object. 
+ Key *string `input:"path,key,required"` + + SelectRequest *SelectRequest `input:"nop,SelectRequest,required"` + + RequestCommon +} + +type SelectObjectResult struct { + Body io.ReadCloser + ResultCommon +} + +type SelectRequest struct { + Expression *string `xml:"Expression"` + InputSerializationSelect InputSerializationSelect `xml:"InputSerialization"` + OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"` + SelectOptions *SelectOptions `xml:"Options"` +} + +type OutputSerializationSelect struct { + CsvBodyOutput *CSVSelectOutput `xml:"CSV"` + JsonBodyOutput *JSONSelectOutput `xml:"JSON"` + OutputRawData *bool `xml:"OutputRawData"` + KeepAllColumns *bool `xml:"KeepAllColumns"` + EnablePayloadCrc *bool `xml:"EnablePayloadCrc"` + OutputHeader *bool `xml:"OutputHeader"` +} +type CSVSelectOutput struct { + RecordDelimiter *string `xml:"RecordDelimiter"` + FieldDelimiter *string `xml:"FieldDelimiter"` +} +type JSONSelectOutput struct { + RecordDelimiter *string `xml:"RecordDelimiter"` +} + +type SelectOptions struct { + SkipPartialDataRecord *bool `xml:"SkipPartialDataRecord"` + MaxSkippedRecordsAllowed *int `xml:"MaxSkippedRecordsAllowed"` +} + +type InputSerializationSelect struct { + CsvBodyInput *CSVSelectInput `xml:"CSV"` + JsonBodyInput *JSONSelectInput `xml:"JSON"` + CompressionType *string `xml:"CompressionType"` +} + +type CSVSelectInput struct { + FileHeaderInfo *string `xml:"FileHeaderInfo"` + RecordDelimiter *string `xml:"RecordDelimiter"` + FieldDelimiter *string `xml:"FieldDelimiter"` + QuoteCharacter *string `xml:"QuoteCharacter"` + CommentCharacter *string `xml:"CommentCharacter"` + Range *string `xml:"Range"` + SplitRange *string + AllowQuotedRecordDelimiter *bool `xml:"AllowQuotedRecordDelimiter"` +} + +type JSONSelectInput struct { + JSONType *string `xml:"Type"` + Range *string `xml:"Range"` + ParseJSONNumberAsString *bool `xml:"ParseJsonNumberAsString"` + SplitRange *string +} + +// SelectObject Executes SQL statements to perform operations on an object and obtains the execution results. 
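+//
+// A minimal usage sketch (for illustration only, not from the upstream docs;
+// names, the SQL expression, and the CSV settings are placeholders, and error
+// handling is elided):
+//
+//	result, err := c.SelectObject(context.TODO(), &SelectObjectRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("example.csv"),
+//		SelectRequest: &SelectRequest{
+//			Expression: Ptr("select * from ossobject limit 10"),
+//			InputSerializationSelect: InputSerializationSelect{
+//				CsvBodyInput: &CSVSelectInput{FileHeaderInfo: Ptr("Use")},
+//			},
+//		},
+//	})
+//	if err == nil {
+//		defer result.Body.Close()
+//		rows, _ := io.ReadAll(result.Body) // rows holds the selected records
+//		_ = rows
+//	}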
+func (c *Client) SelectObject(ctx context.Context, request *SelectObjectRequest, optFns ...func(*Options)) (*SelectObjectResult, error) { + var err error + if request == nil { + request = &SelectObjectRequest{} + } + input := &OperationInput{ + OpName: "SelectObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + } + if err = c.marshalInput(request, input, marshalSelectObjectRequest, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &SelectObjectResult{} + err = unmarshalResultSelectObject(request, result, output) + if err != nil { + return nil, err + } + if err = c.unmarshalOutput(result, output); err != nil { + return nil, err + } + return result, err +} + +func marshalSelectObjectRequest(request any, input *OperationInput) error { + var process string + if request.(*SelectObjectRequest).SelectRequest != nil { + if request.(*SelectObjectRequest).SelectRequest.InputSerializationSelect.JsonBodyInput == nil { + process = "csv/select" + } else { + process = "json/select" + } + request.(*SelectObjectRequest).SelectRequest.encodeBase64() + } + if input.Parameters == nil { + input.Parameters = map[string]string{} + } + input.Parameters["x-oss-process"] = process + bs, err := xml.Marshal(request.(*SelectObjectRequest).SelectRequest) + if err != nil { + return err + } + input.Body = strings.NewReader(string(bs)) + return err +} + +func unmarshalResultSelectObject(request *SelectObjectRequest, result *SelectObjectResult, output *OperationOutput) error { + var err error + if output.Body != nil { + readerWrapper := &ReaderWrapper{ + Body: output.Body, + WriterForCheckCrc32: crc32.NewIEEE(), + } + if request.SelectRequest.OutputSerializationSelect.EnablePayloadCrc != nil && *request.SelectRequest.OutputSerializationSelect.EnablePayloadCrc == true { + readerWrapper.EnablePayloadCrc = true + } + readerWrapper.OutputRawData = strings.ToUpper(output.Headers.Get("x-oss-select-output-raw")) == "TRUE" + result.Body = readerWrapper + } + return err +} + +// The adapter class for Select object's response. +// The response consists of frames. Each frame has the following format: + +// Type | Payload Length | Header Checksum | Payload | Payload Checksum + +// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes---------> +// And we have three kind of frames. 
+// Data Frame: +// Type:8388609 +// Payload: Offset | Data +// <-8 bytes> + +// Continuous Frame +// Type:8388612 +// Payload: Offset (8-bytes) + +// End Frame +// Type:8388613 +// Payload: Offset | total scanned bytes | http status code | error message +// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe---> + +// SelectObjectResponse defines HTTP response from OSS SelectObject +//type SelectObjectResponse struct { +// Body io.ReadCloser +// Frame SelectObjectResult +// ReadTimeOut uint +// Finish bool +// ResultCommon +//} + +// ReaderWrapper defines HTTP response from OSS SelectObject +type ReaderWrapper struct { + Body io.ReadCloser + Version byte + FrameType int32 + PayloadLength int32 + HeaderCheckSum uint32 + Offset uint64 + Data string + ClientCRC32 uint32 + ServerCRC32 uint32 + WriterForCheckCrc32 hash.Hash32 + HTTPStatusCode int32 + TotalScanned int64 + Status int32 + SplitsCount int32 + RowsCount int64 + ColumnsCount int32 + ErrorMsg string + PayloadChecksum uint32 + ReadFlagInfo + Finish bool +} + +func (rw *ReaderWrapper) Read(p []byte) (n int, err error) { + n, err = rw.readFrames(p) + return +} + +// Close http response body +func (rw *ReaderWrapper) Close() error { + return rw.Body.Close() +} + +// readFrames is read Frame +func (rw *ReaderWrapper) readFrames(p []byte) (int, error) { + var nn int + var err error + var checkValid bool + if rw.OutputRawData == true { + nn, err = rw.Body.Read(p) + return nn, err + } + + if rw.Finish { + return 0, io.EOF + } + + for { + // if this Frame is Read, then not reading Header + if rw.OpenLine != true { + err = rw.analysisHeader() + if err != nil { + return nn, err + } + } + + if rw.FrameType == DataFrameType { + n, err := rw.analysisData(p[nn:]) + if err != nil { + return nn, err + } + nn += n + + // if this Frame is read all data, then empty the Frame to read it with next frame + if rw.ConsumedBytesLength == rw.PayloadLength-8 { + checkValid, err = rw.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + rw.emptyFrame() + } + + if nn == len(p) { + return nn, nil + } + } else if rw.FrameType == ContinuousFrameType { + checkValid, err = rw.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + rw.OpenLine = false + } else if rw.FrameType == EndFrameType { + err = rw.analysisEndFrame() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } else if rw.FrameType == MetaEndFrameCSVType { + err = rw.analysisMetaEndFrameCSV() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } else if rw.FrameType == MetaEndFrameJSONType { + err = rw.analysisMetaEndFrameJSON() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } + } +} + +type chanReadIO struct { + readLen int + err error +} + +func (rw *ReaderWrapper) readLen(p []byte) (int, error) { + r := rw.Body + ch := make(chan chanReadIO, 1) + defer close(ch) + go func(p []byte) { + var needReadLength int + readChan := chanReadIO{} + needReadLength = len(p) + for { + n, err := r.Read(p[readChan.readLen:needReadLength]) + readChan.readLen += n + if err != nil { + readChan.err = err + ch <- readChan + return + } + + if readChan.readLen == needReadLength { + break + } + } + ch <- readChan + }(p) + + select { + case result := <-ch: + 
return result.readLen, result.err + } +} + +// analysisHeader is reading selectObject response body's header +func (rw *ReaderWrapper) analysisHeader() error { + headFrameByte := make([]byte, 20) + _, err := rw.readLen(headFrameByte) + if err != nil { + return fmt.Errorf("read response frame header failure,err:%s", err.Error()) + } + + frameTypeByte := headFrameByte[0:4] + rw.Version = frameTypeByte[0] + frameTypeByte[0] = 0 + bytesToInt(frameTypeByte, &rw.FrameType) + + if rw.FrameType != DataFrameType && rw.FrameType != ContinuousFrameType && + rw.FrameType != EndFrameType && rw.FrameType != MetaEndFrameCSVType && rw.FrameType != MetaEndFrameJSONType { + return fmt.Errorf("unexpected frame type: %d", rw.FrameType) + } + + payloadLengthByte := headFrameByte[4:8] + bytesToInt(payloadLengthByte, &rw.PayloadLength) + headCheckSumByte := headFrameByte[8:12] + bytesToInt(headCheckSumByte, &rw.HeaderCheckSum) + byteOffset := headFrameByte[12:20] + bytesToInt(byteOffset, &rw.Offset) + rw.OpenLine = true + err = rw.writerCheckCrc32(byteOffset) + return err +} + +// analysisData is reading the DataFrameType data of selectObject response body +func (rw *ReaderWrapper) analysisData(p []byte) (int, error) { + var needReadLength int32 + lenP := int32(len(p)) + restByteLength := rw.PayloadLength - 8 - rw.ConsumedBytesLength + if lenP <= restByteLength { + needReadLength = lenP + } else { + needReadLength = restByteLength + } + n, err := rw.readLen(p[:needReadLength]) + if err != nil { + return n, fmt.Errorf("read frame data error,%s", err.Error()) + } + rw.ConsumedBytesLength += int32(n) + err = rw.writerCheckCrc32(p[:n]) + return n, err +} + +// analysisEndFrame is reading the EndFrameType data of selectObject response body +func (rw *ReaderWrapper) analysisEndFrame() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read end frame error:%s", err.Error()) + } + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.HTTPStatusCode) + errMsgLength := rw.PayloadLength - 20 + rw.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body +func (rw *ReaderWrapper) analysisMetaEndFrameCSV() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read meta end csv frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.Status) + bytesToInt(payLoadBytes[12:16], &rw.SplitsCount) + bytesToInt(payLoadBytes[16:24], &rw.RowsCount) + bytesToInt(payLoadBytes[24:28], &rw.ColumnsCount) + errMsgLength := rw.PayloadLength - 36 + rw.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameJSON is reading the MetaEndFrameJSONType data of selectObject response body +func (rw *ReaderWrapper) analysisMetaEndFrameJSON() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read meta end json frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.Status) + bytesToInt(payLoadBytes[12:16], &rw.SplitsCount) + bytesToInt(payLoadBytes[16:24], &rw.RowsCount) + errMsgLength := rw.PayloadLength - 32 + 
rw.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +func (rw *ReaderWrapper) checkPayloadSum() (bool, error) { + payLoadChecksumByte := make([]byte, 4) + n, err := rw.readLen(payLoadChecksumByte) + if n == 4 { + bytesToInt(payLoadChecksumByte, &rw.PayloadChecksum) + rw.ServerCRC32 = rw.PayloadChecksum + rw.ClientCRC32 = rw.WriterForCheckCrc32.Sum32() + if rw.EnablePayloadCrc == true && rw.ServerCRC32 != 0 && rw.ServerCRC32 != rw.ClientCRC32 { + return false, fmt.Errorf("unexpected frame type: %d, client %d but server %d", rw.FrameType, rw.ClientCRC32, rw.ServerCRC32) + } + return true, err + } + return false, fmt.Errorf("read checksum error:%s", err.Error()) +} + +func (rw *ReaderWrapper) writerCheckCrc32(p []byte) (err error) { + err = nil + if rw.EnablePayloadCrc == true { + _, err = rw.WriterForCheckCrc32.Write(p) + } + return err +} + +// emptyFrame is emptying SelectObjectResponse Frame information +func (rw *ReaderWrapper) emptyFrame() { + rw.WriterForCheckCrc32 = crc32.NewIEEE() + + rw.Finish = false + rw.ConsumedBytesLength = 0 + rw.OpenLine = false + rw.Version = byte(0) + rw.FrameType = 0 + rw.PayloadLength = 0 + rw.HeaderCheckSum = 0 + rw.Offset = 0 + rw.Data = "" + + rw.TotalScanned = 0 + rw.Status = 0 + rw.SplitsCount = 0 + rw.RowsCount = 0 + rw.ColumnsCount = 0 + + rw.ErrorMsg = "" + + rw.PayloadChecksum = 0 +} + +// bytesToInt byte's array trans to int +func bytesToInt(b []byte, ret interface{}) { + binBuf := bytes.NewBuffer(b) + binary.Read(binBuf, binary.BigEndian, ret) +} + +// jsonEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) jsonEncodeBase64() { + if selectReq == nil { + return + } + if selectReq.Expression != nil { + *selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(*selectReq.Expression)) + } + if selectReq.OutputSerializationSelect.JsonBodyOutput == nil { + return + } + if selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter != nil { + *selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter)) + } + if selectReq.InputSerializationSelect.JsonBodyInput.Range != nil { + *selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + *selectReq.InputSerializationSelect.JsonBodyInput.Range + } + if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != nil && *selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = Ptr("split-range=" + *selectReq.InputSerializationSelect.JsonBodyInput.SplitRange) + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange = nil + } +} + +// encodeBase64 encode base64 of the CreateSelectObjectMeta api request params +func (meta *CsvMetaRequest) encodeBase64() { + if meta == nil || meta.InputSerialization == nil { + return + } + if meta.InputSerialization.CSV.RecordDelimiter != nil { + *meta.InputSerialization.CSV.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.RecordDelimiter)) + } + if meta.InputSerialization.CSV.FieldDelimiter != nil { + *meta.InputSerialization.CSV.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.FieldDelimiter)) + } + + if meta.InputSerialization.CSV.QuoteCharacter != nil { + *meta.InputSerialization.CSV.QuoteCharacter = + 
base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.QuoteCharacter)) + } +} + +func (selectReq *SelectRequest) encodeBase64() { + if selectReq.InputSerializationSelect.JsonBodyInput == nil { + selectReq.csvEncodeBase64() + } else { + selectReq.jsonEncodeBase64() + } +} + +// csvEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) csvEncodeBase64() { + if selectReq == nil { + return + } + if selectReq.Expression != nil { + *selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(*selectReq.Expression)) + } + if selectReq.InputSerializationSelect.CsvBodyInput == nil { + return + } + if selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.Range != nil && *selectReq.InputSerializationSelect.CsvBodyInput.Range != "" { + *selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + *selectReq.InputSerializationSelect.CsvBodyInput.Range + } + if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != nil && *selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = Ptr("split-range=" + *selectReq.InputSerializationSelect.CsvBodyInput.SplitRange) + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange = nil + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go new file mode 100644 index 000000000..c9af79ac3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go @@ -0,0 +1,106 @@ +package oss + +import ( + "context" + "time" +) + +type ListBucketsRequest struct { + // The name of the bucket from which the list operation begins. + Marker *string `input:"query,marker"` + + // The maximum number of buckets that can be returned in the single query. + // Valid values: 1 to 1000. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of returned buckets must contain. + Prefix *string `input:"query,prefix"` // Limits the response to keys that begin with the specified prefix + + // The ID of the resource group. + ResourceGroupId *string `input:"header,x-oss-resource-group-id"` + + RequestCommon +} + +type ListBucketsResult struct { + // The prefix contained in the names of the returned bucket. + Prefix *string `xml:"Prefix"` + + // The name of the bucket after which the ListBuckets operation starts. + Marker *string `xml:"Marker"` // The marker filter. 
+ + // The maximum number of buckets that can be returned for the request. + MaxKeys int32 `xml:"MaxKeys"` + + // Indicates whether all results are returned. + // true: Only part of the results are returned for the request. + // false: All results are returned for the request. + IsTruncated bool `xml:"IsTruncated"` + + // The marker for the next ListBuckets request, which can be used to return the remaining results. + NextMarker *string `xml:"NextMarker"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The container that stores information about buckets. + Buckets []BucketProperties `xml:"Buckets>Bucket"` + + ResultCommon +} + +type BucketProperties struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // The data center in which the bucket is located. + Location *string `xml:"Location"` + + // The time when the bucket was created. Format: yyyy-mm-ddThh:mm:ss.timezone. + CreationDate *time.Time `xml:"CreationDate"` + + // The storage class of the bucket. Valid values: + // Standard, IA, Archive, ColdArchive and DeepColdArchive. + StorageClass *string `xml:"StorageClass"` + + // The public endpoint used to access the bucket over the Internet. + ExtranetEndpoint *string `xml:"ExtranetEndpoint"` + + // The internal endpoint that is used to access the bucket from ECS instances + // that reside in the same region as the bucket. + IntranetEndpoint *string `xml:"IntranetEndpoint"` + + // The region in which the bucket is located. + Region *string `xml:"Region"` + + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` +} + +// ListBuckets Lists buckets that belong to the current account. +func (c *Client) ListBuckets(ctx context.Context, request *ListBucketsRequest, optFns ...func(*Options)) (*ListBucketsResult, error) { + var err error + if request == nil { + request = &ListBucketsRequest{} + } + input := &OperationInput{ + OpName: "ListBuckets", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go new file mode 100644 index 000000000..4e4fb2bbd --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go @@ -0,0 +1,369 @@ +package oss + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" +) + +// ----- download checkpoint ----- +type downloadCheckpoint struct { + CpDirPath string // checkpoint dir full path + CpFilePath string // checkpoint file full path + VerifyData bool // verify downloaded data in FilePath + Loaded bool // If Info.Data.DownloadInfo is loaded from checkpoint + + Info struct { //checkpoint data + Magic string // Magic + MD5 string // The Data's MD5 + Data struct { + // source + ObjectInfo struct { + Name string // oss://bucket/key + VersionId string + Range string + } + ObjectMeta struct { + Size int64 + LastModified string + ETag 
string
+ }
+
+ // destination
+ FilePath string // Local file
+
+ // download info
+ PartSize int64
+
+ DownloadInfo struct {
+ Offset int64
+ CRC64 uint64
+ }
+ }
+ }
+}
+
+func newDownloadCheckpoint(request *GetObjectRequest, filePath string, baseDir string, header http.Header, partSize int64) *downloadCheckpoint {
+ var buf strings.Builder
+ name := fmt.Sprintf("%v/%v", ToString(request.Bucket), ToString(request.Key))
+ buf.WriteString("oss://" + escapePath(name, false))
+ buf.WriteString("\n")
+ buf.WriteString(ToString(request.VersionId))
+ buf.WriteString("\n")
+ buf.WriteString(ToString(request.Range))
+
+ hashmd5 := md5.New()
+ hashmd5.Write([]byte(buf.String()))
+ srcHash := hex.EncodeToString(hashmd5.Sum(nil))
+
+ absPath, _ := filepath.Abs(filePath)
+ hashmd5.Reset()
+ hashmd5.Write([]byte(absPath))
+ destHash := hex.EncodeToString(hashmd5.Sum(nil))
+
+ var dir string
+ if baseDir == "" {
+ dir = os.TempDir()
+ } else {
+ dir = filepath.Dir(baseDir)
+ }
+
+ cpFilePath := filepath.Join(dir, fmt.Sprintf("%v-%v%v", srcHash, destHash, CheckpointFileSuffixDownloader))
+
+ cp := &downloadCheckpoint{
+ CpFilePath: cpFilePath,
+ CpDirPath: dir,
+ }
+
+ objectSize, _ := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
+
+ cp.Info.Magic = CheckpointMagic
+ cp.Info.Data.ObjectInfo.Name = "oss://" + name
+ cp.Info.Data.ObjectInfo.VersionId = ToString(request.VersionId)
+ cp.Info.Data.ObjectInfo.Range = ToString(request.Range)
+ cp.Info.Data.ObjectMeta.Size = objectSize
+ cp.Info.Data.ObjectMeta.LastModified = header.Get("Last-Modified")
+ cp.Info.Data.ObjectMeta.ETag = header.Get("ETag")
+ cp.Info.Data.FilePath = filePath
+ cp.Info.Data.PartSize = partSize
+
+ return cp
+}
+
+// load checkpoint from local file
+func (cp *downloadCheckpoint) load() error {
+ if !DirExists(cp.CpDirPath) {
+ return fmt.Errorf("Invalid checkpoint dir, %v", cp.CpDirPath)
+ }
+
+ if !FileExists(cp.CpFilePath) {
+ return nil
+ }
+
+ if !cp.valid() {
+ cp.remove()
+ return nil
+ }
+
+ cp.Loaded = true
+
+ return nil
+}
+
+func (cp *downloadCheckpoint) valid() bool {
+ // Compare the CP's Magic and the MD5
+ contents, err := os.ReadFile(cp.CpFilePath)
+ if err != nil {
+ return false
+ }
+
+ dcp := downloadCheckpoint{}
+
+ if err = json.Unmarshal(contents, &dcp.Info); err != nil {
+ return false
+ }
+
+ js, _ := json.Marshal(dcp.Info.Data)
+ sum := md5.Sum(js)
+ md5sum := hex.EncodeToString(sum[:])
+
+ if CheckpointMagic != dcp.Info.Magic ||
+ md5sum != dcp.Info.MD5 {
+ return false
+ }
+
+ // compare
+ if !reflect.DeepEqual(cp.Info.Data.ObjectInfo, dcp.Info.Data.ObjectInfo) ||
+ !reflect.DeepEqual(cp.Info.Data.ObjectMeta, dcp.Info.Data.ObjectMeta) ||
+ cp.Info.Data.FilePath != dcp.Info.Data.FilePath ||
+ cp.Info.Data.PartSize != dcp.Info.Data.PartSize {
+ return false
+ }
+
+ // download info
+ if dcp.Info.Data.DownloadInfo.Offset < 0 {
+ return false
+ }
+
+ if dcp.Info.Data.DownloadInfo.Offset == 0 &&
+ dcp.Info.Data.DownloadInfo.CRC64 != 0 {
+ return false
+ }
+
+ rOffset := int64(0)
+ if len(cp.Info.Data.ObjectInfo.Range) > 0 {
+ if r, err := ParseRange(cp.Info.Data.ObjectInfo.Range); err != nil {
+ return false
+ } else {
+ rOffset = r.Offset
+ }
+ }
+
+ if dcp.Info.Data.DownloadInfo.Offset < rOffset {
+ return false
+ }
+
+ remains := (dcp.Info.Data.DownloadInfo.Offset - rOffset) % dcp.Info.Data.PartSize
+ if remains != 0 {
+ return false
+ }
+
+ //valid data
+ if cp.VerifyData && dcp.Info.Data.DownloadInfo.CRC64 != 0 {
+ if file, err := os.Open(cp.Info.Data.FilePath); err == nil {
+ hash := NewCRC64(0)
+ limitN := dcp.Info.Data.DownloadInfo.Offset - rOffset
+ io.Copy(hash, io.LimitReader(file, limitN))
+ file.Close()
+ if hash.Sum64() != dcp.Info.Data.DownloadInfo.CRC64 {
+ return false
+ }
+ }
+ }
+
+ // update
+ cp.Info.Data.DownloadInfo = dcp.Info.Data.DownloadInfo
+
+ return true
+}
+
+// dump dumps to file
+func (cp *downloadCheckpoint) dump() error {
+ // Calculate MD5
+ js, _ := json.Marshal(cp.Info.Data)
+ sum := md5.Sum(js)
+ md5sum := hex.EncodeToString(sum[:])
+ cp.Info.MD5 = md5sum
+
+ // Serialize
+ js, err := json.Marshal(cp.Info)
+ if err != nil {
+ return err
+ }
+
+ // Dump
+ return os.WriteFile(cp.CpFilePath, js, FilePermMode)
+}
+
+func (cp *downloadCheckpoint) remove() error {
+ return os.Remove(cp.CpFilePath)
+}
+
+// ----- upload checkpoint -----
+type uploadCheckpoint struct {
+ CpDirPath string // checkpoint dir full path
+ CpFilePath string // checkpoint file full path
+ Loaded bool // If Info.Data.UploadInfo is loaded from checkpoint
+
+ Info struct { //checkpoint data
+ Magic string // Magic
+ MD5 string // The Data's MD5
+ Data struct {
+ // source
+ FilePath string // Local file
+
+ FileMeta struct {
+ Size int64
+ LastModified string
+ }
+
+ // destination
+ ObjectInfo struct {
+ Name string // oss://bucket/key
+ }
+
+ // upload info
+ PartSize int64
+
+ UploadInfo struct {
+ UploadId string
+ }
+ }
+ }
+}
+
+func newUploadCheckpoint(request *PutObjectRequest, filePath string, baseDir string, fileInfo os.FileInfo, partSize int64) *uploadCheckpoint {
+ name := fmt.Sprintf("%v/%v", ToString(request.Bucket), ToString(request.Key))
+ hashmd5 := md5.New()
+ hashmd5.Write([]byte("oss://" + escapePath(name, false)))
+ destHash := hex.EncodeToString(hashmd5.Sum(nil))
+
+ absPath, _ := filepath.Abs(filePath)
+ hashmd5.Reset()
+ hashmd5.Write([]byte(absPath))
+ srcHash := hex.EncodeToString(hashmd5.Sum(nil))
+
+ var dir string
+ if baseDir == "" {
+ dir = os.TempDir()
+ } else {
+ dir = filepath.Dir(baseDir)
+ }
+
+ cpFilePath := filepath.Join(dir, fmt.Sprintf("%v-%v%v", srcHash, destHash, CheckpointFileSuffixUploader))
+
+ cp := &uploadCheckpoint{
+ CpFilePath: cpFilePath,
+ CpDirPath: dir,
+ }
+
+ cp.Info.Magic = CheckpointMagic
+ cp.Info.Data.FilePath = filePath
+ cp.Info.Data.FileMeta.Size = fileInfo.Size()
+ cp.Info.Data.FileMeta.LastModified = fileInfo.ModTime().String()
+ cp.Info.Data.ObjectInfo.Name = "oss://" + name
+ cp.Info.Data.PartSize = partSize
+
+ return cp
+}
+
+// load checkpoint from local file
+func (cp *uploadCheckpoint) load() error {
+ if !DirExists(cp.CpDirPath) {
+ return fmt.Errorf("Invalid checkpoint dir, %v", cp.CpDirPath)
+ }
+
+ if !FileExists(cp.CpFilePath) {
+ return nil
+ }
+
+ if !cp.valid() {
+ cp.remove()
+ return nil
+ }
+
+ cp.Loaded = true
+
+ return nil
+}
+
+func (cp *uploadCheckpoint) valid() bool {
+ // Compare the CP's Magic and the MD5
+ contents, err := os.ReadFile(cp.CpFilePath)
+ if err != nil {
+ return false
+ }
+
+ dcp := uploadCheckpoint{}
+
+ if err = json.Unmarshal(contents, &dcp.Info); err != nil {
+ return false
+ }
+
+ js, _ := json.Marshal(dcp.Info.Data)
+ sum := md5.Sum(js)
+ md5sum := hex.EncodeToString(sum[:])
+
+ if CheckpointMagic != dcp.Info.Magic ||
+ md5sum != dcp.Info.MD5 {
+ return false
+ }
+
+ // compare
+ if !reflect.DeepEqual(cp.Info.Data.ObjectInfo, dcp.Info.Data.ObjectInfo) ||
+ !reflect.DeepEqual(cp.Info.Data.FileMeta, dcp.Info.Data.FileMeta) ||
+ cp.Info.Data.FilePath != dcp.Info.Data.FilePath ||
+ cp.Info.Data.PartSize != dcp.Info.Data.PartSize {
+ return false
+ }
+
+ // upload info
+ if 
len(dcp.Info.Data.UploadInfo.UploadId) == 0 { + return false + } + + // update + cp.Info.Data.UploadInfo = dcp.Info.Data.UploadInfo + + return true +} + +// dump dumps to file +func (cp *uploadCheckpoint) dump() error { + // Calculate MD5 + js, _ := json.Marshal(cp.Info.Data) + sum := md5.Sum(js) + md5sum := hex.EncodeToString(sum[:]) + cp.Info.MD5 = md5sum + + // Serialize + js, err := json.Marshal(cp.Info) + if err != nil { + return err + } + + // Dump + return os.WriteFile(cp.CpFilePath, js, FilePermMode) +} + +func (cp *uploadCheckpoint) remove() error { + return os.Remove(cp.CpFilePath) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go new file mode 100644 index 000000000..ee86ec043 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go @@ -0,0 +1,1499 @@ +package oss + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport" +) + +type Options struct { + Product string + + Region string + + Endpoint *url.URL + + RetryMaxAttempts *int + + Retryer retry.Retryer + + Signer signer.Signer + + CredentialsProvider credentials.CredentialsProvider + + HttpClient HTTPClient + + ResponseHandlers []func(*http.Response) error + + UrlStyle UrlStyleType + + FeatureFlags FeatureFlagsType + + OpReadWriteTimeout *time.Duration + + AuthMethod *AuthMethodType + + AdditionalHeaders []string +} + +func (c Options) Copy() Options { + to := c + to.ResponseHandlers = make([]func(*http.Response) error, len(c.ResponseHandlers)) + copy(to.ResponseHandlers, c.ResponseHandlers) + return to +} + +func OpReadWriteTimeout(value time.Duration) func(*Options) { + return func(o *Options) { + o.OpReadWriteTimeout = Ptr(value) + } +} + +type innerOptions struct { + BwTokenBuckets BwTokenBuckets + + // A clock offset that how much client time is different from server time + ClockOffset time.Duration + + // Logger + Log Logger + + // UserAgent + UserAgent string +} + +type Client struct { + options Options + inner innerOptions +} + +func NewClient(cfg *Config, optFns ...func(*Options)) *Client { + options := Options{ + Product: DefaultProduct, + Region: ToString(cfg.Region), + RetryMaxAttempts: cfg.RetryMaxAttempts, + Retryer: cfg.Retryer, + CredentialsProvider: cfg.CredentialsProvider, + HttpClient: cfg.HttpClient, + FeatureFlags: FeatureFlagsDefault, + AdditionalHeaders: cfg.AdditionalHeaders, + } + inner := innerOptions{ + Log: NewLogger(ToInt(cfg.LogLevel), cfg.LogPrinter), + UserAgent: buildUserAgent(cfg), + } + + resolveEndpoint(cfg, &options) + resolveRetryer(cfg, &options) + resolveHTTPClient(cfg, &options, &inner) + resolveSigner(cfg, &options) + resolveUrlStyle(cfg, &options) + resolveFeatureFlags(cfg, &options) + resolveCloudBox(cfg, &options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + inner: inner, + } + + return client +} + +func resolveEndpoint(cfg *Config, o *Options) { + disableSSL := ToBool(cfg.DisableSSL) + endpoint := ToString(cfg.Endpoint) + region := ToString(cfg.Region) + if len(endpoint) > 0 { + endpoint = 
addEndpointScheme(endpoint, disableSSL) + } else if isValidRegion(region) { + endpoint = endpointFromRegion( + region, + disableSSL, + func() EndpointType { + if ToBool(cfg.UseInternalEndpoint) { + return EndpointInternal + } else if ToBool(cfg.UseDualStackEndpoint) { + return EndpointDualStack + } else if ToBool(cfg.UseAccelerateEndpoint) { + return EndpointAccelerate + } + return EndpointPublic + }(), + ) + } + + if endpoint == "" { + return + } + + o.Endpoint, _ = url.Parse(endpoint) +} + +func resolveRetryer(_ *Config, o *Options) { + if o.Retryer != nil { + return + } + + o.Retryer = retry.NewStandard() +} + +func resolveHTTPClient(cfg *Config, o *Options, inner *innerOptions) { + if o.HttpClient != nil { + return + } + + //config in http.Transport + custom := []func(*http.Transport){} + if cfg.InsecureSkipVerify != nil { + custom = append(custom, transport.InsecureSkipVerify(*cfg.InsecureSkipVerify)) + } + if cfg.ProxyFromEnvironment != nil && *cfg.ProxyFromEnvironment { + custom = append(custom, transport.ProxyFromEnvironment()) + } + if cfg.ProxyHost != nil { + if url, err := url.Parse(*cfg.ProxyHost); err == nil { + custom = append(custom, transport.HttpProxy(url)) + } + } + + //config in transport package + tcfg := &transport.Config{} + if cfg.ConnectTimeout != nil { + tcfg.ConnectTimeout = cfg.ConnectTimeout + } + if cfg.ReadWriteTimeout != nil { + tcfg.ReadWriteTimeout = cfg.ReadWriteTimeout + } + if cfg.EnabledRedirect != nil { + tcfg.EnabledRedirect = cfg.EnabledRedirect + } + if cfg.UploadBandwidthlimit != nil { + value := *cfg.UploadBandwidthlimit * 1024 + tb := newBwTokenBucket(value) + tcfg.PostWrite = append(tcfg.PostWrite, func(n int, _ error) { + tb.LimitBandwidth(n) + }) + inner.BwTokenBuckets[BwTokenBucketSlotTx] = tb + } + if cfg.DownloadBandwidthlimit != nil { + value := *cfg.DownloadBandwidthlimit * 1024 + tb := newBwTokenBucket(value) + tcfg.PostRead = append(tcfg.PostRead, func(n int, _ error) { + tb.LimitBandwidth(n) + }) + inner.BwTokenBuckets[BwTokenBucketSlotRx] = tb + } + + o.HttpClient = transport.NewHttpClient(tcfg, custom...) 
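+	// When an upload or download bandwidth limit is configured, the
+	// PostWrite/PostRead hooks registered above feed every transferred byte
+	// count into the token buckets kept in inner.BwTokenBuckets, which is how
+	// the limits throttle all requests sharing this client.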
+} + +func resolveSigner(cfg *Config, o *Options) { + if o.Signer != nil { + return + } + + ver := DefaultSignatureVersion + if cfg.SignatureVersion != nil { + ver = *cfg.SignatureVersion + } + + switch ver { + case SignatureVersionV1: + o.Signer = &signer.SignerV1{} + default: + o.Signer = &signer.SignerV4{} + } +} + +func resolveUrlStyle(cfg *Config, o *Options) { + if cfg.UseCName != nil && *cfg.UseCName { + o.UrlStyle = UrlStyleCName + } else if cfg.UsePathStyle != nil && *cfg.UsePathStyle { + o.UrlStyle = UrlStylePath + } else { + o.UrlStyle = UrlStyleVirtualHosted + } + + // if the endpoint is ip, set to path-style + if o.Endpoint != nil { + if ip := net.ParseIP(o.Endpoint.Hostname()); ip != nil { + o.UrlStyle = UrlStylePath + } + } +} + +func resolveFeatureFlags(cfg *Config, o *Options) { + if ToBool(cfg.DisableDownloadCRC64Check) { + o.FeatureFlags = o.FeatureFlags & ^FeatureEnableCRC64CheckDownload + } + + if ToBool(cfg.DisableUploadCRC64Check) { + o.FeatureFlags = o.FeatureFlags & ^FeatureEnableCRC64CheckUpload + } +} + +func resolveCloudBox(cfg *Config, o *Options) { + if cfg.CloudBoxId != nil { + o.Region = ToString(cfg.CloudBoxId) + o.Product = CloudBoxProduct + return + } + + if !ToBool(cfg.EnableAutoDetectCloudBoxId) { + return + } + + if o.Endpoint == nil { + return + } + + //cb-***.{region}.oss-cloudbox-control.aliyuncs.com + //cb-***.{region}.oss-cloudbox.aliyuncs.com + host := o.Endpoint.Host + if !(strings.HasSuffix(host, ".oss-cloudbox.aliyuncs.com") || + strings.HasSuffix(host, ".oss-cloudbox-control.aliyuncs.com")) { + return + } + + keys := strings.Split(host, ".") + if keys == nil || + len(keys) != 5 || + !strings.HasPrefix(keys[0], "cb-") { + return + } + o.Region = keys[0] + o.Product = CloudBoxProduct +} + +func buildUserAgent(cfg *Config) string { + if cfg.UserAgent == nil { + return defaultUserAgent + } + + return fmt.Sprintf("%s/%s", defaultUserAgent, ToString(cfg.UserAgent)) +} + +func (c *Client) invokeOperation(ctx context.Context, input *OperationInput, optFns []func(*Options)) (output *OperationOutput, err error) { + if c.getLogLevel() >= LogInfo { + c.inner.Log.Infof("InvokeOperation Start: input[%p], OpName:%s, Bucket:%s, Key:%s", + input, input.OpName, + ToString(input.Bucket), ToString(input.Key)) + defer func() { + c.inner.Log.Infof("InvokeOperation End: input[%p], OpName:%s, output:'%v', err:'%v'", + input, input.OpName, + c.dumpOperationOutput(output), err) + }() + } + + options := c.options.Copy() + opOpt := Options{} + + for _, fn := range optFns { + fn(&opOpt) + } + + applyOperationOpt(&options, &opOpt) + + applyOperationMetadata(input, &options) + + ctx = applyOperationContext(ctx, &options) + + output, err = c.sendRequest(ctx, input, &options) + + if err != nil { + return output, &OperationError{ + name: input.OpName, + err: err} + } + + return output, err +} + +func (c *Client) sendRequest(ctx context.Context, input *OperationInput, opts *Options) (output *OperationOutput, err error) { + var request *http.Request + var response *http.Response + if c.getLogLevel() >= LogInfo { + c.inner.Log.Infof("sendRequest Start: input[%p]", input) + defer func() { + c.inner.Log.Infof("sendRequest End: input[%p], http.Request[%p], http.Response[%p]", input, request, response) + }() + } + + // covert input into httpRequest + if !isValidEndpoint(opts.Endpoint) { + return output, NewErrParamInvalid("Endpoint") + } + + var writers []io.Writer + // tracker in OperationMetaData + for _, w := range input.OpMetadata.Values(OpMetaKeyRequestBodyTracker) { + if ww, 
ok := w.(io.Writer); ok { + writers = append(writers, ww) + } + } + // host & path + host, path := buildURL(input, opts) + strUrl := fmt.Sprintf("%s://%s%s", opts.Endpoint.Scheme, host, path) + + // querys + if len(input.Parameters) > 0 { + var buf bytes.Buffer + for k, v := range input.Parameters { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if len(v) > 0 { + buf.WriteString("=" + strings.Replace(url.QueryEscape(v), "+", "%20", -1)) + } + } + strUrl += "?" + buf.String() + } + + request, err = http.NewRequestWithContext(ctx, input.Method, strUrl, nil) + if err != nil { + return output, err + } + + // headers + for k, v := range input.Headers { + if len(k) > 0 && len(v) > 0 { + request.Header.Add(k, v) + } + } + request.Header.Set("User-Agent", c.inner.UserAgent) + + // body + var body io.Reader + if input.Body == nil { + body = strings.NewReader("") + } else { + body = input.Body + } + var length int64 + if clen := request.Header.Get("Content-Length"); clen != "" { + length, _ = strconv.ParseInt(clen, 10, 64) + } else { + length = GetReaderLen(body) + } + if length >= 0 { + request.ContentLength = length + } + request.Body = TeeReadNopCloser(body, writers...) + + //signing context + subResource, _ := input.OpMetadata.Get(signer.SubResource).([]string) + clockOffset := c.inner.ClockOffset + signingCtx := &signer.SigningContext{ + Product: Ptr(opts.Product), + Region: Ptr(opts.Region), + Bucket: input.Bucket, + Key: input.Key, + Request: request, + SubResource: subResource, + AuthMethodQuery: opts.AuthMethod != nil && *opts.AuthMethod == AuthMethodQuery, + ClockOffset: clockOffset, + AdditionalHeaders: opts.AdditionalHeaders, + } + + if date := request.Header.Get(HeaderOssDate); date != "" { + signingCtx.Time, _ = http.ParseTime(date) + } else if signTime, ok := input.OpMetadata.Get(signer.SignTime).(time.Time); ok { + signingCtx.Time = signTime + } + + // send http request + response, err = c.sendHttpRequest(ctx, signingCtx, opts) + + if err != nil { + return output, err + } + + // covert http response into output context + output = &OperationOutput{ + Input: input, + Status: response.Status, + StatusCode: response.StatusCode, + Body: response.Body, + Headers: response.Header, + httpRequest: request, + } + + // save other info by Metadata filed, ex. retry detail info + //output.OpMetadata.Set(...) 
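+	// For query-style (presign) authentication the effective sign time is
+	// handed back to the caller through OpMetadata, and any clock offset
+	// learned while sending is kept on the client for subsequent requests: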
+ if signingCtx.AuthMethodQuery { + output.OpMetadata.Set(signer.SignTime, signingCtx.Time) + } + + if signingCtx.ClockOffset != clockOffset { + c.inner.ClockOffset = signingCtx.ClockOffset + } + + return output, err +} + +func (c *Client) sendHttpRequest(ctx context.Context, signingCtx *signer.SigningContext, opts *Options) (response *http.Response, err error) { + request := signingCtx.Request + retryer := opts.Retryer + maxAttempts := c.retryMaxAttempts(opts) + body, _ := request.Body.(*teeReadNopCloser) + resetTime := signingCtx.Time.IsZero() + body.Mark() + for tries := 1; tries <= maxAttempts; tries++ { + if tries > 1 { + delay, err := retryer.RetryDelay(tries, err) + if err != nil { + break + } + + if err = sleepWithContext(ctx, delay); err != nil { + err = &CanceledError{Err: err} + break + } + + if err = body.Reset(); err != nil { + break + } + + if resetTime { + signingCtx.Time = time.Time{} + } + + c.inner.Log.Infof("Attempt retry, request[%p], tries:%v, retry delay:%v", request, tries, delay) + } + + if response, err = c.sendHttpRequestOnce(ctx, signingCtx, opts); err == nil { + break + } + + c.postSendHttpRequestOnce(signingCtx, response, err) + + if isContextError(ctx, &err) { + err = &CanceledError{Err: err} + break + } + + if !body.IsSeekable() { + break + } + + if !retryer.IsErrorRetryable(err) { + break + } + } + return response, err +} + +func (c *Client) sendHttpRequestOnce(ctx context.Context, signingCtx *signer.SigningContext, opts *Options) ( + response *http.Response, err error, +) { + if c.getLogLevel() > LogInfo { + c.inner.Log.Infof("sendHttpRequestOnce Start, http.Request[%p]", signingCtx.Request) + defer func() { + c.inner.Log.Infof("sendHttpRequestOnce End, http.Request[%p], response[%p], err:%v", signingCtx.Request, response, err) + }() + } + + if _, anonymous := opts.CredentialsProvider.(*credentials.AnonymousCredentialsProvider); !anonymous { + cred, err := opts.CredentialsProvider.GetCredentials(ctx) + if err != nil { + return response, err + } + + signingCtx.Credentials = &cred + if err = c.options.Signer.Sign(ctx, signingCtx); err != nil { + return response, err + } + c.inner.Log.Debugf("sendHttpRequestOnce::Sign request[%p], StringToSign:%s", signingCtx.Request, signingCtx.StringToSign) + } + + c.logHttpPRequet(signingCtx.Request) + + if response, err = opts.HttpClient.Do(signingCtx.Request); err != nil { + return response, err + } + + c.logHttpResponse(signingCtx.Request, response) + + for _, fn := range opts.ResponseHandlers { + if err = fn(response); err != nil { + return response, err + } + } + + return response, err +} + +func (c *Client) postSendHttpRequestOnce(signingCtx *signer.SigningContext, _ *http.Response, err error) { + if err != nil { + switch e := err.(type) { + case *ServiceError: + if c.hasFeature(FeatureCorrectClockSkew) && + e.Code == "RequestTimeTooSkewed" && + !e.Timestamp.IsZero() { + signingCtx.ClockOffset = e.Timestamp.Sub(signingCtx.Time) + c.inner.Log.Warnf("Got RequestTimeTooSkewed error, correct clock request[%p], ClockOffset:%v, Server Time:%v, Client time:%v", + signingCtx.Request, signingCtx.ClockOffset, e.Timestamp, signingCtx.Time) + } + } + } +} + +func buildURL(input *OperationInput, opts *Options) (host string, path string) { + if input == nil || opts == nil || opts.Endpoint == nil { + return host, path + } + + var paths []string + if input.Bucket == nil { + host = opts.Endpoint.Host + } else { + switch opts.UrlStyle { + default: // UrlStyleVirtualHosted + host = fmt.Sprintf("%s.%s", *input.Bucket, opts.Endpoint.Host) 
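+		// Path-style addressing keeps the bucket out of the host name and
+		// prepends it to the request path instead; CName assumes the endpoint
+		// host is a custom domain already bound to the bucket.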
+ case UrlStylePath: + host = opts.Endpoint.Host + paths = append(paths, *input.Bucket) + if input.Key == nil { + paths = append(paths, "") + } + case UrlStyleCName: + host = opts.Endpoint.Host + } + } + + if input.Key != nil { + paths = append(paths, escapePath(*input.Key, false)) + } + + return host, ("/" + strings.Join(paths, "/")) +} + +func serviceErrorResponseHandler(response *http.Response) error { + if response.StatusCode/100 == 2 { + return nil + } + return tryConvertServiceError(response) +} + +func callbackErrorResponseHandler(response *http.Response) error { + if response.StatusCode == 203 && + response.Request.Header.Get(HeaderOssCallback) != "" { + return tryConvertServiceError(response) + } + return nil +} + +func tryConvertServiceError(response *http.Response) (err error) { + var respBody []byte + var body []byte + timestamp, err := time.Parse(http.TimeFormat, response.Header.Get("Date")) + if err != nil { + timestamp = time.Now() + } + + defer response.Body.Close() + respBody, err = io.ReadAll(response.Body) + body = respBody + if len(respBody) == 0 && len(response.Header.Get(HeaderOssERR)) > 0 { + body, err = base64.StdEncoding.DecodeString(response.Header.Get(HeaderOssERR)) + if err != nil { + body = respBody + } + } + se := &ServiceError{ + StatusCode: response.StatusCode, + Code: "BadErrorResponse", + RequestID: response.Header.Get(HeaderOssRequestID), + Timestamp: timestamp, + RequestTarget: fmt.Sprintf("%s %s", response.Request.Method, response.Request.URL), + Snapshot: body, + Headers: response.Header, + } + + if err != nil { + se.Message = fmt.Sprintf("The body of the response was not readable, due to :%s", err.Error()) + return se + } + err = xml.Unmarshal(body, &se) + if err != nil { + len := len(body) + if len > 256 { + len = 256 + } + se.Message = fmt.Sprintf("Failed to parse xml from response body due to: %s. With part response body %s.", err.Error(), string(body[:len])) + return se + } + return se +} + +func nonStreamResponseHandler(response *http.Response) error { + body := response.Body + if body == nil { + return nil + } + + defer body.Close() + val, err := io.ReadAll(body) + + if err == nil { + response.Body = io.NopCloser(bytes.NewReader(val)) + } + + return err +} + +func checkResponseHeaderCRC64(ccrc string, header http.Header) (err error) { + if scrc := header.Get(HeaderOssCRC64); scrc != "" { + if scrc != ccrc { + return fmt.Errorf("crc is inconsistent, client %s, server %s", ccrc, scrc) + } + } + return nil +} + +func applyOperationOpt(c *Options, op *Options) { + if c == nil || op == nil { + return + } + + if op.Endpoint != nil { + c.Endpoint = op.Endpoint + } + + if ToInt(op.RetryMaxAttempts) > 0 { + c.RetryMaxAttempts = op.RetryMaxAttempts + } + + if op.Retryer != nil { + c.Retryer = op.Retryer + } + + if c.Retryer == nil { + c.Retryer = retry.NopRetryer{} + } + + if op.OpReadWriteTimeout != nil { + c.OpReadWriteTimeout = op.OpReadWriteTimeout + } + + if op.HttpClient != nil { + c.HttpClient = op.HttpClient + } + + if op.AuthMethod != nil { + c.AuthMethod = op.AuthMethod + } + + //response handler + handlers := []func(*http.Response) error{ + serviceErrorResponseHandler, + } + handlers = append(handlers, c.ResponseHandlers...) + handlers = append(handlers, op.ResponseHandlers...) 
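+	// Order matters: the generic service-error check runs first, followed by
+	// client-level handlers, then per-operation handlers.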
+ c.ResponseHandlers = handlers +} + +func applyOperationContext(ctx context.Context, c *Options) context.Context { + if ctx == nil || c.OpReadWriteTimeout == nil { + return ctx + } + return context.WithValue(ctx, "OpReadWriteTimeout", c.OpReadWriteTimeout) +} + +func applyOperationMetadata(input *OperationInput, c *Options) { + for _, h := range input.OpMetadata.Values(OpMetaKeyResponsHandler) { + if hh, ok := h.(func(*http.Response) error); ok { + c.ResponseHandlers = append(c.ResponseHandlers, hh) + } + } +} + +// fieldInfo holds details for the input/output of a single field. +type fieldInfo struct { + idx int + flags int +} + +const ( + fRequire int = 1 << iota + + fTypeUsermeta + fTypeXml + fTypeTime +) + +func parseFiledFlags(tokens []string) int { + var flags int = 0 + for _, token := range tokens { + switch token { + case "required": + flags |= fRequire + case "time": + flags |= fTypeTime + case "xml": + flags |= fTypeXml + case "usermeta": + flags |= fTypeUsermeta + } + } + return flags +} + +func validateInput(input *OperationInput) error { + if input == nil { + return NewErrParamNull("OperationInput") + } + + if input.Bucket != nil && !isValidBucketName(input.Bucket) { + return NewErrParamInvalid("OperationInput.Bucket") + } + + if input.Key != nil && !isValidObjectName(input.Key) { + return NewErrParamInvalid("OperationInput.Key") + } + + if !isValidMethod(input.Method) { + return NewErrParamInvalid("OperationInput.Method") + } + + return nil +} + +func (c *Client) marshalInput(request any, input *OperationInput, handlers ...func(any, *OperationInput) error) error { + // merge common fields + if cm, ok := request.(RequestCommonInterface); ok { + h, p, b := cm.GetCommonFileds() + // headers + if len(h) > 0 { + if input.Headers == nil { + input.Headers = map[string]string{} + } + for k, v := range h { + input.Headers[k] = v + } + } + + // parameters + if len(p) > 0 { + if input.Parameters == nil { + input.Parameters = map[string]string{} + } + for k, v := range p { + input.Parameters[k] = v + } + } + + // body + input.Body = b + } + + val := reflect.ValueOf(request) + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + if val.Kind() != reflect.Struct || input == nil { + return nil + } + + t := val.Type() + for k := 0; k < t.NumField(); k++ { + if tag, ok := t.Field(k).Tag.Lookup("input"); ok { + // header|query|body,filed_name,[required,time,usermeta...] 
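+			// Illustrative declarations (not verbatim SDK fields):
+			//
+			//	VersionId *string `input:"query,versionId"`         // sent as the versionId query parameter
+			//	Acl       *string `input:"header,x-oss-object-acl"` // sent as a request header
+			//
+			// A trailing "required" token makes an empty value an error.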
+			v := val.Field(k)
+			var flags int = 0
+			tokens := strings.Split(tag, ",")
+			if len(tokens) < 2 {
+				continue
+			}
+
+			// parse field flags
+			if len(tokens) > 2 {
+				flags = parseFiledFlags(tokens[2:])
+			}
+			// check required flag
+			if isEmptyValue(v) {
+				if flags&fRequire != 0 {
+					return NewErrParamRequired(t.Field(k).Name)
+				}
+				continue
+			}
+
+			switch tokens[0] {
+			case "query":
+				if input.Parameters == nil {
+					input.Parameters = map[string]string{}
+				}
+				if v.Kind() == reflect.Pointer {
+					v = v.Elem()
+				}
+				input.Parameters[tokens[1]] = fmt.Sprintf("%v", v.Interface())
+			case "header":
+				if input.Headers == nil {
+					input.Headers = map[string]string{}
+				}
+				if v.Kind() == reflect.Pointer {
+					v = v.Elem()
+				}
+				if flags&fTypeUsermeta != 0 {
+					if m, ok := v.Interface().(map[string]string); ok {
+						for k, v := range m {
+							input.Headers[tokens[1]+k] = v
+						}
+					}
+				} else {
+					input.Headers[tokens[1]] = fmt.Sprintf("%v", v.Interface())
+				}
+			case "body":
+				if flags&fTypeXml != 0 {
+					var b bytes.Buffer
+					if err := xml.NewEncoder(&b).EncodeElement(
+						v.Interface(),
+						xml.StartElement{Name: xml.Name{Local: tokens[1]}}); err != nil {
+						return &SerializationError{
+							Err: err,
+						}
+					}
+					input.Body = bytes.NewReader(b.Bytes())
+				} else {
+					if r, ok := v.Interface().(io.Reader); ok {
+						input.Body = r
+					} else {
+						return NewErrParamTypeNotSupport(t.Field(k).Name)
+					}
+				}
+			}
+		}
+	}
+
+	if err := validateInput(input); err != nil {
+		return err
+	}
+
+	for _, h := range handlers {
+		if err := h(request, input); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func marshalDeleteObjects(request any, input *OperationInput) error {
+	var builder strings.Builder
+	delRequest := request.(*DeleteMultipleObjectsRequest)
+	builder.WriteString("<Delete>")
+	builder.WriteString("<Quiet>")
+	builder.WriteString(strconv.FormatBool(delRequest.Quiet))
+	builder.WriteString("</Quiet>")
+	if len(delRequest.Objects) > 0 {
+		for _, object := range delRequest.Objects {
+			builder.WriteString("<Object>")
+			if object.Key != nil {
+				builder.WriteString("<Key>")
+				builder.WriteString(escapeXml(*object.Key))
+				builder.WriteString("</Key>")
+			}
+			if object.VersionId != nil {
+				builder.WriteString("<VersionId>")
+				builder.WriteString(*object.VersionId)
+				builder.WriteString("</VersionId>")
+			}
+			builder.WriteString("</Object>")
+		}
+	} else {
+		return NewErrParamInvalid("Objects")
+	}
+	builder.WriteString("</Delete>")
+	input.Body = strings.NewReader(builder.String())
+	return nil
+}
+
+func discardBody(result any, output *OperationOutput) error {
+	var err error
+	if output.Body != nil {
+		defer output.Body.Close()
+		_, err = io.Copy(io.Discard, output.Body)
+	}
+	return err
+}
+
+func unmarshalBodyXml(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	if len(body) > 0 {
+		if err = xml.Unmarshal(body, result); err != nil {
+			err = &DeserializationError{
+				Err:      err,
+				Snapshot: body,
+			}
+		}
+	}
+	return err
+}
+
+func unmarshalBodyXmlMix(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	if len(body) == 0 {
+		return nil
+	}
+
+	val := reflect.ValueOf(result)
+	switch val.Kind() {
+	case reflect.Pointer, reflect.Interface:
+		if val.IsNil() {
+			return nil
+		}
+		val = val.Elem()
+	}
+	if val.Kind() != reflect.Struct || output == nil {
+		return nil
+	}
+
+	t := val.Type()
+	idx := -1
+	for k
:= 0; k < t.NumField(); k++ { + if tag, ok := t.Field(k).Tag.Lookup("output"); ok { + tokens := strings.Split(tag, ",") + if len(tokens) < 2 { + continue + } + // header|query|body,filed_name,[required,time,usermeta...] + switch tokens[0] { + case "body": + idx = k + break + } + } + } + + if idx >= 0 { + dst := val.Field(idx) + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + err = xml.Unmarshal(body, dst.Interface()) + } else { + err = xml.Unmarshal(body, result) + } + + if err != nil { + err = &DeserializationError{ + Err: err, + Snapshot: body, + } + } + + return err +} + +func unmarshalBodyXmlVersions(result any, output *OperationOutput) error { + var err error + var body []byte + if output.Body != nil { + defer output.Body.Close() + if body, err = io.ReadAll(output.Body); err != nil { + return err + } + } + + if len(body) > 0 { + oldStrings := []string{"", "", "", ""} + newStrings := []string{"", "", "", ""} + + replacedData := string(body) + for i := range oldStrings { + replacedData = strings.Replace(replacedData, oldStrings[i], newStrings[i], -1) + } + if err = xml.Unmarshal([]byte(replacedData), result); err != nil { + err = &DeserializationError{ + Err: err, + Snapshot: body, + } + } + } + return err +} + +func unmarshalBodyDefault(result any, output *OperationOutput) error { + var err error + var body []byte + if output.Body != nil { + defer output.Body.Close() + if body, err = io.ReadAll(output.Body); err != nil { + return err + } + } + + // extract body + if len(body) > 0 { + contentType := output.Headers.Get("Content-Type") + switch contentType { + case "application/xml": + err = xml.Unmarshal(body, result) + case "application/json": + err = json.Unmarshal(body, result) + case "application/json;charset=utf-8": + err = json.Unmarshal(body, result) + default: + err = fmt.Errorf("unsupport contentType:%s", contentType) + } + + if err != nil { + err = &DeserializationError{ + Err: err, + Snapshot: body, + } + } + } + return err +} + +func unmarshalCallbackBody(result any, output *OperationOutput) error { + var err error + var body []byte + if output.Body != nil { + defer output.Body.Close() + if body, err = io.ReadAll(output.Body); err != nil { + return err + } + } + if len(body) > 0 { + switch r := result.(type) { + case *PutObjectResult: + if err = json.Unmarshal(body, &r.CallbackResult); err != nil { + return err + } + case *CompleteMultipartUploadResult: + if err = json.Unmarshal(body, &r.CallbackResult); err != nil { + return err + } + } + } + return err +} + +func unmarshalHeader(result any, output *OperationOutput) error { + val := reflect.ValueOf(result) + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + if val.Kind() != reflect.Struct || output == nil { + return nil + } + + filedInfos := map[string]fieldInfo{} + + t := val.Type() + var usermetaKeys []string + for k := 0; k < t.NumField(); k++ { + if tag, ok := t.Field(k).Tag.Lookup("output"); ok { + tokens := strings.Split(tag, ",") + if len(tokens) < 2 { + continue + } + // header|query|body,filed_name,[required,time,usermeta...] 
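+			// Fields tagged "usermeta" collect every response header sharing
+			// the declared prefix (e.g. x-oss-meta-) into a map keyed by the
+			// remainder of the header name: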
+ switch tokens[0] { + case "header": + lowkey := strings.ToLower(tokens[1]) + + var flags int = 0 + if len(tokens) >= 3 { + flags = parseFiledFlags(tokens[2:]) + } + filedInfos[lowkey] = fieldInfo{idx: k, flags: flags} + if flags&fTypeUsermeta != 0 { + usermetaKeys = append(usermetaKeys, lowkey) + } + } + } + } + var err error + for key, vv := range output.Headers { + lkey := strings.ToLower(key) + for _, prefix := range usermetaKeys { + if strings.HasPrefix(lkey, prefix) { + if field, ok := filedInfos[prefix]; ok { + if field.flags&fTypeUsermeta != 0 { + mapKey := strings.TrimPrefix(lkey, prefix) + err = setMapStringReflectValue(val.Field(field.idx), mapKey, vv[0]) + } + } + } + } + if field, ok := filedInfos[lkey]; ok { + if field.flags&fTypeTime != 0 { + if t, err := http.ParseTime(vv[0]); err == nil { + err = setTimeReflectValue(val.Field(field.idx), t) + } + } else { + err = setReflectValue(val.Field(field.idx), vv[0]) + } + if err != nil { + return err + } + } + } + + return nil +} + +func unmarshalHeaderLite(result any, output *OperationOutput) error { + val := reflect.ValueOf(result) + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + if val.Kind() != reflect.Struct || output == nil { + return nil + } + + t := val.Type() + for k := 0; k < t.NumField(); k++ { + if tag := t.Field(k).Tag.Get("output"); tag != "" { + tokens := strings.Split(tag, ",") + if len(tokens) != 2 { + continue + } + switch tokens[0] { + case "header": + if src := output.Headers.Get(tokens[1]); src != "" { + if err := setReflectValue(val.Field(k), src); err != nil { + return err + } + } + } + } + } + return nil +} + +func (c *Client) unmarshalOutput(result any, output *OperationOutput, handlers ...func(any, *OperationOutput) error) error { + // Common + if cm, ok := result.(ResultCommonInterface); ok { + cm.CopyIn(output.Status, output.StatusCode, output.Headers, output.OpMetadata) + } + + var err error + for _, h := range handlers { + if err = h(result, output); err != nil { + break + } + } + return err +} + +func updateContentMd5(_ any, input *OperationInput) error { + var err error + var contentMd5 string + if input.Body != nil { + var r io.ReadSeeker + var ok bool + if r, ok = input.Body.(io.ReadSeeker); !ok { + buf, _ := io.ReadAll(input.Body) + r = bytes.NewReader(buf) + input.Body = r + } + h := md5.New() + if _, err = copySeekableBody(h, r); err != nil { + // error + } else { + contentMd5 = base64.StdEncoding.EncodeToString(h.Sum(nil)) + } + } else { + contentMd5 = "1B2M2Y8AsgTpgAmY7PhCfg==" + } + + // set content-md5 and content-type + if err == nil { + if input.Headers == nil { + input.Headers = map[string]string{} + } + input.Headers["Content-MD5"] = contentMd5 + } + + return err +} + +func updateContentType(_ any, input *OperationInput) error { + if input.Headers == nil { + input.Headers = map[string]string{} + } + if _, ok := input.Headers[HTTPHeaderContentType]; !ok { + value := TypeByExtension(ToString(input.Key)) + if value == "" { + value = contentTypeDefault + } + input.Headers[HTTPHeaderContentType] = value + } + return nil +} + +func addProgress(request any, input *OperationInput) error { + var w io.Writer + switch req := request.(type) { + case *PutObjectRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + case *AppendObjectRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + case 
*UploadPartRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + default: + return nil + } + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + return nil +} + +func addProcess(request any, input *OperationInput) error { + switch req := request.(type) { + case *ProcessObjectRequest: + if req.Process == nil { + return nil + } + processData := fmt.Sprintf("%v=%v", "x-oss-process", ToString(req.Process)) + input.Body = strings.NewReader(processData) + case *AsyncProcessObjectRequest: + if req.AsyncProcess == nil { + return nil + } + processData := fmt.Sprintf("%v=%v", "x-oss-async-process", ToString(req.AsyncProcess)) + input.Body = strings.NewReader(processData) + default: + return nil + } + + return nil +} + +func addCrcCheck(_ any, input *OperationInput) error { + var w io.Writer = NewCRC64(0) + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + input.OpMetadata.Add(OpMetaKeyResponsHandler, func(response *http.Response) error { + return checkResponseHeaderCRC64(fmt.Sprint(w.(hash.Hash64).Sum64()), response.Header) + }) + return nil +} + +func addCallback(_ any, input *OperationInput) error { + input.OpMetadata.Add(OpMetaKeyResponsHandler, callbackErrorResponseHandler) + return nil +} + +func enableNonStream(_ any, input *OperationInput) error { + input.OpMetadata.Add(OpMetaKeyResponsHandler, func(response *http.Response) error { + return nonStreamResponseHandler(response) + }) + return nil +} + +func (c *Client) updateContentType(request any, input *OperationInput) error { + if !c.hasFeature(FeatureAutoDetectMimeType) { + return nil + } + return updateContentType(request, input) +} + +func (c *Client) addCrcCheck(request any, input *OperationInput) error { + if !c.hasFeature(FeatureEnableCRC64CheckUpload) { + return nil + } + return addCrcCheck(request, input) +} + +func encodeSourceObject(request any) string { + var bucket, key, versionId string + switch req := request.(type) { + case *CopyObjectRequest: + key = ToString(req.SourceKey) + if req.SourceBucket != nil { + bucket = *req.SourceBucket + } else { + bucket = ToString(req.Bucket) + } + versionId = ToString(req.SourceVersionId) + case *UploadPartCopyRequest: + key = ToString(req.SourceKey) + if req.SourceBucket != nil { + bucket = *req.SourceBucket + } else { + bucket = ToString(req.Bucket) + } + versionId = ToString(req.SourceVersionId) + } + + source := fmt.Sprintf("/%s/%s", bucket, escapePath(key, false)) + if versionId != "" { + source += "?versionId=" + versionId + } + + return source +} + +func (c *Client) toClientError(err error, code string, output *OperationOutput) error { + if err == nil { + return nil + } + + return &ClientError{ + Code: code, + Message: fmt.Sprintf("execute %s fail, error code is %s, request id:%s", + output.Input.OpName, + code, + output.Headers.Get(HeaderOssRequestID), + ), + Err: err} +} + +func (c *Client) hasFeature(flag FeatureFlagsType) bool { + return (c.options.FeatureFlags & flag) > 0 +} + +func (c *Client) retryMaxAttempts(opts *Options) int { + if opts == nil { + opts = &c.options + } + + if opts.RetryMaxAttempts != nil { + return ToInt(opts.RetryMaxAttempts) + } + + if opts.Retryer != nil { + return opts.Retryer.MaxAttempts() + } + + return retry.DefaultMaxAttempts +} + +func (c *Client) dumpOperationOutput(output *OperationOutput) string { + if output == nil { + return "" + } + return fmt.Sprintf("http.Request[%p] Status:%v, StatusCode%v, RequestId:%v", + output.httpRequest, output.Status, output.StatusCode, + 
output.Headers.Get(HeaderOssRequestID), + ) +} + +// LoggerHTTPReq Print the header information of the http request +func (c *Client) logHttpPRequet(request *http.Request) { + if c.getLogLevel() < LogDebug { + return + } + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("http.request[%p]", request)) + if request != nil { + logBuffer.WriteString(fmt.Sprintf("Method:%s\t", request.Method)) + logBuffer.WriteString(fmt.Sprintf("Host:%s\t", request.URL.Host)) + logBuffer.WriteString(fmt.Sprintf("Path:%s\t", request.URL.Path)) + logBuffer.WriteString(fmt.Sprintf("Query:%s\t", request.URL.RawQuery)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + + for k, v := range request.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + } + + c.inner.Log.Debugf("%s", logBuffer.String()) +} + +// LoggerHTTPResp Print Response to http request +func (c *Client) logHttpResponse(request *http.Request, response *http.Response) { + if c.getLogLevel() < LogDebug { + return + } + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("http.request[%p]|http.response[%p]", request, response)) + if response != nil { + logBuffer.WriteString(fmt.Sprintf("StatusCode:%d\t", response.StatusCode)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + for k, v := range response.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + } + c.inner.Log.Debugf("%s", logBuffer.String()) +} + +func (c *Client) getLogLevel() int { + if c.inner.Log != nil { + return c.inner.Log.Level() + } + return LogOff +} + +// Content-Type +const ( + contentTypeDefault string = "application/octet-stream" + contentTypeXML = "application/xml" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go new file mode 100644 index 000000000..b7531d088 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go @@ -0,0 +1,164 @@ +package oss + +import ( + "context" + "errors" + "fmt" + "hash" + "io" + "os" +) + +// NewDownloader creates a new Downloader instance to download objects. +func (c *Client) NewDownloader(optFns ...func(*DownloaderOptions)) *Downloader { + return NewDownloader(c, optFns...) +} + +// NewUploader creates a new Uploader instance to upload objects. +func (c *Client) NewUploader(optFns ...func(*UploaderOptions)) *Uploader { + return NewUploader(c, optFns...) +} + +// NewCopier creates a new Copier instance to copy objects. +func (c *Client) NewCopier(optFns ...func(*CopierOptions)) *Copier { + return NewCopier(c, optFns...) +} + +// OpenFile opens the named file for reading. +func (c *Client) OpenFile(ctx context.Context, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) { + return NewReadOnlyFile(ctx, c, bucket, key, optFns...) +} + +// AppendFile opens or creates the named file for appending. +func (c *Client) AppendFile(ctx context.Context, bucket string, key string, optFns ...func(*AppendOptions)) (*AppendOnlyFile, error) { + return NewAppendFile(ctx, c, bucket, key, optFns...) 
+} + +type IsObjectExistOptions struct { + VersionId *string + RequestPayer *string +} + +// IsObjectExist checks if the object exists. +func (c *Client) IsObjectExist(ctx context.Context, bucket string, key string, optFns ...func(*IsObjectExistOptions)) (bool, error) { + options := IsObjectExistOptions{} + for _, fn := range optFns { + fn(&options) + } + _, err := c.GetObjectMeta(ctx, &GetObjectMetaRequest{Bucket: Ptr(bucket), Key: Ptr(key), VersionId: options.VersionId, RequestPayer: options.RequestPayer}) + if err == nil { + return true, nil + } + var serr *ServiceError + errors.As(err, &serr) + if errors.As(err, &serr) { + if serr.Code == "NoSuchKey" || + // error code not in response header + (serr.StatusCode == 404 && serr.Code == "BadErrorResponse") { + return false, nil + } + } + return false, err +} + +// IsBucketExist checks if the bucket exists. +func (c *Client) IsBucketExist(ctx context.Context, bucket string, optFns ...func(*Options)) (bool, error) { + _, err := c.GetBucketAcl(ctx, &GetBucketAclRequest{Bucket: Ptr(bucket)}, optFns...) + if err == nil { + return true, nil + } + var serr *ServiceError + if errors.As(err, &serr) { + if serr.Code == "NoSuchBucket" { + return false, nil + } + return true, nil + } + return false, err +} + +// PutObjectFromFile creates a new object from the local file. +func (c *Client) PutObjectFromFile(ctx context.Context, request *PutObjectRequest, filePath string, optFns ...func(*Options)) (*PutObjectResult, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer file.Close() + pRequest := *request + pRequest.Body = file + return c.PutObject(ctx, &pRequest, optFns...) +} + +// GetObjectToFile downloads the object into a local file. +func (c *Client) GetObjectToFile(ctx context.Context, request *GetObjectRequest, filePath string, optFns ...func(*Options)) (*GetObjectResult, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + var ( + hash hash.Hash64 + prog *progressTracker + result *GetObjectResult + err error + retry bool + ) + if request.ProgressFn != nil { + prog = &progressTracker{ + pr: request.ProgressFn, + } + } + if c.hasFeature(FeatureEnableCRC64CheckDownload) { + hash = NewCRC64(0) + } + i := 0 + maxRetrys := c.retryMaxAttempts(nil) + for { + i++ + result, retry, err = c.getObjectToFileNoRerty(ctx, request, filePath, hash, prog, optFns...) + if err == nil || !retry { + break + } + if i > maxRetrys { + break + } + } + return result, err +} + +func (c *Client) getObjectToFileNoRerty(ctx context.Context, request *GetObjectRequest, filePath string, + hash hash.Hash64, prog *progressTracker, optFns ...func(*Options)) (*GetObjectResult, bool, error) { + result, err := c.GetObject(ctx, request, optFns...) 
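+	// Errors from the GetObject call itself (and from creating the local file
+	// below) are reported as non-retryable; only failures while streaming the
+	// body to disk return retry=true to the loop in GetObjectToFile.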
+ if err != nil { + return nil, false, err + } + defer result.Body.Close() + + file, err := os.Create(filePath) + if err != nil { + return nil, false, err + } + defer file.Close() + + var writers []io.Writer + if hash != nil { + hash.Reset() + writers = append(writers, hash) + } + if prog != nil { + prog.total = result.ContentLength + prog.Reset() + writers = append(writers, prog) + } + var r io.Reader = result.Body + if len(writers) > 0 { + r = io.TeeReader(result.Body, io.MultiWriter(writers...)) + } + _, err = io.Copy(file, r) + + if err == nil && hash != nil { + err = checkResponseHeaderCRC64(fmt.Sprint(hash.Sum64()), result.Headers) + } + return result, true, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go new file mode 100644 index 000000000..109f6499d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go @@ -0,0 +1,407 @@ +package oss + +import ( + "context" + "fmt" +) + +type PaginatorOptions struct { + // The maximum number of items in the response. + Limit int32 +} + +// ListObjectsPaginator is a paginator for ListObjects +type ListObjectsPaginator struct { + options PaginatorOptions + client *Client + request *ListObjectsRequest + marker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectsPaginator(request *ListObjectsRequest, optFns ...func(*PaginatorOptions)) *ListObjectsPaginator { + if request == nil { + request = &ListObjectsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectsPaginator{ + options: options, + client: c, + request: request, + marker: request.Marker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListObjectsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjects page. +func (p *ListObjectsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.Marker = p.marker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjects(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextMarker + + return result, nil +} + +// ListObjectsV2Paginator is a paginator for ListObjectsV2 +type ListObjectsV2Paginator struct { + options PaginatorOptions + client *Client + request *ListObjectsV2Request + continueToken *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectsV2Paginator(request *ListObjectsV2Request, optFns ...func(*PaginatorOptions)) *ListObjectsV2Paginator { + if request == nil { + request = &ListObjectsV2Request{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectsV2Paginator{ + options: options, + client: c, + request: request, + continueToken: request.ContinuationToken, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. 
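+// A typical drain loop (illustrative; client is a *Client):
+//
+//	p := client.NewListObjectsV2Paginator(&ListObjectsV2Request{Bucket: Ptr("examplebucket")})
+//	for p.HasNext() {
+//		page, err := p.NextPage(context.TODO())
+//		if err != nil {
+//			break
+//		}
+//		_ = page.Contents
+//	}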
+func (p *ListObjectsV2Paginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectsV2 page. +func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Result, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.ContinuationToken = p.continueToken + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjectsV2(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.continueToken = result.NextContinuationToken + + return result, nil +} + +// ListObjectVersionsPaginator is a paginator for ListObjectVersions +type ListObjectVersionsPaginator struct { + options PaginatorOptions + client *Client + request *ListObjectVersionsRequest + keyMarker *string + versionIdMarker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectVersionsPaginator(request *ListObjectVersionsRequest, optFns ...func(*PaginatorOptions)) *ListObjectVersionsPaginator { + if request == nil { + request = &ListObjectVersionsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectVersionsPaginator{ + options: options, + client: c, + request: request, + keyMarker: request.KeyMarker, + versionIdMarker: request.VersionIdMarker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListObjectVersionsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectVersions page. +func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.KeyMarker = p.keyMarker + request.VersionIdMarker = p.versionIdMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjectVersions(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.keyMarker = result.NextKeyMarker + p.versionIdMarker = result.NextVersionIdMarker + + return result, nil +} + +// ListBucketsPaginator is a paginator for ListBuckets +type ListBucketsPaginator struct { + options PaginatorOptions + client *Client + request *ListBucketsRequest + marker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListBucketsPaginator(request *ListBucketsRequest, optFns ...func(*PaginatorOptions)) *ListBucketsPaginator { + if request == nil { + request = &ListBucketsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListBucketsPaginator{ + options: options, + client: c, + request: request, + marker: request.Marker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListBucketsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListBuckets page. 
+func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.Marker = p.marker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + + result, err := p.client.ListBuckets(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextMarker + + return result, nil +} + +type ListPartsAPIClient interface { + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) +} + +// ListPartsPaginator is a paginator for ListParts +type ListPartsPaginator struct { + options PaginatorOptions + client ListPartsAPIClient + request *ListPartsRequest + marker int32 + firstPage bool + isTruncated bool +} + +func NewListPartsPaginator(c ListPartsAPIClient, request *ListPartsRequest, optFns ...func(*PaginatorOptions)) *ListPartsPaginator { + if request == nil { + request = &ListPartsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxParts + + for _, fn := range optFns { + fn(&options) + } + + return &ListPartsPaginator{ + options: options, + client: c, + request: request, + marker: request.PartNumberMarker, + firstPage: true, + isTruncated: false, + } +} + +func (c *Client) NewListPartsPaginator(request *ListPartsRequest, optFns ...func(*PaginatorOptions)) *ListPartsPaginator { + return NewListPartsPaginator(c, request, optFns...) +} + +// HasNext Returns true if there’s a next page. +func (p *ListPartsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListParts page. +func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.PartNumberMarker = p.marker + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxParts = limit + request.EncodingType = Ptr("url") + result, err := p.client.ListParts(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextPartNumberMarker + + return result, nil +} + +// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads +type ListMultipartUploadsPaginator struct { + options PaginatorOptions + client *Client + request *ListMultipartUploadsRequest + keyMarker *string + uploadIdMarker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListMultipartUploadsPaginator(request *ListMultipartUploadsRequest, optFns ...func(*PaginatorOptions)) *ListMultipartUploadsPaginator { + if request == nil { + request = &ListMultipartUploadsRequest{} + } + options := PaginatorOptions{} + options.Limit = request.MaxUploads + for _, fn := range optFns { + fn(&options) + } + return &ListMultipartUploadsPaginator{ + options: options, + client: c, + request: request, + keyMarker: request.KeyMarker, + uploadIdMarker: request.UploadIdMarker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListMultipartUploadsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListMultipartUploads page. 
+func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + request := *p.request + request.KeyMarker = p.keyMarker + request.UploadIdMarker = p.uploadIdMarker + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxUploads = limit + request.EncodingType = Ptr("url") + result, err := p.client.ListMultipartUploads(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.keyMarker = result.NextKeyMarker + p.uploadIdMarker = result.NextUploadIdMarker + return result, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go new file mode 100644 index 000000000..c5015955a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go @@ -0,0 +1,164 @@ +package oss + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PresignOptions struct { + // Expires sets the expiration duration for the generated presign url. + Expires time.Duration + + // Expiration sets the expiration time for the generated presign url. + Expiration time.Time +} + +type PresignResult struct { + Method string + URL string + Expiration time.Time + SignedHeaders map[string]string +} + +type nopHttpClient struct { +} + +func (c *nopHttpClient) Do(*http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: http.NoBody, + }, nil +} + +var ( + defaultNopHttpClient = &nopHttpClient{} + defaultPresignOptions = []func(*Options){ + func(o *Options) { + o.HttpClient = defaultNopHttpClient + o.AuthMethod = Ptr(AuthMethodQuery) + }, + } +) + +func (c *Client) Presign(ctx context.Context, request any, optFns ...func(*PresignOptions)) (*PresignResult, error) { + options := PresignOptions{} + + if request == nil { + return nil, NewErrParamNull("request") + } + + for _, fn := range optFns { + fn(&options) + } + + input := OperationInput{} + if err := c.marshalPresignInput(request, &input); err != nil { + return nil, err + } + + // expiration + if !options.Expiration.IsZero() { + input.OpMetadata.Set(signer.SignTime, options.Expiration) + } else if options.Expires > 0 { + input.OpMetadata.Set(signer.SignTime, time.Now().Add(options.Expires)) + } + output, err := c.invokeOperation(ctx, &input, defaultPresignOptions) + if err != nil { + return nil, err + } + + result := &PresignResult{} + err = c.unmarshalPresignOutput(result, output) + return result, err +} + +func PresignExpires(value time.Duration) func(*PresignOptions) { + return func(o *PresignOptions) { + o.Expires = value + } +} + +func PresignExpiration(value time.Time) func(*PresignOptions) { + return func(o *PresignOptions) { + o.Expiration = value + } +} + +func (c *Client) marshalPresignInput(request any, input *OperationInput) error { + switch t := request.(type) { + case *GetObjectRequest: + input.OpName = "GetObject" + input.Method = "GET" + input.Bucket = t.Bucket + input.Key = t.Key + case *PutObjectRequest: + input.OpName = "PutObject" + input.Method = "PUT" + input.Bucket = t.Bucket + input.Key = t.Key + case *HeadObjectRequest: + input.OpName = "HeadObject" + input.Method = "HEAD" + input.Bucket = t.Bucket + input.Key = t.Key + case 
*InitiateMultipartUploadRequest: + input.OpName = "InitiateMultipartUpload" + input.Method = "POST" + input.Bucket = t.Bucket + input.Key = t.Key + input.Parameters = map[string]string{ + "uploads": "", + } + case *UploadPartRequest: + input.OpName = "UploadPart" + input.Method = "PUT" + input.Bucket = t.Bucket + input.Key = t.Key + case *CompleteMultipartUploadRequest: + input.OpName = "CompleteMultipartUpload" + input.Method = "POST" + input.Bucket = t.Bucket + input.Key = t.Key + case *AbortMultipartUploadRequest: + input.OpName = "AbortMultipartUpload" + input.Method = "DELETE" + input.Bucket = t.Bucket + input.Key = t.Key + default: + return NewErrParamInvalid(fmt.Sprintf("request %v", reflect.ValueOf(request).Type().String())) + } + + return c.marshalInput(request, input) +} + +func (c *Client) unmarshalPresignOutput(result *PresignResult, output *OperationOutput) error { + if chk, ok := c.options.Signer.(interface{ IsSignedHeader([]string, string) bool }); ok { + header := map[string]string{} + for k, v := range output.httpRequest.Header { + if chk.IsSignedHeader(c.options.AdditionalHeaders, k) { + header[k] = v[0] + } + } + if len(header) > 0 { + result.SignedHeaders = header + } + } + result.Method = output.httpRequest.Method + result.URL = output.httpRequest.URL.String() + if signTime, ok := output.OpMetadata.Get(signer.SignTime).(time.Time); ok { + result.Expiration = signTime + } + _, ok := c.options.Signer.(*signer.SignerV4) + if ok { + if !result.Expiration.IsZero() && (result.Expiration.After(time.Now().Add(7 * 24 * time.Hour))) { + return fmt.Errorf("expires should be not greater than 604800(seven days)") + } + } + return nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go new file mode 100644 index 000000000..9b86a46e1 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go @@ -0,0 +1,286 @@ +package oss + +import ( + "net/http" + "os" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Config struct { + // The region in which the bucket is located. + Region *string + + // The domain names that other services can use to access OSS. + Endpoint *string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. + RetryMaxAttempts *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + Retryer retry.Retryer + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HttpClient HTTPClient + + // The credentials provider to use when signing requests. + CredentialsProvider credentials.CredentialsProvider + + // Allows you to enable the client to use path-style addressing, i.e., https://oss-cn-hangzhou.aliyuncs.com/bucket/key. + // By default, the oss client will use virtual hosted addressing i.e., https://bucket.oss-cn-hangzhou.aliyuncs.com/key. + UsePathStyle *bool + + // If the endpoint is s CName, set this flag to true + UseCName *bool + + // Connect timeout + ConnectTimeout *time.Duration + + // read & write timeout + ReadWriteTimeout *time.Duration + + // Skip server certificate verification + InsecureSkipVerify *bool + + // Enable http redirect or not. 
+	EnabledRedirect *bool
+
+	// Flag of using proxy host.
+	ProxyHost *string
+
+	// Read the proxy setting from the environment variables.
+	// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof).
+	// HTTPS_PROXY takes precedence over HTTP_PROXY for https requests.
+	ProxyFromEnvironment *bool
+
+	// Upload bandwidth limit in kBytes/s for all requests.
+	UploadBandwidthlimit *int64
+
+	// Download bandwidth limit in kBytes/s for all requests.
+	DownloadBandwidthlimit *int64
+
+	// Authentication with OSS Signature Version.
+	SignatureVersion *SignatureVersionType
+
+	// The level of the output log.
+	LogLevel *int
+
+	// An interface for the SDK to log messages to.
+	LogPrinter LogPrinter
+
+	// DisableSSL forces the endpoint to be resolved as HTTP.
+	DisableSSL *bool
+
+	// Dual-stack endpoints are provided in some regions.
+	// This allows an IPv4 client and an IPv6 client to access a bucket by using the same endpoint.
+	// Set this to `true` to use a dual-stack endpoint for the requests.
+	UseDualStackEndpoint *bool
+
+	// OSS provides the transfer acceleration feature to accelerate data transfers
+	// for uploads and downloads across countries and regions.
+	// Set this to `true` to use an accelerate endpoint for the requests.
+	UseAccelerateEndpoint *bool
+
+	// You can use an internal endpoint to communicate between Alibaba Cloud services located within the same
+	// region over the internal network. You are not charged for the traffic generated over the internal network.
+	// Set this to `true` to use an internal endpoint for the requests.
+	UseInternalEndpoint *bool
+
+	// Check data integrity of uploads via the crc64 by default.
+	// This feature takes effect for PutObject, AppendObject, UploadPart, Uploader.UploadFrom and Uploader.UploadFile.
+	// Set this to `true` to disable this feature.
+	DisableUploadCRC64Check *bool
+
+	// Check data integrity of downloads via the crc64 by default.
+	// This feature only takes effect for Downloader.DownloadFile and GetObjectToFile.
+	// Set this to `true` to disable this feature.
+	DisableDownloadCRC64Check *bool
+
+	// Additional signable headers.
+	AdditionalHeaders []string
+
+	// The optional user-specific identifier appended to the User-Agent header.
+	UserAgent *string
+
+	// The cloud box id.
+	CloudBoxId *string
+
+	// Whether the cloud box id is automatically extracted from the endpoint.
+ EnableAutoDetectCloudBoxId *bool +} + +func NewConfig() *Config { + return &Config{} +} + +func (c Config) Copy() Config { + cp := c + return cp +} + +func LoadDefaultConfig() *Config { + config := &Config{} + + // load from env + str := os.Getenv("OSS_SDK_LOG_LEVEL") + if str != "" { + if level := ToLogLevel(str); level > LogOff { + config.LogLevel = Ptr(level) + } + } + + return config +} + +func (c *Config) WithRegion(region string) *Config { + c.Region = Ptr(region) + return c +} + +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = Ptr(endpoint) + return c +} + +func (c *Config) WithRetryMaxAttempts(value int) *Config { + c.RetryMaxAttempts = Ptr(value) + return c +} + +func (c *Config) WithRetryer(retryer retry.Retryer) *Config { + c.Retryer = retryer + return c +} + +func (c *Config) WithHttpClient(client *http.Client) *Config { + c.HttpClient = client + return c +} + +func (c *Config) WithCredentialsProvider(provider credentials.CredentialsProvider) *Config { + c.CredentialsProvider = provider + return c +} + +func (c *Config) WithUsePathStyle(enable bool) *Config { + c.UsePathStyle = Ptr(enable) + return c +} + +func (c *Config) WithUseCName(enable bool) *Config { + c.UseCName = Ptr(enable) + return c +} + +func (c *Config) WithConnectTimeout(value time.Duration) *Config { + c.ConnectTimeout = Ptr(value) + return c +} + +func (c *Config) WithReadWriteTimeout(value time.Duration) *Config { + c.ReadWriteTimeout = Ptr(value) + return c +} + +func (c *Config) WithInsecureSkipVerify(value bool) *Config { + c.InsecureSkipVerify = Ptr(value) + return c +} + +func (c *Config) WithEnabledRedirect(value bool) *Config { + c.EnabledRedirect = Ptr(value) + return c +} + +func (c *Config) WithProxyHost(value string) *Config { + c.ProxyHost = Ptr(value) + return c +} + +func (c *Config) WithProxyFromEnvironment(value bool) *Config { + c.ProxyFromEnvironment = Ptr(value) + return c +} + +func (c *Config) WithUploadBandwidthlimit(value int64) *Config { + c.UploadBandwidthlimit = Ptr(value) + return c +} + +func (c *Config) WithDownloadBandwidthlimit(value int64) *Config { + c.DownloadBandwidthlimit = Ptr(value) + return c +} + +func (c *Config) WithSignatureVersion(value SignatureVersionType) *Config { + c.SignatureVersion = Ptr(value) + return c +} + +func (c *Config) WithLogLevel(level int) *Config { + c.LogLevel = Ptr(level) + return c +} + +func (c *Config) WithLogPrinter(printer LogPrinter) *Config { + c.LogPrinter = printer + return c +} + +func (c *Config) WithDisableSSL(value bool) *Config { + c.DisableSSL = Ptr(value) + return c +} + +func (c *Config) WithUseDualStackEndpoint(value bool) *Config { + c.UseDualStackEndpoint = Ptr(value) + return c +} + +func (c *Config) WithUseAccelerateEndpoint(value bool) *Config { + c.UseAccelerateEndpoint = Ptr(value) + return c +} + +func (c *Config) WithUseInternalEndpoint(value bool) *Config { + c.UseInternalEndpoint = Ptr(value) + return c +} + +func (c *Config) WithDisableUploadCRC64Check(value bool) *Config { + c.DisableUploadCRC64Check = Ptr(value) + return c +} + +func (c *Config) WithDisableDownloadCRC64Check(value bool) *Config { + c.DisableDownloadCRC64Check = Ptr(value) + return c +} + +func (c *Config) WithAdditionalHeaders(value []string) *Config { + c.AdditionalHeaders = value + return c +} + +func (c *Config) WithUserAgent(value string) *Config { + c.UserAgent = Ptr(value) + return c +} + +func (c *Config) WithCloudBoxId(value string) *Config { + c.CloudBoxId = Ptr(value) + return c +} + +func (c *Config) 
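+// A hedged sketch of the fluent With* setters above: each one stores a pointer
+// and returns the *Config, so settings chain; the values here are placeholders.
+//
+//	cfg := oss.NewConfig().
+//		WithEndpoint("https://oss-cn-hangzhou.aliyuncs.com").
+//		WithRegion("cn-hangzhou").
+//		WithConnectTimeout(10 * time.Second).
+//		WithRetryMaxAttempts(5)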
WithEnableAutoDetectCloudBoxId(value bool) *Config { + c.EnableAutoDetectCloudBoxId = Ptr(value) + return c +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go new file mode 100644 index 000000000..768d95dfa --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go @@ -0,0 +1,587 @@ +package oss + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport" +) + +var metadataCopied = map[string]struct{}{ + "content-type": {}, + "content-language": {}, + "content-encoding": {}, + "content-disposition": {}, + "cache-control": {}, + "expires": {}, +} + +type CopierOptions struct { + PartSize int64 + + ParallelNum int + + MultipartCopyThreshold int64 + + LeavePartsOnError bool + + DisableShallowCopy bool + + ClientOptions []func(*Options) + + // MetaProperties and TagProperties takes effect in Copier.Copy + MetadataProperties *HeadObjectResult + + TagProperties *GetObjectTaggingResult +} + +type Copier struct { + options CopierOptions + client CopyAPIClient + featureFlags FeatureFlagsType +} + +// NewCopier creates a new Copier instance to copy objects. +// Pass In additional functional options to customize the copier's behavior. +func NewCopier(api CopyAPIClient, optFns ...func(*CopierOptions)) *Copier { + options := CopierOptions{ + PartSize: DefaultCopyPartSize, + ParallelNum: DefaultCopyParallel, + MultipartCopyThreshold: DefaultCopyThreshold, + LeavePartsOnError: false, + DisableShallowCopy: false, + } + + for _, fn := range optFns { + fn(&options) + } + + options.TagProperties = nil + options.MetadataProperties = nil + + c := &Copier{ + client: api, + options: options, + } + + //Get Client Feature + switch t := api.(type) { + case *Client: + c.featureFlags = t.options.FeatureFlags + } + + return c +} + +type CopyResult struct { + UploadId *string + + ETag *string + + VersionId *string + + HashCRC64 *string + + ResultCommon +} + +type CopyError struct { + Err error + UploadId string + Path string +} + +func (m *CopyError) Error() string { + var extra string + if m.Err != nil { + extra = fmt.Sprintf(", cause: %s", m.Err.Error()) + } + return fmt.Sprintf("copy failed, upload id: %s%s", m.UploadId, extra) +} + +func (m *CopyError) Unwrap() error { + return m.Err +} + +func (c *Copier) Copy(ctx context.Context, request *CopyObjectRequest, optFns ...func(*CopierOptions)) (*CopyResult, error) { + // Copier wrapper + delegate, err := c.newDelegate(ctx, request, optFns...) 
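+	// A usage sketch for Copier.Copy as defined here (bucket and key names are
+	// placeholders; client is an *oss.Client built elsewhere):
+	//
+	//	copier := oss.NewCopier(client)
+	//	result, err := copier.Copy(context.TODO(), &oss.CopyObjectRequest{
+	//		Bucket:       oss.Ptr("dst-bucket"),
+	//		Key:          oss.Ptr("dst-key"),
+	//		SourceBucket: oss.Ptr("src-bucket"),
+	//		SourceKey:    oss.Ptr("src-key"),
+	//	}, func(o *oss.CopierOptions) { o.PartSize = 100 * 1024 * 1024 })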
+ if err != nil { + return nil, err + } + + if err = delegate.checkSource(); err != nil { + return nil, err + } + + if err = delegate.applySource(); err != nil { + return nil, err + } + + return delegate.copy() +} + +type copierDelegate struct { + base *Copier + options CopierOptions + context context.Context + + request *CopyObjectRequest + + // Source's Info + metaProp *HeadObjectResult + tagProp *GetObjectTaggingResult + + sizeInBytes int64 + transferred int64 +} + +func (c *Copier) newDelegate(ctx context.Context, request *CopyObjectRequest, optFns ...func(*CopierOptions)) (*copierDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if request.Bucket == nil { + return nil, NewErrParamNull("request.Bucket") + } + + if request.Key == nil { + return nil, NewErrParamNull("request.Key") + } + + if request.SourceKey == nil { + return nil, NewErrParamNull("request.SourceKey") + } + + if request.MetadataDirective != nil && !isValidCopyDirective(*request.MetadataDirective) { + return nil, NewErrParamInvalid("request.MetadataDirective") + } + + if request.TaggingDirective != nil && !isValidCopyDirective(*request.TaggingDirective) { + return nil, NewErrParamInvalid("request.TaggingDirective") + } + + d := copierDelegate{ + base: c, + options: c.options, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&d.options) + } + + if d.options.ParallelNum <= 0 { + d.options.ParallelNum = DefaultCopyParallel + } + + if d.options.PartSize <= 0 { + d.options.PartSize = DefaultCopyPartSize + } + + if d.options.MultipartCopyThreshold < 0 { + d.options.MultipartCopyThreshold = DefaultCopyThreshold + } + + d.tagProp = d.options.TagProperties + d.metaProp = d.options.MetadataProperties + + return &d, nil +} + +func (d *copierDelegate) checkSource() error { + if d.metaProp != nil { + return nil + } + + var request HeadObjectRequest + copyRequest(&request, d.request) + if d.request.SourceBucket != nil { + request.Bucket = d.request.SourceBucket + } + request.Key = d.request.SourceKey + request.VersionId = d.request.SourceVersionId + + result, err := d.base.client.HeadObject(d.context, &request, d.options.ClientOptions...) 
+	if err != nil {
+		return err
+	}
+
+	d.metaProp = result
+
+	return nil
+}
+
+func (d *copierDelegate) applySource() error {
+
+	d.sizeInBytes = d.metaProp.ContentLength
+
+	// single copy mode
+	if d.sizeInBytes <= d.options.MultipartCopyThreshold {
+		return nil
+	}
+
+	// multipart copy mode
+	// part size
+	partSize := d.options.PartSize
+	if d.sizeInBytes > 0 {
+		for d.sizeInBytes/partSize >= int64(MaxUploadParts) {
+			partSize += d.options.PartSize
+		}
+	}
+	d.options.PartSize = partSize
+
+	return nil
+}
+
+func (d *copierDelegate) canUseShallowCopy() bool {
+	if d.options.DisableShallowCopy {
+		return false
+	}
+
+	// Change StorageClass
+	if d.request.StorageClass != "" {
+		return false
+	}
+
+	// Cross bucket
+	if d.request.SourceBucket != nil &&
+		ToString(d.request.SourceBucket) != ToString(d.request.Bucket) {
+		return false
+	}
+
+	// Decryption
+	if d.metaProp.Headers.Get(HeaderOssServerSideEncryption) != "" {
+		return false
+	}
+
+	return true
+}
+
+func (d *copierDelegate) copy() (*CopyResult, error) {
+	if d.sizeInBytes <= d.options.MultipartCopyThreshold {
+		return d.singleCopy()
+	} else if d.canUseShallowCopy() {
+		return d.shallowCopy()
+	}
+	return d.multiCopy()
+}
+
+func (d *copierDelegate) singleCopy() (*CopyResult, error) {
+	result, err := d.base.client.CopyObject(d.context, d.request, d.options.ClientOptions...)
+
+	if err != nil {
+		return nil, d.wrapErr("", err)
+	}
+
+	// update
+	d.transferred = d.sizeInBytes
+	d.progressCallback(d.sizeInBytes)
+
+	return &CopyResult{
+		ETag:         result.ETag,
+		HashCRC64:    result.HashCRC64,
+		VersionId:    result.VersionId,
+		ResultCommon: result.ResultCommon,
+	}, nil
+}
+
+func (d *copierDelegate) shallowCopy() (*CopyResult, error) {
+	// try single copy first; if it times out, fall back to multiCopy
+	ctx, cancel := context.WithTimeout(d.context, 30*time.Second)
+	defer cancel()
+	result, err := d.base.client.CopyObject(ctx, d.request, d.options.ClientOptions...)
+
+	if err != nil {
+		if isContextError(ctx, &err) {
+			return d.multiCopy()
+		}
+		return nil, d.wrapErr("", err)
+	}
+
+	// update
+	d.transferred = d.sizeInBytes
+	d.progressCallback(d.sizeInBytes)
+
+	return &CopyResult{
+		ETag:         result.ETag,
+		HashCRC64:    result.HashCRC64,
+		VersionId:    result.VersionId,
+		ResultCommon: result.ResultCommon,
+	}, nil
+}
+
+type copyChunk struct {
+	partNum     int32
+	size        int64
+	sourceRange string
+}
+
+func (d *copierDelegate) multiCopy() (*CopyResult, error) {
+	var (
+		wg       sync.WaitGroup
+		mu       sync.Mutex
+		parts    UploadParts
+		errValue atomic.Value
+	)
+
+	// Initiate the multipart upload
+	imRequest, err := d.newInitiateMultipartUpload()
+	if err != nil {
+		return nil, d.wrapErr("", err)
+	}
+
+	initResult, err := d.base.client.InitiateMultipartUpload(d.context, imRequest, d.options.ClientOptions...)
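+	// A worked example of the part-size adjustment in applySource above: with
+	// the default 64 MiB copy part size, a 1 TiB source would need 16384 parts,
+	// which exceeds MaxUploadParts (10000), so PartSize grows to 128 MiB and
+	// the copy proceeds with 8192 parts.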
+ if err != nil { + return nil, d.wrapErr("", err) + } + + saveErrFn := func(e error) { + errValue.Store(e) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(error) + return e + } + + // readChunk runs in worker goroutines to pull chunks off of the ch channel + // timeout for MultiPartCopy API + // 10s per 200M, max timeout is 50s + const PART_SIZE int64 = 200 * 1024 * 1024 + const STEP time.Duration = 10 * time.Second + mpcTimeout := transport.DefaultReadWriteTimeout + partSize := d.options.PartSize + for partSize > PART_SIZE { + mpcTimeout += STEP + partSize -= PART_SIZE + if mpcTimeout > 50*time.Second { + break + } + } + mpcClientOptions := append(d.options.ClientOptions, OpReadWriteTimeout(mpcTimeout)) + + readChunkFn := func(ch chan copyChunk) { + defer wg.Done() + for { + data, ok := <-ch + if !ok { + break + } + if getErrFn() == nil { + upResult, err := d.base.client.UploadPartCopy( + d.context, + &UploadPartCopyRequest{ + Bucket: d.request.Bucket, + Key: d.request.Key, + SourceBucket: d.request.SourceBucket, + SourceKey: d.request.SourceKey, + SourceVersionId: d.request.SourceVersionId, + UploadId: initResult.UploadId, + PartNumber: data.partNum, + Range: Ptr(data.sourceRange), + RequestPayer: d.request.RequestPayer, + }, mpcClientOptions...) + //fmt.Printf("UploadPart result: %#v, %#v\n", upResult, err) + if err == nil { + mu.Lock() + parts = append(parts, UploadPart{ETag: upResult.ETag, PartNumber: data.partNum}) + d.transferred += data.size + d.progressCallback(data.size) + mu.Unlock() + } else { + saveErrFn(err) + } + } + } + } + + ch := make(chan copyChunk, d.options.ParallelNum) + for i := 0; i < d.options.ParallelNum; i++ { + wg.Add(1) + go readChunkFn(ch) + } + + // Read and queue the parts + var ( + qnum int32 = 0 + totalSize int64 = d.sizeInBytes + readerPos int64 = 0 + ) + for getErrFn() == nil && readerPos < totalSize { + n := d.options.PartSize + bytesLeft := totalSize - readerPos + if bytesLeft <= d.options.PartSize { + n = bytesLeft + } + //fmt.Printf("send chunk: %d\n", qnum) + qnum++ + ch <- copyChunk{partNum: qnum, size: n, sourceRange: fmt.Sprintf("bytes=%v-%v", readerPos, (readerPos + n - 1))} + readerPos += n + } + + // Close the channel, wait for workers + close(ch) + wg.Wait() + + // Complete upload + var cmResult *CompleteMultipartUploadResult + if err = getErrFn(); err == nil { + sort.Sort(parts) + cmRequest := &CompleteMultipartUploadRequest{} + copyRequest(cmRequest, d.request) + cmRequest.UploadId = initResult.UploadId + cmRequest.CompleteMultipartUpload = &CompleteMultipartUpload{Parts: parts} + cmResult, err = d.base.client.CompleteMultipartUpload(d.context, cmRequest, d.options.ClientOptions...) + } + //fmt.Printf("CompleteMultipartUpload cmResult: %#v, %#v\n", cmResult, err) + + if err != nil { + //Abort + if !d.options.LeavePartsOnError { + amRequest := &AbortMultipartUploadRequest{} + copyRequest(amRequest, d.request) + amRequest.UploadId = initResult.UploadId + _, _ = d.base.client.AbortMultipartUpload(d.context, amRequest, d.options.ClientOptions...) 
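+	// A worked example of the per-part timeout scaling above: every full
+	// 200 MiB of part size adds 10s to the transport's default read/write
+	// timeout, and the loop stops once the accumulated timeout passes 50s,
+	// so very large parts get a capped timeout rather than an unbounded one.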
+		}
+		return nil, d.wrapErr(*initResult.UploadId, err)
+	}
+
+	// check crc
+	if cmResult.HashCRC64 != nil {
+		srcCrc := d.metaProp.Headers.Get(HeaderOssCRC64)
+		if srcCrc != "" {
+			destCrc := ToString(cmResult.HashCRC64)
+			if destCrc != srcCrc {
+				return nil, d.wrapErr(*initResult.UploadId, fmt.Errorf("crc is inconsistent, source %s, destination %s", srcCrc, destCrc))
+			}
+		}
+	}
+
+	return &CopyResult{
+		UploadId:     initResult.UploadId,
+		ETag:         cmResult.ETag,
+		VersionId:    cmResult.VersionId,
+		HashCRC64:    cmResult.HashCRC64,
+		ResultCommon: cmResult.ResultCommon,
+	}, nil
+}
+
+func (d *copierDelegate) newInitiateMultipartUpload() (*InitiateMultipartUploadRequest, error) {
+	var err error
+	imRequest := &InitiateMultipartUploadRequest{}
+	copyRequest(imRequest, d.request)
+	imRequest.DisableAutoDetectMimeType = true
+
+	if err = d.overwriteMetadataProp(imRequest); err != nil {
+		return nil, err
+	}
+
+	if err = d.overwriteTagProp(imRequest); err != nil {
+		return nil, err
+	}
+
+	return imRequest, nil
+}
+
+func (d *copierDelegate) overwriteMetadataProp(imRequest *InitiateMultipartUploadRequest) error {
+	copyRequest := d.request
+	switch strings.ToLower(ToString(copyRequest.MetadataDirective)) {
+	case "", "copy":
+		if d.metaProp == nil {
+			return fmt.Errorf("request.MetadataDirective is COPY, but metaProp for the source is nil")
+		}
+		imRequest.CacheControl = nil
+		imRequest.ContentType = nil
+		imRequest.ContentDisposition = nil
+		imRequest.ContentEncoding = nil
+		imRequest.Expires = nil
+		imRequest.Metadata = nil
+		imRequest.Headers = map[string]string{}
+		// skip meta in Headers
+		for k, v := range d.request.Headers {
+			lowK := strings.ToLower(k)
+			if strings.HasPrefix(lowK, "x-oss-meta") {
+				// skip
+			} else if _, ok := metadataCopied[lowK]; ok {
+				// skip
+			} else {
+				imRequest.Headers[k] = v
+			}
+		}
+		// copy meta from source
+		for k, v := range d.metaProp.Headers {
+			lowK := strings.ToLower(k)
+			if strings.HasPrefix(lowK, "x-oss-meta") {
+				imRequest.Headers[lowK] = v[0]
+			} else if _, ok := metadataCopied[lowK]; ok {
+				imRequest.Headers[lowK] = v[0]
+			}
+		}
+	case "replace":
+		// the metadata has been copied via the copyRequest function before
+	default:
+		return fmt.Errorf("unsupported MetadataDirective, %s", ToString(d.request.MetadataDirective))
+	}
+
+	return nil
+}
+
+func (d *copierDelegate) overwriteTagProp(imRequest *InitiateMultipartUploadRequest) error {
+	switch strings.ToLower(ToString(d.request.TaggingDirective)) {
+	case "", "copy":
+		imRequest.Tagging = nil
+		if d.metaProp.TaggingCount > 0 && d.tagProp == nil {
+			request := &GetObjectTaggingRequest{}
+			copyRequest(request, d.request)
+			if d.request.SourceBucket != nil {
+				request.Bucket = d.request.SourceBucket
+			}
+			request.Key = d.request.SourceKey
+			request.VersionId = d.request.SourceVersionId
+			result, err := d.base.client.GetObjectTagging(d.context, request, d.options.ClientOptions...)
+			if err != nil {
+				return err
+			}
+			d.tagProp = result
+		}
+		if d.tagProp != nil {
+			var tags []string
+			for _, t := range d.tagProp.Tags {
+				tags = append(tags, fmt.Sprintf("%v=%v", ToString(t.Key), ToString(t.Value)))
+			}
+			if len(tags) > 0 {
+				imRequest.Tagging = Ptr(strings.Join(tags, "&"))
+			}
+		}
+	case "replace":
+		// the tag has been copied via the copyRequest function before
+	default:
+		return fmt.Errorf("unsupported TaggingDirective, %s", ToString(d.request.TaggingDirective))
+	}
+
+	return nil
+}
+
+func (d *copierDelegate) wrapErr(uploadId string, err error) error {
+	return &CopyError{
+		UploadId: uploadId,
+		Path:     fmt.Sprintf("oss://%s/%s", *d.request.Bucket, *d.request.Key),
+		Err:      err}
+}
+
+func (d *copierDelegate) progressCallback(increment int64) {
+	if d.request.ProgressFn != nil {
+		d.request.ProgressFn(increment, d.transferred, d.sizeInBytes)
+	}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go
new file mode 100644
index 000000000..e27438a3e
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go
@@ -0,0 +1,47 @@
+package credentials
+
+import (
+	"context"
+	"time"
+)
+
+type Credentials struct {
+	AccessKeyID     string     // Access key ID
+	AccessKeySecret string     // Access Key Secret
+	SecurityToken   string     // Security Token
+	Expires         *time.Time // The time the credentials will expire at.
+}
+
+func (v Credentials) Expired() bool {
+	if v.Expires != nil {
+		return !v.Expires.After(time.Now().Round(0))
+	}
+	return false
+}
+
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.AccessKeySecret) > 0
+}
+
+type CredentialsProvider interface {
+	GetCredentials(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// GetCredentials delegates to the function value the CredentialsProviderFunc wraps.
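+// A minimal sketch of wrapping a closure as a provider (the key values are
+// placeholders):
+//
+//	provider := credentials.CredentialsProviderFunc(
+//		func(ctx context.Context) (credentials.Credentials, error) {
+//			return credentials.Credentials{AccessKeyID: "ak", AccessKeySecret: "sk"}, nil
+//		})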
+func (fn CredentialsProviderFunc) GetCredentials(ctx context.Context) (Credentials, error) { + return fn(ctx) +} + +type AnonymousCredentialsProvider struct{} + +func NewAnonymousCredentialsProvider() CredentialsProvider { + return &AnonymousCredentialsProvider{} +} + +func (*AnonymousCredentialsProvider) GetCredentials(_ context.Context) (Credentials, error) { + return Credentials{AccessKeyID: "", AccessKeySecret: ""}, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go new file mode 100644 index 000000000..a8cc127d0 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go @@ -0,0 +1,168 @@ +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +const ecs_ram_cred_url = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +type ecsRoleCredentialsProvider struct { + ramCredUrl string + ramRole string + timeout time.Duration + retries int +} + +type ecsRoleCredentials struct { + AccessKeyId string `json:"AccessKeyId,omitempty"` + AccessKeySecret string `json:"AccessKeySecret,omitempty"` + SecurityToken string `json:"SecurityToken,omitempty"` + Expiration time.Time `json:"Expiration,omitempty"` + LastUpDated time.Time `json:"LastUpDated,omitempty"` + Code string `json:"Code,omitempty"` +} + +func (p *ecsRoleCredentialsProvider) httpGet(ctx context.Context, url string) (*http.Response, error) { + c := &http.Client{ + Timeout: p.timeout, + } + var resp *http.Response + var err error + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + for i := 0; i < p.retries; i++ { + resp, err = c.Do(req) + if err != nil { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + time.Sleep(500 * time.Millisecond) + continue + } + return resp, nil + } + return nil, err +} + +func (p *ecsRoleCredentialsProvider) getRoleFromMetaData(ctx context.Context) (string, error) { + resp, err := p.httpGet(ctx, p.ramCredUrl) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to fetch ecs role name, resp.StatusCode:%v", resp.StatusCode) + } + defer resp.Body.Close() + roleName, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + if len(roleName) == 0 { + return "", errors.New("ecs role name is empty") + } + + return string(roleName), nil +} + +func (p *ecsRoleCredentialsProvider) getCredentialsFromMetaData(ctx context.Context) (ecsRoleCredentials, error) { + var ecsCred ecsRoleCredentials + u, err := url.Parse(p.ramCredUrl) + if err != nil { + return ecsCred, err + } + u.Path = path.Join(u.Path, p.ramRole) + resp, err := p.httpGet(ctx, u.String()) + if err != nil { + return ecsCred, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return ecsCred, err + } + err = json.Unmarshal(body, &ecsCred) + if err != nil { + return ecsCred, err + } + + if ecsCred.Code != "" && strings.ToUpper(ecsCred.Code) != "SUCCESS" { + return ecsCred, fmt.Errorf("failed to fetch credentials, return code:%s", ecsCred.Code) + } + + if ecsCred.AccessKeyId == "" || ecsCred.AccessKeySecret == "" { + return ecsCred, fmt.Errorf("AccessKeyId or AccessKeySecret is empty, response body is '%s'", string(body)) + } + + return ecsCred, 
nil +} + +func (p *ecsRoleCredentialsProvider) GetCredentials(ctx context.Context) (cred Credentials, err error) { + if len(p.ramRole) == 0 { + name, err := p.getRoleFromMetaData(ctx) + if err != nil { + return cred, err + } + p.ramRole = name + } + ecsCred, err := p.getCredentialsFromMetaData(ctx) + if err != nil { + return cred, err + } + cred.AccessKeyID = ecsCred.AccessKeyId + cred.AccessKeySecret = ecsCred.AccessKeySecret + cred.SecurityToken = ecsCred.SecurityToken + if !ecsCred.Expiration.IsZero() { + cred.Expires = &ecsCred.Expiration + } + return cred, nil +} + +type EcsRoleCredentialsProviderOptions struct { + RamRole string + Timeout time.Duration + Retries int +} + +func NewEcsRoleCredentialsProviderWithoutRefresh(optFns ...func(*EcsRoleCredentialsProviderOptions)) CredentialsProvider { + options := EcsRoleCredentialsProviderOptions{ + RamRole: "", + Timeout: time.Second * 10, + Retries: 3, + } + for _, fn := range optFns { + fn(&options) + } + return &ecsRoleCredentialsProvider{ + ramCredUrl: ecs_ram_cred_url, + ramRole: options.RamRole, + timeout: options.Timeout, + retries: options.Retries, + } +} + +func EcsRamRole(ramRole string) func(*EcsRoleCredentialsProviderOptions) { + return func(options *EcsRoleCredentialsProviderOptions) { + options.RamRole = ramRole + } +} + +func NewEcsRoleCredentialsProvider(optFns ...func(*EcsRoleCredentialsProviderOptions)) CredentialsProvider { + p := NewEcsRoleCredentialsProviderWithoutRefresh(optFns...) + provider := NewCredentialsFetcherProvider(CredentialsFetcherFunc(func(ctx context.Context) (Credentials, error) { + return p.GetCredentials(ctx) + })) + return provider +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go new file mode 100644 index 000000000..fbd9ca1fb --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go @@ -0,0 +1,27 @@ +package credentials + +import ( + "context" + "fmt" + "os" +) + +type EnvironmentVariableCredentialsProvider struct { +} + +func (s *EnvironmentVariableCredentialsProvider) GetCredentials(ctx context.Context) (Credentials, error) { + id := os.Getenv("OSS_ACCESS_KEY_ID") + secret := os.Getenv("OSS_ACCESS_KEY_SECRET") + if id == "" || secret == "" { + return Credentials{}, fmt.Errorf("access key id or access key secret is empty!") + } + return Credentials{ + AccessKeyID: id, + AccessKeySecret: secret, + SecurityToken: os.Getenv("OSS_SESSION_TOKEN"), + }, nil +} + +func NewEnvironmentVariableCredentialsProvider() CredentialsProvider { + return &EnvironmentVariableCredentialsProvider{} +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go new file mode 100644 index 000000000..07d0e5a11 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go @@ -0,0 +1,183 @@ +package credentials + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" +) + +var ( + // Default expiration time adjustment factor + defaultExpiredFactor = 0.8 + + // backoff of refresh time + defaultRefreshDuration = 120 * time.Second +) + +// CredentialsFetcherOptions are the options +type CredentialsFetcherOptions struct { + ExpiredFactor float64 + RefreshDuration 
time.Duration +} + +type CredentialsFetcher interface { + Fetch(ctx context.Context) (Credentials, error) +} + +// CredentialsFetcherFunc provides a helper wrapping a function value to +// satisfy the CredentialsFetcher interface. +type CredentialsFetcherFunc func(context.Context) (Credentials, error) + +// Fetch delegates to the function value the CredentialsFetcherFunc wraps. +func (fn CredentialsFetcherFunc) Fetch(ctx context.Context) (Credentials, error) { + return fn(ctx) +} + +type CredentialsFetcherProvider struct { + m sync.Mutex + + //credentials *fetcherCredentials + credentials atomic.Value + + fetcher CredentialsFetcher + + expiredFactor float64 + refreshDuration time.Duration +} + +type fetcherCredentials struct { + Creds Credentials + ExpiryWindow time.Duration +} + +func NewCredentialsFetcherProvider(fetcher CredentialsFetcher, optFns ...func(*CredentialsFetcherOptions)) CredentialsProvider { + options := CredentialsFetcherOptions{ + ExpiredFactor: defaultExpiredFactor, + RefreshDuration: defaultRefreshDuration, + } + + for _, fn := range optFns { + fn(&options) + } + + return &CredentialsFetcherProvider{ + fetcher: fetcher, + expiredFactor: options.ExpiredFactor, + refreshDuration: options.RefreshDuration, + } +} + +func (c *CredentialsFetcherProvider) GetCredentials(ctx context.Context) (Credentials, error) { + fcreds := c.getCreds() + if c.isExpired(fcreds) { + c.m.Lock() + defer c.m.Unlock() + creds, err := c.fetch(ctx) + if err == nil { + c.updateCreds(&creds) + } + return creds, err + } else { + if c.isSoonExpire(fcreds) && c.m.TryLock() { + defer c.m.Unlock() + fcreds1 := c.getCreds() + if fcreds1 == fcreds { + creds, err := c.fetch(ctx) + if err == nil { + c.updateCreds(&creds) + return creds, nil + } else { + c.updateExpiryWindow(fcreds1) + err = nil + } + } + fcreds = fcreds1 + } + return fcreds.Creds, nil + } +} + +type asyncFetchResult struct { + val Credentials + err error +} + +func (c *CredentialsFetcherProvider) asyncFetch(ctx context.Context) <-chan asyncFetchResult { + doChan := func() <-chan asyncFetchResult { + ch := make(chan asyncFetchResult, 1) + + go func() { + cred, err := c.fetcher.Fetch(ctx) + ch <- asyncFetchResult{cred, err} + }() + + return ch + } + + return doChan() +} + +func (c *CredentialsFetcherProvider) fetch(ctx context.Context) (Credentials, error) { + if c.fetcher == nil { + return Credentials{}, fmt.Errorf("fetcher is null.") + } + + select { + case result, _ := <-c.asyncFetch(ctx): + return result.val, result.err + case <-ctx.Done(): + return Credentials{}, fmt.Errorf("FetchCredentialsCanceled") + } +} + +func (p *CredentialsFetcherProvider) getCreds() *fetcherCredentials { + v := p.credentials.Load() + if v == nil { + return nil + } + creds, _ := v.(*fetcherCredentials) + return creds +} + +func (c *CredentialsFetcherProvider) updateCreds(cred *Credentials) { + fcred := fetcherCredentials{ + Creds: *cred, + } + if cred.Expires != nil { + curr := time.Now().Round(0) + durationS := c.expiredFactor * float64(cred.Expires.Sub(curr).Seconds()) + duration := time.Duration(durationS * float64(time.Second)) + if duration > c.refreshDuration { + fcred.ExpiryWindow = duration + } + } + c.credentials.Store(&fcred) +} + +func (c *CredentialsFetcherProvider) updateExpiryWindow(fcreds *fetcherCredentials) { + if fcreds.ExpiryWindow > 0 { + fcreds1 := *fcreds + fcreds1.ExpiryWindow -= c.refreshDuration + c.credentials.Store(&fcreds1) + } +} + +func (c *CredentialsFetcherProvider) isExpired(fcreds *fetcherCredentials) bool { + return fcreds == nil 
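+	// A worked example of the refresh-window logic here: with the default
+	// ExpiredFactor of 0.8, credentials that remain valid for 3600s get an
+	// ExpiryWindow of 2880s, so a proactive refresh is attempted once less
+	// than 2880s of validity remain; each failed refresh shrinks the window
+	// by the 120s RefreshDuration backoff.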
|| fcreds.Creds.Expired() +} + +func (c *CredentialsFetcherProvider) isSoonExpire(fcreds *fetcherCredentials) bool { + if fcreds == nil || fcreds.Creds.Expired() { + return true + } + + if fcreds.ExpiryWindow > 0 && fcreds.Creds.Expires != nil { + if !fcreds.Creds.Expires.After(time.Now().Round(0).Add(fcreds.ExpiryWindow)) { + return true + } + } + + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go new file mode 100644 index 000000000..bb87dff7b --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go @@ -0,0 +1,168 @@ +package credentials + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "runtime" + "time" +) + +/* +temporary access credentials format +{ + "AccessKeyId" : "ak", + "AccessKeySecret" : "sk", + "Expiration" : "2023-12-29T07:45:02Z", + "SecurityToken" : "token", +} + +long-term access credentials +{ + "AccessKeyId" : "ak", + "AccessKeySecret" : "sk", +} +*/ + +type processCredentialsResult struct { + AccessKeyId string `json:"AccessKeyId"` + + AccessKeySecret string `json:"AccessKeySecret"` + + SecurityToken string `json:"SecurityToken"` + + Expiration *time.Time `json:"Expiration"` +} + +type ProcessCredentialsProviderOptions struct { + Timeout time.Duration +} + +type ProcessCredentialsProvider struct { + timeout time.Duration + args []string +} + +func NewProcessCredentialsProvider(command string, optFns ...func(*ProcessCredentialsProviderOptions)) CredentialsProvider { + options := ProcessCredentialsProviderOptions{ + Timeout: 15 * time.Second, + } + + for _, fn := range optFns { + fn(&options) + } + + var args []string + if len(command) > 0 { + args = []string{command} + } + + return &ProcessCredentialsProvider{ + timeout: options.Timeout, + args: args, + } +} + +func (p *ProcessCredentialsProvider) GetCredentials(ctx context.Context) (Credentials, error) { + return p.fetchCredentials(ctx) +} + +func (p *ProcessCredentialsProvider) buildCommand(ctx context.Context) (*exec.Cmd, error) { + if len(p.args) == 0 { + return nil, fmt.Errorf("command must not be empty") + } + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + cmdArgs = append(cmdArgs, p.args...) + cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) 
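+	// A hypothetical invocation of this provider; the command path is a
+	// placeholder and must print the JSON document described at the top of
+	// this file on stdout.
+	//
+	//	provider := credentials.NewProcessCredentialsProvider("/usr/local/bin/fetch-oss-creds",
+	//		func(o *credentials.ProcessCredentialsProviderOptions) { o.Timeout = 5 * time.Second })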
+ cmd.Env = os.Environ() + + return cmd, nil +} + +func (p *ProcessCredentialsProvider) fetchCredentials(ctx context.Context) (Credentials, error) { + data, err := p.executeProcess(ctx) + if err != nil { + return Credentials{}, err + } + + //json to Credentials + result := &processCredentialsResult{} + if err = json.Unmarshal(data, result); err != nil { + return Credentials{}, err + + } + + creds := Credentials{ + AccessKeyID: result.AccessKeyId, + AccessKeySecret: result.AccessKeySecret, + SecurityToken: result.SecurityToken, + Expires: result.Expiration, + } + + if !creds.HasKeys() { + return creds, fmt.Errorf("missing AccessKeyId or AccessKeySecret in process output") + } + + return creds, nil +} + +func (p *ProcessCredentialsProvider) executeProcess(ctx context.Context) ([]byte, error) { + if p.timeout >= 0 { + var cancelFunc func() + ctx, cancelFunc = context.WithTimeout(ctx, p.timeout) + defer cancelFunc() + } + + cmd, err := p.buildCommand(ctx) + if err != nil { + return nil, err + } + + // get creds from process's stdout + output := bytes.NewBuffer(make([]byte, 0, int(8*1024))) + cmd.Stdout = output + + // Start the command + executeFn := func(cmd *exec.Cmd, exec chan error) { + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + exec <- err + } + + execCh := make(chan error, 1) + go executeFn(cmd, execCh) + + // Wait commnd done + select { + case execError := <-execCh: + if execError == nil { + break + } + select { + case <-ctx.Done(): + return output.Bytes(), fmt.Errorf("credential process timed out: %w", execError) + default: + return output.Bytes(), fmt.Errorf("error in credential_process: %w", execError) + } + } + + out := output.Bytes() + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) + } + + return out, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go new file mode 100644 index 000000000..a6e7a126b --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go @@ -0,0 +1,26 @@ +package credentials + +import ( + "context" +) + +type StaticCredentialsProvider struct { + credentials Credentials +} + +func NewStaticCredentialsProvider(id, secret string, tokens ...string) CredentialsProvider { + token := "" + if len(tokens) > 0 { + token = tokens[0] + } + return StaticCredentialsProvider{ + credentials: Credentials{ + AccessKeyID: id, + AccessKeySecret: secret, + SecurityToken: token, + }} +} + +func (s StaticCredentialsProvider) GetCredentials(_ context.Context) (Credentials, error) { + return s.credentials, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go new file mode 100644 index 000000000..d8784d262 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go @@ -0,0 +1,65 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "io" +) + +type aesCtr struct { + block cipher.Block + cipherData CipherData +} + +func newAesCtr(cd CipherData) (Cipher, error) { + block, err := aes.NewCipher(cd.Key) + if err != nil { + return nil, err + } + return &aesCtr{block, cd}, nil +} + +func (c *aesCtr) Encrypt(src io.Reader) io.Reader { + reader := &ctrEncryptReader{ + encrypter: cipher.NewCTR(c.block, c.cipherData.IV), 
+ src: src, + } + return reader +} + +type ctrEncryptReader struct { + encrypter cipher.Stream + src io.Reader +} + +func (reader *ctrEncryptReader) Read(data []byte) (int, error) { + plainText := make([]byte, len(data), len(data)) + n, err := reader.src.Read(plainText) + if n > 0 { + plainText = plainText[0:n] + reader.encrypter.XORKeyStream(data, plainText) + } + return n, err +} + +func (c *aesCtr) Decrypt(src io.Reader) io.Reader { + return &ctrDecryptReader{ + decrypter: cipher.NewCTR(c.block, c.cipherData.IV), + src: src, + } +} + +type ctrDecryptReader struct { + decrypter cipher.Stream + src io.Reader +} + +func (reader *ctrDecryptReader) Read(data []byte) (int, error) { + cryptoText := make([]byte, len(data), len(data)) + n, err := reader.src.Read(cryptoText) + if n > 0 { + cryptoText = cryptoText[0:n] + reader.decrypter.XORKeyStream(data, cryptoText) + } + return n, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go new file mode 100644 index 000000000..9d8aec8a3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go @@ -0,0 +1,208 @@ +package crypto + +import ( + "fmt" + "io" +) + +const ( + aesKeySize = 32 + ivSize = 16 +) + +// aesCtrCipherBuilder for building ContentCipher +type aesCtrCipherBuilder struct { + MasterCipher MasterCipher +} + +// aesCtrCipher will use aes ctr algorithm +type aesCtrCipher struct { + CipherData CipherData + Cipher Cipher +} + +// CreateAesCtrCipher creates ContentCipherBuilder +func CreateAesCtrCipher(cipher MasterCipher) ContentCipherBuilder { + return aesCtrCipherBuilder{MasterCipher: cipher} +} + +// createCipherData create CipherData for encrypt object data +func (builder aesCtrCipherBuilder) createCipherData() (CipherData, error) { + var cd CipherData + var err error + err = cd.RandomKeyIv(aesKeySize, ivSize) + if err != nil { + return cd, err + } + + cd.WrapAlgorithm = builder.MasterCipher.GetWrapAlgorithm() + cd.CEKAlgorithm = AesCtrAlgorithm + cd.MatDesc = builder.MasterCipher.GetMatDesc() + + // EncryptedKey + cd.EncryptedKey, err = builder.MasterCipher.Encrypt(cd.Key) + if err != nil { + return cd, err + } + + // EncryptedIV + cd.EncryptedIV, err = builder.MasterCipher.Encrypt(cd.IV) + if err != nil { + return cd, err + } + + return cd, nil +} + +// contentCipherCD is used to create ContentCipher with CipherData +func (builder aesCtrCipherBuilder) contentCipherCD(cd CipherData) (ContentCipher, error) { + cipher, err := newAesCtr(cd) + if err != nil { + return nil, err + } + + return &aesCtrCipher{ + CipherData: cd, + Cipher: cipher, + }, nil +} + +// ContentCipher is used to create ContentCipher interface +func (builder aesCtrCipherBuilder) ContentCipher() (ContentCipher, error) { + cd, err := builder.createCipherData() + if err != nil { + return nil, err + } + return builder.contentCipherCD(cd) +} + +// ContentCipherEnv is used to create a decrption ContentCipher from Envelope +func (builder aesCtrCipherBuilder) ContentCipherEnv(envelope Envelope) (ContentCipher, error) { + var cd CipherData + cd.EncryptedKey = make([]byte, len(envelope.CipherKey)) + copy(cd.EncryptedKey, []byte(envelope.CipherKey)) + + plainKey, err := builder.MasterCipher.Decrypt([]byte(envelope.CipherKey)) + if err != nil { + return nil, err + } + cd.Key = make([]byte, len(plainKey)) + copy(cd.Key, plainKey) + + cd.EncryptedIV = make([]byte, len(envelope.IV)) + copy(cd.EncryptedIV, 
[]byte(envelope.IV)) + + plainIV, err := builder.MasterCipher.Decrypt([]byte(envelope.IV)) + if err != nil { + return nil, err + } + + cd.IV = make([]byte, len(plainIV)) + copy(cd.IV, plainIV) + + cd.MatDesc = envelope.MatDesc + cd.WrapAlgorithm = envelope.WrapAlg + cd.CEKAlgorithm = envelope.CEKAlg + + return builder.contentCipherCD(cd) +} + +// GetMatDesc is used to get MasterCipher's MatDesc +func (builder aesCtrCipherBuilder) GetMatDesc() string { + return builder.MasterCipher.GetMatDesc() +} + +// EncryptContents will generate a random key and iv and encrypt the data using ctr +func (cc *aesCtrCipher) EncryptContent(src io.Reader) (io.ReadCloser, error) { + if sr, ok := src.(io.ReadSeeker); ok { + if curr, err := sr.Seek(0, io.SeekCurrent); err == nil { + return &aesSeekEncrypter{ + Body: sr, + Encrypter: nil, + Start: curr, + Offset: curr, + cc: cc, + }, nil + } + } + reader := cc.Cipher.Encrypt(src) + return &CryptoEncrypter{Body: src, Encrypter: reader}, nil +} + +// DecryptContent is used to decrypt object using ctr +func (cc *aesCtrCipher) DecryptContent(src io.Reader) (io.ReadCloser, error) { + reader := cc.Cipher.Decrypt(src) + return &CryptoDecrypter{Body: src, Decrypter: reader}, nil +} + +// GetCipherData is used to get cipher data information +func (cc *aesCtrCipher) GetCipherData() *CipherData { + return &(cc.CipherData) +} + +// GetCipherData returns cipher data +func (cc *aesCtrCipher) GetEncryptedLen(plainTextLen int64) int64 { + // AES CTR encryption mode does not change content length + return plainTextLen +} + +// GetAlignLen is used to get align length +func (cc *aesCtrCipher) GetAlignLen() int { + return len(cc.CipherData.IV) +} + +// Clone is used to create a new aesCtrCipher from itself +func (cc *aesCtrCipher) Clone(cd CipherData) (ContentCipher, error) { + cipher, err := newAesCtr(cd) + if err != nil { + return nil, err + } + + return &aesCtrCipher{ + CipherData: cd, + Cipher: cipher, + }, nil +} + +// CryptoSeekEncrypter provides close and seek method for Encrypter +type aesSeekEncrypter struct { + Body io.ReadSeeker + Encrypter io.Reader + isClosed bool + Start int64 + Offset int64 + cc *aesCtrCipher +} + +// Close lets the CryptoSeekEncrypter satisfy io.ReadCloser interface +func (rc *aesSeekEncrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoSeekEncrypter satisfy io.ReadCloser interface +func (rc *aesSeekEncrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + if rc.Encrypter == nil { + if rc.Start != rc.Offset { + return 0, fmt.Errorf("Cant not encrypt from offset %v, must start from %v", rc.Offset, rc.Start) + } + rc.Encrypter = rc.cc.Cipher.Encrypt(rc.Body) + } + return rc.Encrypter.Read(b) +} + +// Seek lets the CryptoSeekEncrypter satisfy io.Seeker interface +func (rc *aesSeekEncrypter) Seek(offset int64, whence int) (int64, error) { + off, err := rc.Body.Seek(offset, whence) + //Reset Encrypter Reader + rc.Encrypter = nil + rc.Offset = off + + return off, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go new file mode 100644 index 000000000..3a46fd9f6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go @@ -0,0 +1,69 @@ +package crypto + +import ( + "io" +) + +// Cipher is interface for encryption or decryption of an object +type Cipher interface { + 
Encrypter + Decrypter +} + +// Encrypter is interface with only encrypt method +type Encrypter interface { + Encrypt(io.Reader) io.Reader +} + +// Decrypter is interface with only decrypt method +type Decrypter interface { + Decrypt(io.Reader) io.Reader +} + +// CryptoEncrypter provides close method for Encrypter +type CryptoEncrypter struct { + Body io.Reader + Encrypter io.Reader + isClosed bool +} + +// Close lets the CryptoEncrypter satisfy io.ReadCloser interface +func (rc *CryptoEncrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoEncrypter satisfy io.ReadCloser interface +func (rc *CryptoEncrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + return rc.Encrypter.Read(b) +} + +// CryptoDecrypter provides close method for Decrypter +type CryptoDecrypter struct { + Body io.Reader + Decrypter io.Reader + isClosed bool +} + +// Close lets the CryptoDecrypter satisfy io.ReadCloser interface +func (rc *CryptoDecrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoDecrypter satisfy io.ReadCloser interface +func (rc *CryptoDecrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + return rc.Decrypter.Read(b) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go new file mode 100644 index 000000000..d09c47589 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go @@ -0,0 +1,8 @@ +package crypto + +// encryption Algorithm +const ( + RsaCryptoWrap string = "RSA/NONE/PKCS1Padding" + KmsAliCryptoWrap string = "KMS/ALICLOUD" + AesCtrAlgorithm string = "AES/CTR/NoPadding" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go new file mode 100644 index 000000000..9e5d2b887 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go @@ -0,0 +1,125 @@ +package crypto + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "io" + math_rand "math/rand" +) + +// MasterCipher encrypt or decrpt CipherData +// support master key: rsa && ali kms +type MasterCipher interface { + Encrypt([]byte) ([]byte, error) + Decrypt([]byte) ([]byte, error) + GetWrapAlgorithm() string + GetMatDesc() string +} + +// ContentCipherBuilder is used to create ContentCipher for encryting object's data +type ContentCipherBuilder interface { + ContentCipher() (ContentCipher, error) + ContentCipherEnv(Envelope) (ContentCipher, error) + GetMatDesc() string +} + +// ContentCipher is used to encrypt or decrypt object's data +type ContentCipher interface { + EncryptContent(io.Reader) (io.ReadCloser, error) + DecryptContent(io.Reader) (io.ReadCloser, error) + Clone(cd CipherData) (ContentCipher, error) + GetEncryptedLen(int64) int64 + GetCipherData() *CipherData + GetAlignLen() int +} + +// Envelope is stored in oss object's meta +type Envelope struct { + IV string + CipherKey string + MatDesc string + WrapAlg string + CEKAlg string + UnencryptedMD5 string + UnencryptedContentLen string +} + +func (el Envelope) IsValid() bool { + return len(el.IV) > 0 && + len(el.CipherKey) > 0 && + len(el.WrapAlg) > 0 && + len(el.CEKAlg) > 0 +} + +func (el 
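+// A sketch of how the interfaces above fit together (the PEM strings are
+// placeholders; see aes_ctr_cipher.go above and master_rsa_cipher.go below):
+// a MasterCipher wraps the per-object key and IV, and the ContentCipherBuilder
+// yields the ContentCipher that encrypts the object stream.
+//
+//	master, _ := crypto.CreateMasterRsa(map[string]string{"desc": "demo"}, publicPEM, privatePEM)
+//	builder := crypto.CreateAesCtrCipher(master)
+//	cc, _ := builder.ContentCipher()
+//	encrypted, _ := cc.EncryptContent(strings.NewReader("plaintext"))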
Envelope) String() string { + return fmt.Sprintf("IV=%s&CipherKey=%s&WrapAlg=%s&CEKAlg=%s", el.IV, el.CipherKey, el.WrapAlg, el.CEKAlg) +} + +// CipherData is secret key information +type CipherData struct { + IV []byte + Key []byte + MatDesc string + WrapAlgorithm string + CEKAlgorithm string + EncryptedIV []byte + EncryptedKey []byte +} + +func (cd *CipherData) RandomKeyIv(keyLen int, ivLen int) error { + // Key + cd.Key = make([]byte, keyLen) + if _, err := io.ReadFull(rand.Reader, cd.Key); err != nil { + return err + } + + // sizeof uint64 + if ivLen < 8 { + return fmt.Errorf("ivLen:%d less than 8", ivLen) + } + + // IV:reserve 8 bytes + cd.IV = make([]byte, ivLen) + if _, err := io.ReadFull(rand.Reader, cd.IV[0:ivLen-8]); err != nil { + return err + } + + // only use 4 byte,in order not to overflow when SeekIV() + randNumber := math_rand.Uint32() + cd.SetIV(uint64(randNumber)) + return nil +} + +func (cd *CipherData) SetIV(iv uint64) { + ivLen := len(cd.IV) + binary.BigEndian.PutUint64(cd.IV[ivLen-8:], iv) +} + +func (cd *CipherData) GetIV() uint64 { + ivLen := len(cd.IV) + return binary.BigEndian.Uint64(cd.IV[ivLen-8:]) +} + +func (cd *CipherData) SeekIV(startPos uint64) { + cd.SetIV(cd.GetIV() + startPos/uint64(len(cd.IV))) +} + +func (cd *CipherData) Clone() CipherData { + var cloneCd CipherData + cloneCd = *cd + + cloneCd.Key = make([]byte, len(cd.Key)) + copy(cloneCd.Key, cd.Key) + + cloneCd.IV = make([]byte, len(cd.IV)) + copy(cloneCd.IV, cd.IV) + + cloneCd.EncryptedIV = make([]byte, len(cd.EncryptedIV)) + copy(cloneCd.EncryptedIV, cd.EncryptedIV) + + cloneCd.EncryptedKey = make([]byte, len(cd.EncryptedKey)) + copy(cloneCd.EncryptedKey, cd.EncryptedKey) + + return cloneCd +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go new file mode 100644 index 000000000..91f3b9df5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go @@ -0,0 +1,102 @@ +package crypto + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "encoding/json" + "encoding/pem" + "fmt" +) + +// CreateMasterRsa Create master key interface implemented by rsa +// matDesc will be converted to json string +func CreateMasterRsa(matDesc map[string]string, publicKey string, privateKey string) (MasterCipher, error) { + var masterCipher MasterRsaCipher + var jsonDesc string + if len(matDesc) > 0 { + b, err := json.Marshal(matDesc) + if err != nil { + return masterCipher, err + } + jsonDesc = string(b) + } + masterCipher.MatDesc = jsonDesc + masterCipher.PublicKey = publicKey + masterCipher.PrivateKey = privateKey + return masterCipher, nil +} + +// MasterRsaCipher rsa master key interface +type MasterRsaCipher struct { + MatDesc string + PublicKey string + PrivateKey string +} + +// GetWrapAlgorithm get master key wrap algorithm +func (mrc MasterRsaCipher) GetWrapAlgorithm() string { + return RsaCryptoWrap +} + +// GetMatDesc get master key describe +func (mrc MasterRsaCipher) GetMatDesc() string { + return mrc.MatDesc +} + +// Encrypt encrypt data by rsa public key +// Mainly used to encrypt object's symmetric secret key and iv +func (mrc MasterRsaCipher) Encrypt(plainData []byte) ([]byte, error) { + block, _ := pem.Decode([]byte(mrc.PublicKey)) + if block == nil { + return nil, fmt.Errorf("pem.Decode public key error") + } + + var pub *rsa.PublicKey + if block.Type == "PUBLIC KEY" { + // pks8 format + pubInterface, 
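+	// Note on the PEM block types handled here: "PUBLIC KEY" / "PRIVATE KEY"
+	// blocks are parsed as PKIX / PKCS#8, while "RSA PUBLIC KEY" /
+	// "RSA PRIVATE KEY" blocks are parsed as PKCS#1.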
err := x509.ParsePKIXPublicKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		pub = pubInterface.(*rsa.PublicKey)
+	} else if block.Type == "RSA PUBLIC KEY" {
+		// pkcs1 format
+		pub = &rsa.PublicKey{}
+		_, err := asn1.Unmarshal(block.Bytes, pub)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("not supported public key, type: %s", block.Type)
+	}
+	return rsa.EncryptPKCS1v15(rand.Reader, pub, plainData)
+}
+
+// Decrypt decrypts data by rsa private key
+// Mainly used to decrypt object's symmetric secret key and iv
+func (mrc MasterRsaCipher) Decrypt(cryptoData []byte) ([]byte, error) {
+	block, _ := pem.Decode([]byte(mrc.PrivateKey))
+	if block == nil {
+		return nil, fmt.Errorf("pem.Decode private key error")
+	}
+
+	if block.Type == "PRIVATE KEY" {
+		// pkcs8 format
+		privInterface, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		return rsa.DecryptPKCS1v15(rand.Reader, privInterface.(*rsa.PrivateKey), cryptoData)
+	} else if block.Type == "RSA PRIVATE KEY" {
+		// pkcs1 format
+		priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		return rsa.DecryptPKCS1v15(rand.Reader, priv, cryptoData)
+	} else {
+		return nil, fmt.Errorf("not supported private key, type: %s", block.Type)
+	}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go
new file mode 100644
index 000000000..ec9c242df
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go
@@ -0,0 +1,79 @@
+package oss
+
+import "os"
+
+const (
+	MaxUploadParts int32 = 10000
+
+	// MaxPartSize Max part size, 5GB, For UploadPart
+	MaxPartSize int64 = 5 * 1024 * 1024 * 1024
+
+	// MinPartSize Min part size, 100KB, For UploadPart
+	MinPartSize int64 = 100 * 1024
+
+	// DefaultPartSize Default part size, 6M
+	DefaultPartSize int64 = 6 * 1024 * 1024
+
+	// DefaultUploadPartSize Default part size for the uploader to upload data
+	DefaultUploadPartSize = DefaultPartSize
+
+	// DefaultDownloadPartSize Default part size for the downloader to download objects
+	DefaultDownloadPartSize = DefaultPartSize
+
+	// DefaultCopyPartSize Default part size for the copier to copy objects, 64M
+	DefaultCopyPartSize int64 = 64 * 1024 * 1024
+
+	// DefaultParallel Default parallel
+	DefaultParallel = 3
+
+	// DefaultUploadParallel Default parallel for the uploader to upload data
+	DefaultUploadParallel = DefaultParallel
+
+	// DefaultDownloadParallel Default parallel for the downloader to download objects
+	DefaultDownloadParallel = DefaultParallel
+
+	// DefaultCopyParallel Default parallel for the copier to copy objects
+	DefaultCopyParallel = DefaultParallel
+
+	// DefaultPrefetchThreshold Default prefetch threshold to switch to async read in ReadOnlyFile
+	DefaultPrefetchThreshold int64 = 20 * 1024 * 1024
+
+	// DefaultPrefetchNum Default prefetch number for async read in ReadOnlyFile
+	DefaultPrefetchNum = DefaultParallel
+
+	// DefaultPrefetchChunkSize Default prefetch chunk size for async read in ReadOnlyFile
+	DefaultPrefetchChunkSize = DefaultPartSize
+
+	// DefaultCopyThreshold Default threshold to use multipart copy in Copier, 200M
+	DefaultCopyThreshold int64 = 200 * 1024 * 1024
+
+	// FilePermMode File permission
+	FilePermMode = os.FileMode(0664)
+
+	// TempFileSuffix Temp file suffix
+	TempFileSuffix = ".temp"
+
+	// CheckpointFileSuffixDownloader Checkpoint file suffix for Downloader
+	CheckpointFileSuffixDownloader = ".dcp"
+
+	// 
CheckpointFileSuffixUploader Checkpoint file suffix for Uploader + CheckpointFileSuffixUploader = ".ucp" + + // CheckpointMagic Checkpoint file Magic + CheckpointMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3" + + // DefaultProduct Product for signing + DefaultProduct = "oss" + + // CloudBoxProduct Product of cloud box for signing + CloudBoxProduct = "oss-cloudbox" + + // DefaultEndpointScheme The URL's scheme, default is https + DefaultEndpointScheme = "https" + + // DefaultSignatureVersion Default signature version is v4 + DefaultSignatureVersion = SignatureVersionV4 + + // DefaultOutOfOrderReadThreshold Default out of order read threshold is 3 + DefaultOutOfOrderReadThreshold int64 = 3 +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go new file mode 100644 index 000000000..bc1329de5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go @@ -0,0 +1,598 @@ +package oss + +import ( + "context" + "fmt" + "hash" + "io" + "net/http" + "os" + "path/filepath" + "sort" + "sync" + "sync/atomic" +) + +type DownloaderOptions struct { + PartSize int64 + + ParallelNum int + + EnableCheckpoint bool + + CheckpointDir string + + VerifyData bool + + UseTempFile bool + + ClientOptions []func(*Options) +} + +type Downloader struct { + options DownloaderOptions + client DownloadAPIClient + featureFlags FeatureFlagsType +} + +// NewDownloader creates a new Downloader instance to downloads objects. +// Pass in additional functional options to customize the downloader behavior. +func NewDownloader(c DownloadAPIClient, optFns ...func(*DownloaderOptions)) *Downloader { + options := DownloaderOptions{ + PartSize: DefaultUploadPartSize, + ParallelNum: DefaultUploadParallel, + UseTempFile: true, + } + + for _, fn := range optFns { + fn(&options) + } + + u := &Downloader{ + client: c, + options: options, + } + + //Get Client Feature + switch t := c.(type) { + case *Client: + u.featureFlags = t.options.FeatureFlags + case *EncryptionClient: + u.featureFlags = (t.Unwrap().options.FeatureFlags & ^FeatureEnableCRC64CheckDownload) + } + + return u +} + +type DownloadResult struct { + Written int64 +} + +type DownloadError struct { + Err error + Path string +} + +func (m *DownloadError) Error() string { + var extra string + if m.Err != nil { + extra = fmt.Sprintf(", cause: %s", m.Err.Error()) + } + return fmt.Sprintf("download failed %s", extra) +} + +func (m *DownloadError) Unwrap() error { + return m.Err +} + +func (d *Downloader) DownloadFile(ctx context.Context, request *GetObjectRequest, filePath string, optFns ...func(*DownloaderOptions)) (result *DownloadResult, err error) { + // Downloader wrapper + delegate, err := d.newDelegate(ctx, request, optFns...) 
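+	// A usage sketch for DownloadFile as defined here (bucket, key, and the
+	// local path are placeholders; client is an *oss.Client built elsewhere):
+	//
+	//	dl := oss.NewDownloader(client, func(o *oss.DownloaderOptions) {
+	//		o.PartSize = 8 * 1024 * 1024
+	//		o.ParallelNum = 4
+	//	})
+	//	result, err := dl.DownloadFile(context.TODO(),
+	//		&oss.GetObjectRequest{Bucket: oss.Ptr("examplebucket"), Key: oss.Ptr("exampleobject")},
+	//		"/tmp/exampleobject")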
+ if err != nil { + return nil, err + } + + // Source + if err = delegate.checkSource(); err != nil { + return nil, err + } + + // Destination + var file *os.File + if file, err = delegate.checkDestination(filePath); err != nil { + return nil, err + } + + // Range + if err = delegate.adjustRange(); err != nil { + return nil, err + } + + // Checkpoint + if err = delegate.checkCheckpoint(); err != nil { + return nil, err + } + + // truncate to the right position + if err = delegate.adjustWriter(file); err != nil { + return nil, err + } + + // CRC Part + delegate.updateCRCFlag() + + // download + result, err = delegate.download() + + return result, delegate.closeWriter(file, err) +} + +type downloaderDelegate struct { + base *Downloader + options DownloaderOptions + client DownloadAPIClient + context context.Context + + m sync.Mutex + + request *GetObjectRequest + w io.WriterAt + rstart int64 + pos int64 + epos int64 + written int64 + + // Source's Info + sizeInBytes int64 + etag string + modTime string + headers http.Header + + //Destination's Info + filePath string + tempFilePath string + fileInfo os.FileInfo + + //crc + calcCRC bool + checkCRC bool + + checkpoint *downloadCheckpoint +} + +type downloaderChunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 + rstart int64 //range start +} + +type downloadedChunk struct { + start int64 + size int64 + crc64 uint64 +} + +type downloadedChunks []downloadedChunk + +func (slice downloadedChunks) Len() int { + return len(slice) +} +func (slice downloadedChunks) Less(i, j int) bool { + return slice[i].start < slice[j].start +} +func (slice downloadedChunks) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func (c *downloaderChunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur-c.rstart) + c.cur += int64(n) + return +} + +func (d *Downloader) newDelegate(ctx context.Context, request *GetObjectRequest, optFns ...func(*DownloaderOptions)) (*downloaderDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if !isValidBucketName(request.Bucket) { + return nil, NewErrParamInvalid("request.Bucket") + } + + if !isValidObjectName(request.Key) { + return nil, NewErrParamInvalid("request.Key") + } + + if request.Range != nil && !isValidRange(request.Range) { + return nil, NewErrParamInvalid("request.Range") + } + + delegate := downloaderDelegate{ + base: d, + options: d.options, + client: d.client, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&delegate.options) + } + + if delegate.options.ParallelNum <= 0 { + delegate.options.ParallelNum = DefaultDownloadParallel + } + if delegate.options.PartSize <= 0 { + delegate.options.PartSize = DefaultDownloadPartSize + } + + return &delegate, nil +} + +func (d *downloaderDelegate) checkSource() error { + var request HeadObjectRequest + copyRequest(&request, d.request) + result, err := d.client.HeadObject(d.context, &request, d.options.ClientOptions...) 
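+	// the HEAD request pins down the source's size, ETag and Last-Modified
+	// before any parts are fetched; downloadChunk later passes the ETag to
+	// NewRangeReader so a concurrent change to the source can be detected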
+ if err != nil { + return err + } + + d.sizeInBytes = result.ContentLength + d.modTime = result.Headers.Get(HTTPHeaderLastModified) + d.etag = result.Headers.Get(HTTPHeaderETag) + d.headers = result.Headers + + return nil +} + +func (d *downloaderDelegate) checkDestination(filePath string) (*os.File, error) { + if filePath == "" { + return nil, NewErrParamInvalid("filePath") + } + absFilePath, err := filepath.Abs(filePath) + if err != nil { + return nil, err + } + + // use temporary file + tempFilePath := absFilePath + if d.options.UseTempFile { + tempFilePath += TempFileSuffix + } + d.filePath = absFilePath + d.tempFilePath = tempFilePath + + // use openfile to check the filepath is valid + var file *os.File + if file, err = os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode); err != nil { + return nil, err + } + + if d.fileInfo, err = file.Stat(); err != nil { + return nil, err + } + + return file, nil +} + +func (d *downloaderDelegate) adjustWriter(file *os.File) error { + expectSize := d.epos - d.rstart + if d.fileInfo != nil && d.fileInfo.Size() > expectSize { + if err := file.Truncate(d.pos - d.rstart); err != nil { + return err + } + } + d.w = file + return nil +} + +func (d *downloaderDelegate) closeWriter(file *os.File, err error) error { + if file != nil { + file.Close() + } + + if err != nil { + if d.checkpoint == nil { + os.Remove(d.tempFilePath) + } + } else { + if d.tempFilePath != d.filePath { + err = os.Rename(d.tempFilePath, d.filePath) + } + if err == nil && d.checkpoint != nil { + d.checkpoint.remove() + } + } + + d.w = nil + d.checkpoint = nil + + return err +} + +func (d *downloaderDelegate) adjustRange() error { + d.pos = 0 + d.rstart = 0 + d.epos = d.sizeInBytes + if d.request.Range != nil { + httpRange, _ := ParseRange(*d.request.Range) + if httpRange.Offset >= d.sizeInBytes { + return fmt.Errorf("invalid range, object size :%v, range: %v", d.sizeInBytes, ToString(d.request.Range)) + } + d.pos = httpRange.Offset + d.rstart = d.pos + if httpRange.Count > 0 { + d.epos = minInt64(httpRange.Offset+httpRange.Count, d.sizeInBytes) + } + } + + return nil +} + +func (d *downloaderDelegate) checkCheckpoint() error { + if d.options.EnableCheckpoint { + d.checkpoint = newDownloadCheckpoint(d.request, d.tempFilePath, d.options.CheckpointDir, d.headers, d.options.PartSize) + d.checkpoint.VerifyData = d.options.VerifyData + if err := d.checkpoint.load(); err != nil { + return err + } + + if d.checkpoint.Loaded { + d.pos = d.checkpoint.Info.Data.DownloadInfo.Offset + d.written = d.pos - d.rstart + } else { + d.checkpoint.Info.Data.DownloadInfo.Offset = d.pos + } + } + return nil +} + +func (d *downloaderDelegate) updateCRCFlag() error { + if (d.base.featureFlags & FeatureEnableCRC64CheckDownload) > 0 { + d.checkCRC = d.request.Range == nil + d.calcCRC = (d.checkpoint != nil && d.checkpoint.VerifyData) || d.checkCRC + } + return nil +} + +func (d *downloaderDelegate) download() (*DownloadResult, error) { + var ( + wg sync.WaitGroup + errValue atomic.Value + cpCh chan downloadedChunk + cpWg sync.WaitGroup + cpChunks downloadedChunks + tracker bool = d.calcCRC || d.checkpoint != nil + tCRC64 uint64 = 0 + ) + + saveErrFn := func(e error) { + errValue.Store(e) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(error) + return e + } + + // writeChunkFn runs in worker goroutines to pull chunks off of the ch channel + writeChunkFn := func(ch chan downloaderChunk) { + defer wg.Done() + var hash hash.Hash64 + if d.calcCRC { 
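+			// each worker keeps its own CRC64 state; per-chunk checksums are
+			// merged in order later via combineCRC/CRC64Combine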
+ hash = NewCRC64(0) + } + + for { + chunk, ok := <-ch + if !ok { + break + } + + if getErrFn() != nil { + continue + } + + dchunk, derr := d.downloadChunk(chunk, hash) + + if derr != nil && derr != io.EOF { + saveErrFn(derr) + } else { + // update tracker info + if tracker { + cpCh <- dchunk + } + } + } + } + + // trackerFn runs in worker goroutines to update checkpoint info or calc downloaded crc + trackerFn := func(ch chan downloadedChunk) { + defer cpWg.Done() + var ( + tOffset int64 = 0 + ) + + if d.checkpoint != nil { + tOffset = d.checkpoint.Info.Data.DownloadInfo.Offset + tCRC64 = d.checkpoint.Info.Data.DownloadInfo.CRC64 + } + + for { + chunk, ok := <-ch + if !ok { + break + } + cpChunks = append(cpChunks, chunk) + sort.Sort(cpChunks) + newOffset := tOffset + i := 0 + for ii := range cpChunks { + if cpChunks[ii].start == newOffset { + newOffset += cpChunks[ii].size + i++ + } else { + break + } + } + if newOffset != tOffset { + //remove updated chunk in cpChunks + if d.calcCRC { + tCRC64 = d.combineCRC(tCRC64, cpChunks[0:i]) + } + tOffset = newOffset + cpChunks = cpChunks[i:] + if d.checkpoint != nil { + d.checkpoint.Info.Data.DownloadInfo.Offset = tOffset + d.checkpoint.Info.Data.DownloadInfo.CRC64 = tCRC64 + d.checkpoint.dump() + } + } + } + } + + // Start the download workers + ch := make(chan downloaderChunk, d.options.ParallelNum) + for i := 0; i < d.options.ParallelNum; i++ { + wg.Add(1) + go writeChunkFn(ch) + } + + // Start tracker worker if need track downloaded chunk + if tracker { + cpCh = make(chan downloadedChunk, maxInt(3, d.options.ParallelNum)) + cpWg.Add(1) + go trackerFn(cpCh) + } + + // Consume downloaded data + if d.request.ProgressFn != nil && d.written > 0 { + d.request.ProgressFn(d.written, d.written, d.sizeInBytes) + } + + // Queue the next range of bytes to read. + for getErrFn() == nil { + if d.pos >= d.epos { + break + } + size := minInt64(d.epos-d.pos, d.options.PartSize) + ch <- downloaderChunk{w: d.w, start: d.pos, size: size, rstart: d.rstart} + d.pos += size + } + + // Waiting for parts download finished + close(ch) + wg.Wait() + + if tracker { + close(cpCh) + cpWg.Wait() + } + + if err := getErrFn(); err != nil { + return nil, d.wrapErr(err) + } + + if d.checkCRC { + if len(cpChunks) > 0 { + sort.Sort(cpChunks) + } + if derr := checkResponseHeaderCRC64(fmt.Sprint(d.combineCRC(tCRC64, cpChunks)), d.headers); derr != nil { + return nil, d.wrapErr(derr) + } + } + + return &DownloadResult{ + Written: d.written, + }, nil +} + +func (d *downloaderDelegate) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + d.written += n + if d.request.ProgressFn != nil && n > 0 { + d.request.ProgressFn(n, d.written, d.sizeInBytes) + } +} + +func (d *downloaderDelegate) downloadChunk(chunk downloaderChunk, hash hash.Hash64) (downloadedChunk, error) { + // Get the next byte range of data + var request GetObjectRequest + copyRequest(&request, d.request) + + getFn := func(ctx context.Context, httpRange HTTPRange) (output *ReaderRangeGetOutput, err error) { + // update range + request.Range = nil + rangeStr := httpRange.FormatHTTPRange() + request.RangeBehavior = nil + if rangeStr != nil { + request.Range = rangeStr + request.RangeBehavior = Ptr("standard") + } + + result, err := d.client.GetObject(ctx, &request, d.options.ClientOptions...) 
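+		// each chunk is fetched with an independent ranged GET; RangeBehavior
+		// "standard" opts into standards-compliant range handling, so an
+		// out-of-range request fails rather than returning the entire object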
+	if err != nil {
+		return nil, err
+	}
+
+	return &ReaderRangeGetOutput{
+		Body:          result.Body,
+		ETag:          result.ETag,
+		ContentLength: result.ContentLength,
+		ContentRange:  result.ContentRange,
+	}, nil
+	}
+
+	reader, _ := NewRangeReader(d.context, getFn, &HTTPRange{chunk.start, chunk.size}, d.etag)
+	defer reader.Close()
+
+	var (
+		r     io.Reader = reader
+		crc64 uint64    = 0
+	)
+	if hash != nil {
+		hash.Reset()
+		r = io.TeeReader(reader, hash)
+	}
+
+	n, err := io.Copy(&chunk, r)
+	d.incrWritten(n)
+
+	if hash != nil {
+		crc64 = hash.Sum64()
+	}
+
+	return downloadedChunk{
+		start: chunk.start,
+		size:  n,
+		crc64: crc64,
+	}, err
+}
+
+func (u *downloaderDelegate) combineCRC(hashCRC uint64, crcs downloadedChunks) uint64 {
+	if len(crcs) == 0 {
+		return hashCRC
+	}
+	crc := hashCRC
+	for _, c := range crcs {
+		crc = CRC64Combine(crc, c.crc64, uint64(c.size))
+	}
+	return crc
+}
+
+func (u *downloaderDelegate) wrapErr(err error) error {
+	return &DownloadError{
+		Path: fmt.Sprintf("oss://%s/%s", *u.request.Bucket, *u.request.Key),
+		Err:  err}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go
new file mode 100644
index 000000000..cfa5c20c8
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go
@@ -0,0 +1,503 @@
+package oss
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto"
+)
+
+// EncryptionUaSuffix user agent tag for client encryption
+const (
+	EncryptionUaSuffix string = "OssEncryptionClient"
+)
+
+type EncryptionClientOptions struct {
+	MasterCiphers []crypto.MasterCipher
+}
+
+type EncryptionClient struct {
+	client           *Client
+	defualtCCBuilder crypto.ContentCipherBuilder
+	ccBuilderMap     map[string]crypto.ContentCipherBuilder
+	alignLen         int
+}
+
+// EncryptionMultiPartContext saves encryption or decryption information for a multipart upload
+type EncryptionMultiPartContext struct {
+	ContentCipher crypto.ContentCipher
+	DataSize      int64
+	PartSize      int64
+}
+
+// Valid reports whether the EncryptionMultiPartContext is valid.
+func (ec EncryptionMultiPartContext) Valid() bool {
+	if ec.ContentCipher == nil || ec.DataSize == 0 || ec.PartSize == 0 {
+		return false
+	}
+	return true
+}
+
+func NewEncryptionClient(c *Client, masterCipher crypto.MasterCipher, optFns ...func(*EncryptionClientOptions)) (*EncryptionClient, error) {
+	options := EncryptionClientOptions{}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if masterCipher == nil {
+		return nil, NewErrParamNull("masterCipher")
+	}
+
+	defualtCCBuilder := crypto.CreateAesCtrCipher(masterCipher)
+	ccBuilderMap := map[string]crypto.ContentCipherBuilder{}
+	for _, m := range options.MasterCiphers {
+		if m != nil && len(m.GetMatDesc()) > 0 {
+			ccBuilderMap[m.GetMatDesc()] = crypto.CreateAesCtrCipher(m)
+		}
+	}
+
+	e := &EncryptionClient{
+		client:           c,
+		defualtCCBuilder: defualtCCBuilder,
+		ccBuilderMap:     ccBuilderMap,
+		alignLen:         16,
+	}
+
+	return e, nil
+}
+
+func (e *EncryptionClient) Unwrap() *Client { return e.client }
+
+// GetObjectMeta Queries the metadata of an object, including ETag, Size, and LastModified.
+// The content of the object is not returned.
+func (e *EncryptionClient) GetObjectMeta(ctx context.Context, request *GetObjectMetaRequest, optFns ...func(*Options)) (*GetObjectMetaResult, error) {
+	return e.client.GetObjectMeta(ctx, request, optFns...)
+}
+
+// HeadObject Queries the metadata of an object. The content of the object is not returned.
+func (e *EncryptionClient) HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) {
+	return e.client.HeadObject(ctx, request, optFns...)
+}
+
+// GetObject Downloads an object and transparently decrypts its content.
+func (e *EncryptionClient) GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) {
+	return e.getObjectSecurely(ctx, request, optFns...)
+}
+
+// PutObject Uploads an object, transparently encrypting its content.
+func (e *EncryptionClient) PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) {
+	return e.putObjectSecurely(ctx, request, optFns...)
+}
+
+// InitiateMultipartUpload Initiates a multipart upload task before you can upload data in parts to Object Storage Service (OSS).
+func (e *EncryptionClient) InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+	return e.initiateMultipartUploadSecurely(ctx, request, optFns...)
+}
+
+// UploadPart Uploads data in parts based on the specified object name and upload ID.
+func (e *EncryptionClient) UploadPart(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+	return e.uploadPartSecurely(ctx, request, optFns...)
+}
+
+// CompleteMultipartUpload Completes the multipart upload task of an object after all parts of the object are uploaded.
+func (e *EncryptionClient) CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) {
+	return e.client.CompleteMultipartUpload(ctx, request, optFns...)
+}
+
+// AbortMultipartUpload Cancels a multipart upload task and deletes the parts uploaded in the task.
+func (e *EncryptionClient) AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) {
+	return e.client.AbortMultipartUpload(ctx, request, optFns...)
+}
+
+// ListParts Lists all parts that are uploaded by using a specified upload ID.
+func (e *EncryptionClient) ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) {
+	return e.client.ListParts(ctx, request, optFns...)
+}
+
+// NewDownloader creates a new Downloader instance to download objects.
+func (c *EncryptionClient) NewDownloader(optFns ...func(*DownloaderOptions)) *Downloader {
+	return NewDownloader(c, optFns...)
+}
+
+// NewUploader creates a new Uploader instance to upload objects.
+func (c *EncryptionClient) NewUploader(optFns ...func(*UploaderOptions)) *Uploader {
+	return NewUploader(c, optFns...)
+}
+
+// OpenFile opens the named file for reading.
+func (c *EncryptionClient) OpenFile(ctx context.Context, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) {
+	return NewReadOnlyFile(ctx, c, bucket, key, optFns...)
+}
+
+func (e *EncryptionClient) getObjectSecurely(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+
+	var (
+		err          error
+		httpRange    *HTTPRange
+		discardCount int64 = 0
+		adjustOffset int64 = 0
+		closeBody    bool  = true
+	)
+
+	if request.Range != nil {
+		httpRange, err = ParseRange(*request.Range)
+		if err != nil {
+			return nil, err
+		}
+		offset := httpRange.Offset
+		count := httpRange.Count
+		adjustOffset = adjustRangeStart(offset, int64(e.alignLen))
+		discardCount = httpRange.Offset - adjustOffset
+
+		if discardCount != 0 {
+			if count > 0 {
+				count += discardCount
+			}
+			httpRange.Offset = adjustOffset
+			httpRange.Count = count
+		}
+	}
+
+	eRequest := request
+	if httpRange != nil && discardCount > 0 {
+		_request := *request
+		eRequest = &_request
+		eRequest.Range = httpRange.FormatHTTPRange()
+		eRequest.RangeBehavior = Ptr("standard")
+	}
+
+	result, err := e.client.GetObject(ctx, eRequest, optFns...)
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if closeBody && result.Body != nil {
+			result.Body.Close()
+		}
+	}()
+
+	if hasEncryptedHeader(result.Headers) {
+		envelope, err := getEnvelopeFromHeader(result.Headers)
+		if err != nil {
+			return nil, err
+		}
+		if !isValidContentAlg(envelope.CEKAlg) {
+			return nil, fmt.Errorf("not supported content algorithm %s,object:%s", envelope.CEKAlg, ToString(request.Key))
+		}
+		if !envelope.IsValid() {
+			return nil, fmt.Errorf("getEnvelopeFromHeader error,object:%s", ToString(request.Key))
+		}
+
+		// use ContentCipherBuilder to decrypt the object by default
+		cc, err := e.getContentCipherBuilder(envelope).ContentCipherEnv(envelope)
+		if err != nil {
+			return nil, fmt.Errorf("%s,object:%s", err.Error(), ToString(request.Key))
+		}
+
+		if adjustOffset > 0 {
+			cipherData := cc.GetCipherData().Clone()
+			cipherData.SeekIV(uint64(adjustOffset))
+			cc, _ = cc.Clone(cipherData)
+		}
+
+		result.Body, err = cc.DecryptContent(result.Body)
+	}
+
+	if discardCount > 0 && err == nil {
+		// rewrite ContentRange & ContentLength
+		if result.ContentRange != nil {
+			if from, to, total, cerr := ParseContentRange(*result.ContentRange); cerr == nil {
+				from += discardCount
+				value := fmt.Sprintf("bytes %v-%v/%v", from, to, total)
+				result.ContentRange = Ptr(value)
+				result.Headers.Set(HTTPHeaderContentRange, value)
+			}
+		} else {
+			result.Headers.Set(HTTPHeaderContentRange, fmt.Sprintf("bytes %v-/*", discardCount))
+		}
+		if result.ContentLength > 0 {
+			result.ContentLength -= discardCount
+			result.Headers.Set(HTTPHeaderContentLength, fmt.Sprint(result.ContentLength))
+		}
+		result.Body = &DiscardReadCloser{
+			RC:      result.Body,
+			Discard: int(discardCount),
+		}
+	}
+
+	closeBody = false
+	return result, err
+}
+
+func (e *EncryptionClient) putObjectSecurely(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	cc, err := e.defualtCCBuilder.ContentCipher()
+	if err != nil {
+		return nil, err
+	}
+	cryptoReader, err := cc.EncryptContent(request.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	eRequest := *request
+	eRequest.Body = cryptoReader
+	addCryptoHeaders(&eRequest, cc.GetCipherData())
+
+	return e.client.PutObject(ctx, &eRequest, optFns...)
+}
+
+func (e *EncryptionClient) initiateMultipartUploadSecurely(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+	var err error
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	if err = e.validEncryptionContext(request); err != nil {
+		return nil, err
+	}
+	cc, err := e.defualtCCBuilder.ContentCipher()
+	if err != nil {
+		return nil, err
+	}
+	eRequest := *request
+	addMultiPartCryptoHeaders(&eRequest, cc.GetCipherData())
+
+	result, err := e.client.InitiateMultipartUpload(ctx, &eRequest, optFns...)
+	if err != nil {
+		return nil, err
+	}
+
+	result.CSEMultiPartContext = &EncryptionMultiPartContext{
+		ContentCipher: cc,
+		PartSize:      ToInt64(request.CSEPartSize),
+		DataSize:      ToInt64(request.CSEDataSize),
+	}
+	return result, nil
+}
+
+func (e *EncryptionClient) uploadPartSecurely(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	if request.CSEMultiPartContext == nil {
+		return nil, NewErrParamNull("request.CSEMultiPartContext")
+	}
+	cseCtx := request.CSEMultiPartContext
+	if !cseCtx.Valid() {
+		return nil, fmt.Errorf("request.CSEMultiPartContext is invalid")
+	}
+	if cseCtx.PartSize%int64(e.alignLen) != 0 {
+		return nil, fmt.Errorf("CSEMultiPartContext's PartSize must be aligned to %v", e.alignLen)
+	}
+
+	cipherData := cseCtx.ContentCipher.GetCipherData().Clone()
+	// calculate the IV based on the part number
+	if request.PartNumber > 1 {
+		cipherData.SeekIV(uint64(request.PartNumber-1) * uint64(cseCtx.PartSize))
+	}
+
+	// for parallel upload part
+	cc, _ := cseCtx.ContentCipher.Clone(cipherData)
+
+	cryptoReader, err := cc.EncryptContent(request.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	eRequest := *request
+	eRequest.Body = cryptoReader
+
+	addUploadPartCryptoHeaders(&eRequest, cseCtx, cc.GetCipherData())
+
+	return e.client.UploadPart(ctx, &eRequest, optFns...)
+} + +func (e *EncryptionClient) getContentCipherBuilder(envelope crypto.Envelope) crypto.ContentCipherBuilder { + if ccb, ok := e.ccBuilderMap[envelope.MatDesc]; ok { + return ccb + } + return e.defualtCCBuilder +} + +func (e *EncryptionClient) validEncryptionContext(request *InitiateMultipartUploadRequest) error { + partSize := ToInt64(request.CSEPartSize) + if partSize <= 0 { + return NewErrParamInvalid("request.CSEPartSize") + } + + if partSize%int64(e.alignLen) != 0 { + return fmt.Errorf("request.CSEPartSize must aligned to the %v", e.alignLen) + } + + return nil +} + +func hasEncryptedHeader(headers http.Header) bool { + return len(headers.Get(OssClientSideEncryptionKey)) > 0 +} + +// addCryptoHeaders save Envelope information in oss meta +func addCryptoHeaders(request *PutObjectRequest, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // convert content-md5 + if request.ContentMD5 != nil { + request.Headers[OssClientSideEncryptionUnencryptedContentMD5] = *request.ContentMD5 + request.ContentMD5 = nil + } + + // convert content-length + if request.ContentLength != nil { + request.Headers[OssClientSideEncryptionUnencryptedContentLength] = fmt.Sprint(*request.ContentLength) + request.ContentLength = nil + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + // encrypted key + strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey) + request.Headers[OssClientSideEncryptionKey] = strEncryptedKey + + // encrypted iv + strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV) + request.Headers[OssClientSideEncryptionStart] = strEncryptedIV + + // wrap alg + request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm + + // cek alg + request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm +} + +// addMultiPartCryptoHeaders save Envelope information in oss meta +func addMultiPartCryptoHeaders(request *InitiateMultipartUploadRequest, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + if ToInt64(request.CSEDataSize) > 0 { + request.Headers[OssClientSideEncryptionDataSize] = fmt.Sprint(*request.CSEDataSize) + } + + request.Headers[OssClientSideEncryptionPartSize] = fmt.Sprint(*request.CSEPartSize) + + // encrypted key + strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey) + request.Headers[OssClientSideEncryptionKey] = strEncryptedKey + + // encrypted iv + strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV) + request.Headers[OssClientSideEncryptionStart] = strEncryptedIV + + // wrap alg + request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm + + // cek alg + request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm +} + +// addUploadPartCryptoHeaders save Envelope information in oss meta +func addUploadPartCryptoHeaders(request *UploadPartRequest, cseContext *EncryptionMultiPartContext, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + if cseContext.DataSize > 0 { + request.Headers[OssClientSideEncryptionDataSize] = fmt.Sprint(cseContext.DataSize) + } + + request.Headers[OssClientSideEncryptionPartSize] = fmt.Sprint(cseContext.PartSize) + + // encrypted key + 
strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey)
+	request.Headers[OssClientSideEncryptionKey] = strEncryptedKey
+
+	// encrypted iv
+	strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV)
+	request.Headers[OssClientSideEncryptionStart] = strEncryptedIV
+
+	// wrap alg
+	request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm
+
+	// cek alg
+	request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm
+}
+
+func isValidContentAlg(algName string) bool {
+	// currently content encryption only supports the AES-CTR algorithm
+	return algName == crypto.AesCtrAlgorithm
+}
+
+func adjustRangeStart(start, align int64) int64 {
+	return (start / align) * align
+}
+
+func getEnvelopeFromHeader(header http.Header) (crypto.Envelope, error) {
+	var envelope crypto.Envelope
+	envelope.IV = header.Get(OssClientSideEncryptionStart)
+	decodedIV, err := base64.StdEncoding.DecodeString(envelope.IV)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.IV = string(decodedIV)
+
+	envelope.CipherKey = header.Get(OssClientSideEncryptionKey)
+	decodedKey, err := base64.StdEncoding.DecodeString(envelope.CipherKey)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.CipherKey = string(decodedKey)
+	envelope.MatDesc = header.Get(OssClientSideEncryptionMatDesc)
+	envelope.WrapAlg = header.Get(OssClientSideEncryptionWrapAlg)
+	envelope.CEKAlg = header.Get(OssClientSideEncryptionCekAlg)
+	envelope.UnencryptedMD5 = header.Get(OssClientSideEncryptionUnencryptedContentMD5)
+	envelope.UnencryptedContentLen = header.Get(OssClientSideEncryptionUnencryptedContentLength)
+	return envelope, err
+}
+
+func getEnvelopeFromListParts(result *ListPartsResult) (crypto.Envelope, error) {
+	var envelope crypto.Envelope
+	if result == nil {
+		return envelope, NewErrParamNull("result.*ListPartsResult")
+	}
+	envelope.IV = ToString(result.ClientEncryptionStart)
+	decodedIV, err := base64.StdEncoding.DecodeString(envelope.IV)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.IV = string(decodedIV)
+
+	envelope.CipherKey = ToString(result.ClientEncryptionKey)
+	decodedKey, err := base64.StdEncoding.DecodeString(envelope.CipherKey)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.CipherKey = string(decodedKey)
+	envelope.WrapAlg = ToString(result.ClientEncryptionWrapAlg)
+	envelope.CEKAlg = ToString(result.ClientEncryptionCekAlg)
+	return envelope, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go
new file mode 100644
index 000000000..66f8ae8d0
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go
@@ -0,0 +1,62 @@
+package oss
+
+import (
+	"fmt"
+	"regexp"
+)
+
+type EndpointType int
+
+const (
+	// Access OSS over the public network, oss-[region].aliyuncs.com
+	EndpointPublic EndpointType = iota
+
+	// Access OSS over the internal network, oss-[region]-internal.aliyuncs.com
+	EndpointInternal
+
+	// Access OSS over the global acceleration endpoint, oss-accelerate.aliyuncs.com
+	EndpointAccelerate
+
+	// Access OSS over the acceleration endpoint outside the Chinese mainland, oss-accelerate-overseas.aliyuncs.com
+	EndpointAccelerateOverseas
+
+	// Access OSS over the dual-stack endpoint that supports both IPv4 and IPv6, [region].oss.aliyuncs.com
+	EndpointDualStack
+)
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+func addEndpointScheme(endpoint string, disableSSL bool) string {
+	if endpoint != "" && !schemeRE.MatchString(endpoint) {
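+		// bare host with no scheme: default to https, or http when SSL is disabled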
+ scheme := DefaultEndpointScheme + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + return endpoint +} + +func endpointFromRegion(region string, disableSSL bool, etype EndpointType) string { + scheme := DefaultEndpointScheme + if disableSSL { + scheme = "http" + } + + var endpoint string + switch etype { + case EndpointInternal: + endpoint = fmt.Sprintf("oss-%s-internal.aliyuncs.com", region) + case EndpointDualStack: + endpoint = fmt.Sprintf("%s.oss.aliyuncs.com", region) + case EndpointAccelerate: + endpoint = "oss-accelerate.aliyuncs.com" + case EndpointAccelerateOverseas: + endpoint = "oss-accelerate-overseas.aliyuncs.com" + default: + endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + + return endpoint +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go new file mode 100644 index 000000000..e7b2cb4c8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go @@ -0,0 +1,344 @@ +package oss + +// BucketACLType The access control list (ACL) of the bucket +type BucketACLType string + +// Enum values for BucketACLType +const ( + // BucketACLPrivate Only the bucket owner can perform read and write operations on objects in the bucket. + // Other users cannot access the objects in the bucket. + BucketACLPrivate BucketACLType = "private" + + // BucketACLPublicRead Only the bucket owner can write data to objects in the bucket. + // Other users, including anonymous users, can only read objects in the bucket. + BucketACLPublicRead BucketACLType = "public-read" + + // BucketACLPublicReadWrite All users, including anonymous users, can perform read and write operations on the bucket. + BucketACLPublicReadWrite BucketACLType = "public-read-write" +) + +// StorageClassType The storage class of the bucket +type StorageClassType string + +// Enum values for StorageClassType +const ( + // StorageClassStandard Standard provides highly reliable, highly available, + // and high-performance object storage for data that is frequently accessed. + StorageClassStandard StorageClassType = "Standard" + + // StorageClassIA IA provides highly durable storage at lower prices compared with Standard. + // IA has a minimum billable size of 64 KB and a minimum billable storage duration of 30 days. + StorageClassIA StorageClassType = "IA" + + // StorageClassArchive Archive provides high-durability storage at lower prices compared with Standard and IA. + // Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 60 days. + StorageClassArchive StorageClassType = "Archive" + + // StorageClassColdArchive Cold Archive provides highly durable storage at lower prices compared with Archive. + // Cold Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 180 days. + StorageClassColdArchive StorageClassType = "ColdArchive" + + // StorageClassDeepColdArchive Deep Cold Archive provides highly durable storage at lower prices compared with Cold Archive. + // Deep Cold Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 180 days. 
+ StorageClassDeepColdArchive StorageClassType = "DeepColdArchive" +) + +// DataRedundancyType The redundancy type of the bucket +type DataRedundancyType string + +// Enum values for BucketACLType +const ( + // DataRedundancyLRS Locally redundant storage (LRS) stores copies of each object across different devices in the same zone. + // This ensures data reliability and availability even if two storage devices are damaged at the same time. + DataRedundancyLRS DataRedundancyType = "LRS" + + // DataRedundancyZRS Zone-redundant storage (ZRS) uses the multi-zone mechanism to distribute user data across + // multiple zones in the same region. If one zone becomes unavailable, you can continue to access the data + // that is stored in other zones. + DataRedundancyZRS DataRedundancyType = "ZRS" +) + +// ObjectACLType The access control list (ACL) of the object +type ObjectACLType string + +// Enum values for ObjectACLType +const ( + // ObjectACLPrivate Only the object owner is allowed to perform read and write operations on the object. + // Other users cannot access the object. + ObjectACLPrivate ObjectACLType = "private" + + // ObjectACLPublicRead Only the object owner can write data to the object. + // Other users, including anonymous users, can only read the object. + ObjectACLPublicRead ObjectACLType = "public-read" + + // ObjectACLPublicReadWrite All users, including anonymous users, can perform read and write operations on the object. + ObjectACLPublicReadWrite ObjectACLType = "public-read-write" + + // ObjectACLDefault The ACL of the object is the same as that of the bucket in which the object is stored. + ObjectACLDefault ObjectACLType = "default" +) + +// VersioningStatusType bucket versioning status +type VersioningStatusType string + +const ( + // VersionEnabled Versioning Status definition: Enabled + VersionEnabled VersioningStatusType = "Enabled" + + // VersionSuspended Versioning Status definition: Suspended + VersionSuspended VersioningStatusType = "Suspended" +) + +// PayerType the type of request payer +type PayerType string + +const ( + // Requester the requester who send the request + Requester PayerType = "Requester" + + // BucketOwner the requester who send the request + BucketOwner PayerType = "BucketOwner" +) + +// BucketWormStateType the type of bucket worm state +type BucketWormStateType string + +const ( + BucketWormStateInProgress BucketWormStateType = "InProgress" + BucketWormStateLocked BucketWormStateType = "Locked" +) + +// InventoryFormatType The format of exported inventory lists +type InventoryFormatType string + +// InventoryFormatCSV Enum values for InventoryFormatType +const ( + InventoryFormatCSV InventoryFormatType = "CSV" +) + +// InventoryFrequencyType The frequency at which inventory lists are exported +type InventoryFrequencyType string + +// Enum values for InventoryFrequencyType +const ( + InventoryFrequencyDaily InventoryFrequencyType = "Daily" + InventoryFrequencyWeekly InventoryFrequencyType = "Weekly" +) + +// InventoryOptionalFieldType The configuration fields that are included in inventory lists. 
+type InventoryOptionalFieldType string + +// Enum values for InventoryOptionalFieldType +const ( + InventoryOptionalFieldSize InventoryOptionalFieldType = "Size" + InventoryOptionalFieldLastModifiedDate InventoryOptionalFieldType = "LastModifiedDate" + InventoryOptionalFieldETag InventoryOptionalFieldType = "ETag" + InventoryOptionalFieldStorageClass InventoryOptionalFieldType = "StorageClass" + InventoryOptionalFieldIsMultipartUploaded InventoryOptionalFieldType = "IsMultipartUploaded" + InventoryOptionalFieldEncryptionStatus InventoryOptionalFieldType = "EncryptionStatus" + InventoryOptionalFieldTransitionTime InventoryOptionalFieldType = "TransitionTime" +) + +// AccessMonitorStatusType The type of access monitor status +type AccessMonitorStatusType string + +// Enum values for AccessMonitorStatusType +const ( + AccessMonitorStatusEnabled AccessMonitorStatusType = "Enabled" + AccessMonitorStatusDisabled AccessMonitorStatusType = "Disabled" +) + +type HistoricalObjectReplicationType string + +// Enum values for HistoricalObjectReplicationType +const ( + HistoricalObjectReplicationEnabled HistoricalObjectReplicationType = "enabled" + HistoricalObjectReplicationDisabled HistoricalObjectReplicationType = "disabled" +) + +type TransferTypeType string + +// Enum values for TransferTypeType +const ( + TransferTypeInternal TransferTypeType = "internal" + TransferTypeOssAcc TransferTypeType = "oss_acc" +) + +type StatusType string + +// Enum values for StatusType +const ( + StatusEnabled StatusType = "Enabled" + StatusDisabled StatusType = "Disabled" +) + +type MetaQueryOrderType string + +// Enum values for MetaQueryOrderType +const ( + MetaQueryOrderAsc MetaQueryOrderType = "asc" + MetaQueryOrderDesc MetaQueryOrderType = "desc" +) + +// OSS headers +const ( + HeaderOssPrefix string = "X-Oss-" + HeaderOssMetaPrefix = "X-Oss-Meta-" + HeaderOssACL = "X-Oss-Acl" + HeaderOssObjectACL = "X-Oss-Object-Acl" + HeaderOssObjectType = "X-Oss-Object-Type" + HeaderOssSecurityToken = "X-Oss-Security-Token" + HeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption" + HeaderOssSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm" + HeaderOssSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key" + HeaderOssSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5" + HeaderOssCopySource = "X-Oss-Copy-Source" + HeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HeaderOssRequestID = "X-Oss-Request-Id" + HeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HeaderOssStorageClass = "X-Oss-Storage-Class" + HeaderOssCallback = "X-Oss-Callback" + HeaderOssCallbackVar = "X-Oss-Callback-Var" + HeaderOssRequester = "X-Oss-Request-Payer" + HeaderOssTagging = "X-Oss-Tagging" + HeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HeaderOssTrafficLimit = "X-Oss-Traffic-Limit" + HeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite" + HeaderOssRangeBehavior = "X-Oss-Range-Behavior" + HeaderOssAllowSameActionOverLap = 
"X-Oss-Allow-Same-Action-Overlap" + HeaderOssDate = "X-Oss-Date" + HeaderOssContentSha256 = "X-Oss-Content-Sha256" + HeaderOssEC = "X-Oss-Ec" + HeaderOssERR = "X-Oss-Err" +) + +// OSS headers for client sider encryption +const ( + OssClientSideEncryptionKey string = "X-Oss-Meta-Client-Side-Encryption-Key" + OssClientSideEncryptionStart = "X-Oss-Meta-Client-Side-Encryption-Start" + OssClientSideEncryptionCekAlg = "X-Oss-Meta-Client-Side-Encryption-Cek-Alg" + OssClientSideEncryptionWrapAlg = "X-Oss-Meta-Client-Side-Encryption-Wrap-Alg" + OssClientSideEncryptionMatDesc = "X-Oss-Meta-Client-Side-Encryption-Matdesc" + OssClientSideEncryptionUnencryptedContentLength = "X-Oss-Meta-Client-Side-Encryption-Unencrypted-Content-Length" + OssClientSideEncryptionUnencryptedContentMD5 = "X-Oss-Meta-Client-Side-Encryption-Unencrypted-Content-Md5" + OssClientSideEncryptionDataSize = "X-Oss-Meta-Client-Side-Encryption-Data-Size" + OssClientSideEncryptionPartSize = "X-Oss-Meta-Client-Side-Encryption-Part-Size" +) + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderContentRange = "Content-Range" + HTTPHeaderDate = "Date" + HTTPHeaderETag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderLastModified = "Last-Modified" + HTTPHeaderRange = "Range" + HTTPHeaderLocation = "Location" + HTTPHeaderUserAgent = "User-Agent" + HTTPHeaderIfModifiedSince = "If-Modified-Since" + HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" + HTTPHeaderIfMatch = "If-Match" + HTTPHeaderIfNoneMatch = "If-None-Match" +) + +type UrlStyleType int + +const ( + UrlStyleVirtualHosted UrlStyleType = iota + UrlStylePath + UrlStyleCName +) + +func (f UrlStyleType) String() string { + switch f { + default: + return "virtual-hosted-style" + case UrlStylePath: + return "path-style" + case UrlStyleCName: + return "cname-style" + } +} + +type FeatureFlagsType int + +const ( + // FeatureCorrectClockSkew If the client time is different from server time by more than about 15 minutes, + // the requests your application makes will be signed with the incorrect time, and the server will reject them. + // The feature to help to identify this case, and SDK will correct for clock skew. + FeatureCorrectClockSkew FeatureFlagsType = 1 << iota + + FeatureEnableMD5 + + // FeatureAutoDetectMimeType Content-Type is automatically added based on the object name if not specified. + // This feature takes effect for PutObject, AppendObject and InitiateMultipartUpload + FeatureAutoDetectMimeType + + // FeatureEnableCRC64CheckUpload check data integrity of uploads via the crc64. + // This feature takes effect for PutObject, AppendObject, UploadPart, Uploader.UploadFrom and Uploader.UploadFile + FeatureEnableCRC64CheckUpload + + // FeatureEnableCRC64CheckDownload check data integrity of downloads via the crc64. 
+ // This feature takes effect for Downloader.DownloadFile + FeatureEnableCRC64CheckDownload + + FeatureFlagsDefault = FeatureCorrectClockSkew + FeatureAutoDetectMimeType + + FeatureEnableCRC64CheckUpload + FeatureEnableCRC64CheckDownload +) + +type SignatureVersionType int + +const ( + SignatureVersionV1 SignatureVersionType = iota + SignatureVersionV4 +) + +func (f SignatureVersionType) String() string { + switch f { + case SignatureVersionV4: + return "OSS Signature Version 4" + default: + return "OSS Signature Version 1" + } +} + +type AuthMethodType int + +const ( + AuthMethodHeader AuthMethodType = iota + AuthMethodQuery +) + +func (f AuthMethodType) String() string { + switch f { + case AuthMethodQuery: + return "authentication in query" + default: + return "authentication in header" + } +} + +// OperationMetadata Keys +const ( + OpMetaKeyResponsHandler string = "opm-response-handler" + OpMetaKeyRequestBodyTracker string = "opm-request-body-tracker" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go new file mode 100644 index 000000000..a0b455835 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go @@ -0,0 +1,170 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" + "time" +) + +type ServiceError struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` + EC string `xml:"EC"` + + StatusCode int + Snapshot []byte + Timestamp time.Time + RequestTarget string + Headers http.Header +} + +func (e *ServiceError) Error() string { + return fmt.Sprintf( + `Error returned by Service. +Http Status Code: %d. +Error Code: %s. +Request Id: %s. +Message: %s. +EC: %s. +Timestamp: %s. 
+Request Endpoint: %s.`, + e.StatusCode, e.Code, e.RequestID, e.Message, e.EC, e.Timestamp, e.RequestTarget) +} + +func (e *ServiceError) HttpStatusCode() int { + return e.StatusCode +} + +func (e *ServiceError) ErrorCode() string { + return e.Code +} + +type ClientError struct { + Code string + Message string + Err error +} + +func (e *ClientError) Unwrap() error { return e.Err } + +func (e *ClientError) Error() string { + return fmt.Sprintf("client error: %v, %v", e.Message, e.Err) +} + +type OperationError struct { + name string + err error +} + +func (e *OperationError) Operation() string { return e.name } + +func (e *OperationError) Unwrap() error { return e.err } + +func (e *OperationError) Error() string { + return fmt.Sprintf("operation error %s: %v", e.name, e.err) +} + +type DeserializationError struct { + Err error + Snapshot []byte +} + +func (e *DeserializationError) Error() string { + const msg = "deserialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s, %v", msg, e.Err) +} + +func (e *DeserializationError) Unwrap() error { return e.Err } + +type SerializationError struct { + Err error +} + +func (e *SerializationError) Error() string { + const msg = "serialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s: %v", msg, e.Err) +} + +func (e *SerializationError) Unwrap() error { return e.Err } + +type CanceledError struct { + Err error +} + +func (*CanceledError) CanceledError() bool { return true } + +func (e *CanceledError) Unwrap() error { + return e.Err +} + +func (e *CanceledError) Error() string { + return fmt.Sprintf("canceled, %v", e.Err) +} + +type InvalidParamError interface { + error + Field() string + SetContext(string) +} + +type invalidParamError struct { + context string + field string + reason string +} + +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +func (e invalidParamError) Field() string { + sb := &strings.Builder{} + sb.WriteString(e.context) + if sb.Len() > 0 { + sb.WriteRune('.') + } + sb.WriteString(e.field) + return sb.String() +} + +func (e *invalidParamError) SetContext(ctx string) { + e.context = ctx +} + +func NewErrParamRequired(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("missing required field"), + } +} + +func NewErrParamInvalid(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("invalid field"), + } +} + +func NewErrParamNull(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("null field"), + } +} + +func NewErrParamTypeNotSupport(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("type not support"), + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go new file mode 100644 index 000000000..f5f169f8d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go @@ -0,0 +1,795 @@ +package oss + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + "strings" + "time" +) + +type OpenOptions struct { + Offset int64 + + VersionId *string + + EnablePrefetch bool + PrefetchNum int + ChunkSize int64 + + PrefetchThreshold int64 + RequestPayer *string + + OutOfOrderReadThreshold int64 +} + +type ReadOnlyFile struct { + client OpenFileAPIClient + context context.Context + + 
// object info + bucket string + key string + versionId *string + requestPayer *string + + // file info + sizeInBytes int64 + modTime string + etag string + headers http.Header + + // current read position + offset int64 + + // read + reader io.ReadCloser + readBufOffset int64 + + // prefetch + enablePrefetch bool + chunkSize int64 + prefetchNum int + prefetchThreshold int64 + + asyncReaders []*AsyncRangeReader + seqReadAmount int64 // number of sequential read + numOOORead int64 // number of out of order read + + closed bool // whether we have closed the file + + oooReadThreshold int64 +} + +// NewReadOnlyFile OpenFile opens the named file for reading. +// If successful, methods on the returned file can be used for reading. +func NewReadOnlyFile(ctx context.Context, c OpenFileAPIClient, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) { + options := OpenOptions{ + Offset: 0, + EnablePrefetch: false, + PrefetchNum: DefaultPrefetchNum, + ChunkSize: DefaultPrefetchChunkSize, + PrefetchThreshold: DefaultPrefetchThreshold, + OutOfOrderReadThreshold: DefaultOutOfOrderReadThreshold, + } + + for _, fn := range optFns { + fn(&options) + } + + if options.EnablePrefetch { + var chunkSize int64 + if options.ChunkSize > 0 { + chunkSize = (options.ChunkSize + AsyncReadeBufferSize - 1) / AsyncReadeBufferSize * AsyncReadeBufferSize + } else { + chunkSize = DefaultPrefetchChunkSize + } + options.ChunkSize = chunkSize + + if options.PrefetchNum <= 0 { + options.PrefetchNum = DefaultPrefetchNum + } + + if options.OutOfOrderReadThreshold <= int64(0) { + options.OutOfOrderReadThreshold = DefaultOutOfOrderReadThreshold + } + } + + f := &ReadOnlyFile{ + client: c, + context: ctx, + + bucket: bucket, + key: key, + versionId: options.VersionId, + requestPayer: options.RequestPayer, + + offset: options.Offset, + + enablePrefetch: options.EnablePrefetch, + prefetchNum: options.PrefetchNum, + chunkSize: options.ChunkSize, + prefetchThreshold: options.PrefetchThreshold, + oooReadThreshold: options.OutOfOrderReadThreshold, + } + + result, err := f.client.HeadObject(f.context, &HeadObjectRequest{ + Bucket: &f.bucket, + Key: &f.key, + VersionId: f.versionId, + RequestPayer: f.requestPayer, + }) + + if err != nil { + return nil, err + } + + //File info + f.sizeInBytes = result.ContentLength + f.modTime = result.Headers.Get(HTTPHeaderLastModified) + f.etag = result.Headers.Get(HTTPHeaderETag) + f.headers = result.Headers + + if f.sizeInBytes < 0 { + return nil, fmt.Errorf("file size is invaid, got %v", f.sizeInBytes) + } + + if f.offset > f.sizeInBytes { + return nil, fmt.Errorf("offset is unavailable, offset:%v, file size:%v", f.offset, f.sizeInBytes) + } + + return f, nil +} + +// Close closes the File. +func (f *ReadOnlyFile) Close() error { + if f == nil { + return os.ErrInvalid + } + return f.close() +} + +func (f *ReadOnlyFile) close() error { + if f.closed { + return nil + } + + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + for _, reader := range f.asyncReaders { + reader.Close() + } + f.asyncReaders = nil + + f.closed = true + runtime.SetFinalizer(f, nil) + return nil +} + +// Read reads up to len(b) bytes from the File and stores them in b. +// It returns the number of bytes read and any error encountered. +// At end of file, Read returns 0, io.EOF. 
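+//
+// A minimal sequential-read sketch (bucket and key names are illustrative):
+//
+//	f, err := NewReadOnlyFile(ctx, client, "examplebucket", "exampleobject")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	buf := make([]byte, 32*1024)
+//	for {
+//		n, err := f.Read(buf)
+//		if n > 0 {
+//			// consume buf[:n]
+//		}
+//		if err != nil {
+//			break // io.EOF marks the end of the object
+//		}
+//	}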
+func (f *ReadOnlyFile) Read(p []byte) (bytesRead int, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + n, e := f.read(p) + return n, f.wrapErr("read", e) +} + +func (f *ReadOnlyFile) read(p []byte) (bytesRead int, err error) { + defer func() { + f.offset += int64(bytesRead) + }() + nwant := len(p) + var nread int + for bytesRead < nwant && err == nil { + nread, err = f.readInternal(f.offset+int64(bytesRead), p[bytesRead:]) + if nread > 0 { + bytesRead += nread + } + } + return +} + +// Seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error. +func (f *ReadOnlyFile) Seek(offset int64, whence int) (int64, error) { + if err := f.checkValid("seek"); err != nil { + return 0, err + } + r, e := f.seek(offset, whence) + if e != nil { + return 0, f.wrapErr("seek", e) + } + return r, nil +} + +func (f *ReadOnlyFile) seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = f.offset + offset + case io.SeekEnd: + abs = f.sizeInBytes + offset + default: + return 0, fmt.Errorf("invalid whence") + } + if abs < 0 { + return 0, fmt.Errorf("negative position") + } + if abs > f.sizeInBytes { + return offset - (abs - f.sizeInBytes), fmt.Errorf("offset is unavailable") + } + + f.offset = abs + + return abs, nil +} + +type fileInfo struct { + name string + size int64 + modTime time.Time + header http.Header +} + +func (fi *fileInfo) Name() string { return fi.name } +func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Mode() os.FileMode { return os.FileMode(0644) } +func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (fi *fileInfo) IsDir() bool { return false } +func (fi *fileInfo) Sys() any { return fi.header } + +// Stat returns the FileInfo structure describing file. 
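+// The Sys method of the returned FileInfo exposes the object's raw http.Header.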
+func (f *ReadOnlyFile) Stat() (os.FileInfo, error) { + if err := f.checkValid("stat"); err != nil { + return nil, err + } + mtime, _ := http.ParseTime(f.modTime) + return &fileInfo{ + name: f.name(), + size: f.sizeInBytes, + modTime: mtime, + header: f.headers, + }, nil +} + +func (f *ReadOnlyFile) name() string { + var name string + if f.versionId != nil { + name = fmt.Sprintf("oss://%s/%s?versionId=%s", f.bucket, f.key, *f.versionId) + } else { + name = fmt.Sprintf("oss://%s/%s", f.bucket, f.key) + } + return name +} + +func (f *ReadOnlyFile) wrapErr(op string, err error) error { + if err == nil || err == io.EOF { + return err + } + return &os.PathError{Op: op, Path: f.name(), Err: err} +} + +func (f *ReadOnlyFile) checkValid(_ string) error { + if f == nil { + return os.ErrInvalid + } else if f.closed { + return os.ErrClosed + } + return nil +} + +func (f *ReadOnlyFile) readInternal(offset int64, p []byte) (bytesRead int, err error) { + defer func() { + if bytesRead > 0 { + f.readBufOffset += int64(bytesRead) + f.seqReadAmount += int64(bytesRead) + } + }() + + if offset >= f.sizeInBytes { + err = io.EOF + return + } + + if f.readBufOffset != offset { + f.readBufOffset = offset + f.seqReadAmount = 0 + + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + + if f.asyncReaders != nil { + f.numOOORead++ + } + + for _, ar := range f.asyncReaders { + ar.Close() + } + f.asyncReaders = nil + } + + if f.enablePrefetch && f.seqReadAmount >= f.prefetchThreshold && f.numOOORead < f.oooReadThreshold { + //swith to async reader + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + + err = f.prefetch(offset, len(p)) + if err == nil { + bytesRead, err = f.readFromPrefetcher(offset, p) + if err == nil { + return + } + } + + // fall back to read serially + f.seqReadAmount = 0 + for _, ar := range f.asyncReaders { + ar.Close() + } + f.asyncReaders = nil + } + + bytesRead, err = f.readDirect(offset, p) + return +} + +func (f *ReadOnlyFile) readDirect(offset int64, buf []byte) (bytesRead int, err error) { + if offset >= f.sizeInBytes { + return + } + + if f.reader == nil { + var result *GetObjectResult + result, err = f.client.GetObject(f.context, &GetObjectRequest{ + Bucket: Ptr(f.bucket), + Key: Ptr(f.key), + VersionId: f.versionId, + Range: Ptr(fmt.Sprintf("bytes=%d-", offset)), + RangeBehavior: Ptr("standard"), + RequestPayer: f.requestPayer, + }) + if err != nil { + return bytesRead, err + } + + if err = f.checkResultValid(offset, result.Headers); err != nil { + if result != nil { + result.Body.Close() + } + return bytesRead, err + } + + f.reader = result.Body + } + + bytesRead, err = f.reader.Read(buf) + if err != nil { + f.reader.Close() + f.reader = nil + err = nil + } + + return +} + +func (f *ReadOnlyFile) checkResultValid(offset int64, header http.Header) error { + modTime := header.Get(HTTPHeaderLastModified) + etag := header.Get(HTTPHeaderETag) + gotOffset, _ := parseOffsetAndSizeFromHeaders(header) + if gotOffset != offset { + return fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", offset, gotOffset) + } + + if (modTime != "" && f.modTime != "" && modTime != f.modTime) || + (etag != "" && f.etag != "" && etag != f.etag) { + return fmt.Errorf("Source file is changed, origin info [%v,%v], new info [%v,%v]", + f.modTime, f.etag, modTime, etag) + } + + return nil +} + +func (f *ReadOnlyFile) readFromPrefetcher(offset int64, buf []byte) (bytesRead int, err error) { + var nread int + for len(f.asyncReaders) != 0 { + asyncReader := f.asyncReaders[0] + //check offset 
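+		// chunks must be consumed strictly in sequence; a mismatch means the
+		// caller seeked, and the "out of order" error below makes readInternal
+		// tear down the prefetch pipeline and fall back to direct reads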
+ if offset != asyncReader.Offset() { + return 0, errors.New("out of order") + } + nread, err = asyncReader.Read(buf) + bytesRead += nread + if err != nil { + if err == io.EOF { + //fmt.Printf("asyncReader done\n") + asyncReader.Close() + f.asyncReaders = f.asyncReaders[1:] + err = nil + } else { + return 0, err + } + } + buf = buf[nread:] + if len(buf) == 0 { + return + } + // Update offset for the next read + offset += int64(nread) + } + + return +} + +func (f *ReadOnlyFile) prefetch(offset int64, _ /*needAtLeast*/ int) (err error) { + off := offset + for _, ar := range f.asyncReaders { + off = ar.oriHttpRange.Offset + ar.oriHttpRange.Count + } + //fmt.Printf("prefetch:offset %v, needAtLeast:%v, off:%v\n", offset, needAtLeast, off) + for len(f.asyncReaders) < f.prefetchNum { + remaining := f.sizeInBytes - off + size := minInt64(remaining, f.chunkSize) + cnt := (size + (AsyncReadeBufferSize - 1)) / AsyncReadeBufferSize + //fmt.Printf("f.sizeInBytes:%v, off:%v, size:%v, cnt:%v\n", f.sizeInBytes, off, size, cnt) + //NewAsyncRangeReader support softStartInitial, add more buffer count to prevent connections from not being released + if size > softStartInitial { + acnt := (AsyncReadeBufferSize+(softStartInitial-1))/softStartInitial - 1 + cnt += int64(acnt) + } + if size != 0 { + getFn := func(ctx context.Context, httpRange HTTPRange) (output *ReaderRangeGetOutput, err error) { + request := &GetObjectRequest{ + Bucket: Ptr(f.bucket), + Key: Ptr(f.key), + VersionId: f.versionId, + RequestPayer: f.requestPayer, + } + rangeStr := httpRange.FormatHTTPRange() + if rangeStr != nil { + request.Range = rangeStr + request.RangeBehavior = Ptr("standard") + } + var result *GetObjectResult + result, err = f.client.GetObject(f.context, request) + if err != nil { + return nil, err + } + + return &ReaderRangeGetOutput{ + Body: result.Body, + ETag: result.ETag, + ContentLength: result.ContentLength, + ContentRange: result.ContentRange, + }, nil + //fmt.Printf("result.Headers:%#v\n", result.Headers) + } + ar, err := NewAsyncRangeReader(f.context, getFn, &HTTPRange{off, size}, f.etag, int(cnt)) + if err != nil { + break + } + f.asyncReaders = append(f.asyncReaders, ar) + off += size + } + + if size != f.chunkSize { + break + } + } + return nil +} + +type AppendOptions struct { + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string + + // The parameters when the object is first generated, supports below + // CacheControl, ContentEncoding, Expires, ContentType, ContentType, Metadata + // SSE's parameters, Acl, StorageClass, Tagging + // If the object exists, ignore this parameters + CreateParameter *AppendObjectRequest +} + +type AppendOnlyFile struct { + client AppendFileAPIClient + context context.Context + + // object info + bucket string + key string + requestPayer *string + + info os.FileInfo + + created bool + createParam *AppendObjectRequest + + // current write position + offset int64 + hashCRC64 *string + + closed bool +} + +// NewAppendFile AppendFile opens or creates the named file for appending. +// If successful, methods on the returned file can be used for appending. 
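+//
+// A minimal append sketch (bucket and key names are illustrative):
+//
+//	f, err := NewAppendFile(ctx, client, "examplebucket", "example.log")
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := f.Write([]byte("one record\n")); err != nil {
+//		// handle error
+//	}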
+func NewAppendFile(ctx context.Context, c AppendFileAPIClient, bucket string, key string, optFns ...func(*AppendOptions)) (*AppendOnlyFile, error) { + options := AppendOptions{} + + for _, fn := range optFns { + fn(&options) + } + + f := &AppendOnlyFile{ + client: c, + context: ctx, + + bucket: bucket, + key: key, + requestPayer: options.RequestPayer, + + created: false, + createParam: options.CreateParameter, + } + + result, err := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}) + if err != nil { + var serr *ServiceError + if errors.As(err, &serr) && serr.StatusCode == 404 { + // not found + } else { + return nil, err + } + } else { + if !strings.EqualFold(ToString(result.ObjectType), "Appendable") { + return nil, errors.New("Not a appendable file") + } + f.offset = result.ContentLength + f.hashCRC64 = result.HashCRC64 + f.created = true + } + + return f, nil +} + +// Write writes len(b) bytes from b to the AppendOnlyFile. +// It returns the number of bytes written and an error, if any. +// Write returns a non-nil error when n != len(b). +func (f *AppendOnlyFile) Write(b []byte) (n int, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + + n, e := f.write(b) + if n < 0 { + n = 0 + } + + if e == nil && n != len(b) { + err = io.ErrShortWrite + } + + if e != nil { + err = f.wrapErr("write", e) + } + + return n, err +} + +// write writes len(b) bytes to the File. +// It returns the number of bytes written and an error, if any. +func (f *AppendOnlyFile) write(b []byte) (n int, err error) { + offset := f.offset + + request := &AppendObjectRequest{ + Bucket: &f.bucket, + Key: &f.key, + Position: Ptr(f.offset), + Body: bytes.NewReader(b), + InitHashCRC64: f.hashCRC64, + RequestPayer: f.requestPayer, + } + + f.applyCreateParamIfNeed(request) + + if f.offset == 0 { + request.InitHashCRC64 = Ptr("0") + } + + var result *AppendObjectResult + if result, err = f.client.AppendObject(f.context, request); err == nil { + f.offset = result.NextPosition + f.hashCRC64 = result.HashCRC64 + f.created = true + } else { + var serr *ServiceError + if errors.As(err, &serr) && serr.Code == "PositionNotEqualToLength" { + if position, hashcrc, ok := f.nextAppendPosition(); ok { + if offset+int64(len(b)) == position { + f.offset = position + f.hashCRC64 = hashcrc + f.created = true + err = nil + } + } + } + } + + return int(f.offset - offset), err +} + +// WriteFrom writes io.Reader to the File. +// It returns the number of bytes written and an error, if any. 
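+//
+// A small sketch of streaming an io.Reader into the object (the local path
+// and the already-opened AppendOnlyFile f are assumptions for illustration):
+//
+//	src, err := os.Open("/tmp/part.bin")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer src.Close()
+//	n, err := f.WriteFrom(src)
+//	_, _ = n, err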
+func (f *AppendOnlyFile) WriteFrom(r io.Reader) (n int64, err error) {
+ if err := f.checkValid("write"); err != nil {
+ return 0, err
+ }
+
+ n, err = f.writeFrom(r)
+
+ if err != nil {
+ err = f.wrapErr("write", err)
+ }
+
+ return n, err
+}
+
+func (f *AppendOnlyFile) writeFrom(r io.Reader) (n int64, err error) {
+ offset := f.offset
+
+ request := &AppendObjectRequest{
+ Bucket: &f.bucket,
+ Key: &f.key,
+ Position: Ptr(f.offset),
+ Body: r,
+ RequestPayer: f.requestPayer,
+ }
+
+ f.applyCreateParamIfNeed(request)
+
+ var roffset int64
+ var rs io.Seeker
+ rs, ok := r.(io.Seeker)
+ if ok {
+ roffset, _ = rs.Seek(0, io.SeekCurrent)
+ }
+
+ var result *AppendObjectResult
+ if result, err = f.client.AppendObject(f.context, request); err == nil {
+ f.offset = result.NextPosition
+ f.hashCRC64 = result.HashCRC64
+ f.created = true
+ } else {
+ var serr *ServiceError
+ if errors.As(err, &serr) && serr.Code == "PositionNotEqualToLength" {
+ if position, hashcrc, ok := f.nextAppendPosition(); ok {
+ if rs != nil {
+ if curr, e := rs.Seek(0, io.SeekCurrent); e == nil {
+ if offset+(curr-roffset) == position {
+ f.offset = position
+ f.hashCRC64 = hashcrc
+ f.created = true
+ err = nil
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return f.offset - offset, err
+}
+
+func (f *AppendOnlyFile) nextAppendPosition() (int64, *string, bool) {
+ if h, e := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}); e == nil {
+ return h.ContentLength, h.HashCRC64, true
+ }
+ return 0, nil, false
+}
+
+func (f *AppendOnlyFile) applyCreateParamIfNeed(request *AppendObjectRequest) {
+ if f.created || f.createParam == nil {
+ return
+ }
+
+ if len(f.createParam.Acl) > 0 {
+ request.Acl = f.createParam.Acl
+ }
+ if len(f.createParam.StorageClass) > 0 {
+ request.StorageClass = f.createParam.StorageClass
+ }
+ request.CacheControl = f.createParam.CacheControl
+ request.ContentDisposition = f.createParam.ContentDisposition
+ request.ContentEncoding = f.createParam.ContentEncoding
+ request.Expires = f.createParam.Expires
+ request.ContentType = f.createParam.ContentType
+ request.ServerSideEncryption = f.createParam.ServerSideEncryption
+ request.ServerSideDataEncryption = f.createParam.ServerSideDataEncryption
+ request.ServerSideEncryptionKeyId = f.createParam.ServerSideEncryptionKeyId
+ request.Metadata = f.createParam.Metadata
+ request.Tagging = f.createParam.Tagging
+}
+
+// wrapErr wraps an error that occurred during an operation on an open file.
+// It passes io.EOF through unchanged; otherwise it wraps the error in an
+// os.PathError.
+func (f *AppendOnlyFile) wrapErr(op string, err error) error {
+ if err == nil || err == io.EOF {
+ return err
+ }
+ return &os.PathError{Op: op, Path: f.name(), Err: err}
+}
+
+func (f *AppendOnlyFile) checkValid(_ string) error {
+ if f == nil {
+ return os.ErrInvalid
+ } else if f.closed {
+ return os.ErrClosed
+ }
+ return nil
+}
+
+func (f *AppendOnlyFile) name() string {
+ return fmt.Sprintf("oss://%s/%s", f.bucket, f.key)
+}
+
+// Stat returns the FileInfo structure describing the file.
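+//
+// For example (assuming f is an open AppendOnlyFile):
+//
+//	info, err := f.Stat()
+//	if err == nil {
+//		fmt.Println(info.Name(), info.Size(), info.ModTime())
+//	}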
+func (f *AppendOnlyFile) Stat() (os.FileInfo, error) {
+ if err := f.checkValid("stat"); err != nil {
+ return nil, err
+ }
+
+ info, err := f.stat()
+
+ if err != nil {
+ return nil, f.wrapErr("stat", err)
+ }
+
+ return info, nil
+}
+
+func (f *AppendOnlyFile) stat() (os.FileInfo, error) {
+ var err error
+ if f.info == nil || f.info.Size() != f.offset {
+ f.info = nil
+ if result, err := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}); err == nil {
+ f.info = &fileInfo{
+ name: f.name(),
+ size: result.ContentLength,
+ modTime: ToTime(result.LastModified),
+ header: result.Headers,
+ }
+ }
+ }
+ return f.info, err
+}
+
+// Close closes the File.
+func (f *AppendOnlyFile) Close() error {
+ if f == nil {
+ return os.ErrInvalid
+ }
+ return f.close()
+}
+
+func (f *AppendOnlyFile) close() error {
+ f.closed = true
+ return nil
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go
new file mode 100644
index 000000000..3059bf1cd
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go
@@ -0,0 +1,63 @@
+package oss
+
+import "time"
+
+// ToBool returns bool value if the pointer is not nil.
+// Returns a bool zero value if the pointer is nil.
+func ToBool(p *bool) (v bool) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt returns int value if the pointer is not nil.
+// Returns an int zero value if the pointer is nil.
+func ToInt(p *int) (v int) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt64 returns int64 value if the pointer is not nil.
+// Returns an int64 zero value if the pointer is nil.
+func ToInt64(p *int64) (v int64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToString returns string value if the pointer is not nil.
+// Returns a string zero value if the pointer is nil.
+func ToString(p *string) (v string) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToTime returns time.Time value if the pointer is not nil.
+// Returns a time.Time zero value if the pointer is nil.
+func ToTime(p *time.Time) (v time.Time) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToDuration returns time.Duration value if the pointer is not nil.
+// Returns a time.Duration zero value if the pointer is nil.
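+//
+// For example (illustrative):
+//
+//	var p *time.Duration
+//	_ = ToDuration(p)                    // 0s, the zero value
+//	_ = ToDuration(Ptr(5 * time.Second)) // 5s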
+func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go new file mode 100644 index 000000000..f67d1cc21 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go @@ -0,0 +1,869 @@ +package oss + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" +) + +type LimitedReadCloser struct { + *io.LimitedReader + io.Closer +} + +func NewLimitedReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser { + if limit < 0 { + return rc + } + return &LimitedReadCloser{ + LimitedReader: &io.LimitedReader{R: rc, N: limit}, + Closer: rc, + } +} + +func ReadSeekNopCloser(r io.Reader) ReadSeekerNopClose { + return ReadSeekerNopClose{r} +} + +type ReadSeekerNopClose struct { + r io.Reader +} + +func (r ReadSeekerNopClose) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +func (r ReadSeekerNopClose) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +func (r ReadSeekerNopClose) Close() error { + return nil +} + +func (r ReadSeekerNopClose) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +func (r ReadSeekerNopClose) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +func (r ReadSeekerNopClose) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, io.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +func isReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReadSeekerNopClose: + return v.IsSeeker() + case *ReadSeekerNopClose: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +func GetReaderLen(r io.Reader) int64 { + type lenner interface { + Len() int + } + + if lr, ok := r.(lenner); ok { + return int64(lr.Len()) + } + + if s, ok := r.(io.Seeker); ok { + if l, err := seekerLen(s); err == nil { + return l + } + } + + return -1 +} + +type buffer struct { + buf []byte + err error + offset int +} + +func (b *buffer) isEmpty() bool { + if b == nil { + return true + } + if len(b.buf)-b.offset <= 0 { + return true + } + return false +} + +func (b *buffer) read(rd io.Reader) error { + var n int + n, b.err = readFill(rd, b.buf) + b.buf = b.buf[0:n] + b.offset = 0 + return b.err +} + +func (b *buffer) buffer() []byte { + return b.buf[b.offset:] +} + +func (b *buffer) increment(n int) { + b.offset += n +} + +const ( + AsyncReadeBufferSize = 1024 * 1024 + softStartInitial = 512 * 1024 +) + +type ReaderRangeGetOutput struct { + Body io.ReadCloser + ContentLength int64 + ContentRange *string + ETag *string + LastModified *time.Time +} + +type ReaderRangeGetFn func(context.Context, HTTPRange) (output *ReaderRangeGetOutput, err error) + +type AsyncRangeReader struct { + in io.ReadCloser // Input reader + ready chan *buffer // Buffers ready to be 
handed to the reader
+ token chan struct{} // Tokens which allow a buffer to be taken
+ exit chan struct{} // Closes when finished
+ buffers int // Number of buffers
+ err error // If an error has occurred it is here
+ cur *buffer // Current buffer being served
+ exited chan struct{} // Channel is closed when the async reader shuts down
+ size int // size of buffer to use
+ closed bool // whether we have closed the underlying stream
+ mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
+
+ // Range getter
+ rangeGet ReaderRangeGetFn
+ httpRange HTTPRange
+
+ // For reader
+ offset int64
+ gotsize int64
+
+ oriHttpRange HTTPRange
+
+ context context.Context
+ cancel context.CancelFunc
+
+ // Origin file pattern
+ etag string
+ modTime string
+}
+
+// NewAsyncRangeReader returns a reader that will asynchronously read from
+// the Reader returned by getter from the given offset into a number of buffers, each of size AsyncReadeBufferSize.
+// The input can be read from the returned reader.
+// When done, use Close to release the buffers and close the supplied input.
+// The etag is used to identify the content of the object. If not set, the first returned ETag value will be used instead.
+func NewAsyncRangeReader(ctx context.Context,
+ rangeGet ReaderRangeGetFn, httpRange *HTTPRange, etag string, buffers int) (*AsyncRangeReader, error) {
+
+ if buffers <= 0 {
+ return nil, errors.New("number of buffers too small")
+ }
+ if rangeGet == nil {
+ return nil, errors.New("nil reader supplied")
+ }
+
+ context, cancel := context.WithCancel(ctx)
+
+ range_ := HTTPRange{}
+ if httpRange != nil {
+ range_ = *httpRange
+ }
+
+ a := &AsyncRangeReader{
+ rangeGet: rangeGet,
+ context: context,
+ cancel: cancel,
+ httpRange: range_,
+ oriHttpRange: range_,
+ offset: range_.Offset,
+ gotsize: 0,
+ etag: etag,
+ buffers: buffers,
+ }
+
+ //fmt.Printf("NewAsyncRangeReader, range: %s, etag:%s, buffer count:%v\n", ToString(a.httpRange.FormatHTTPRange()), a.etag, a.buffers)
+
+ a.init(buffers)
+ return a, nil
+}
+
+func (a *AsyncRangeReader) init(buffers int) {
+ a.ready = make(chan *buffer, buffers)
+ a.token = make(chan struct{}, buffers)
+ a.exit = make(chan struct{})
+ a.exited = make(chan struct{})
+ a.buffers = buffers
+ a.cur = nil
+ a.size = softStartInitial
+
+ // Create tokens
+ for i := 0; i < buffers; i++ {
+ a.token <- struct{}{}
+ }
+
+ // Start async reader
+ go func() {
+ // Ensure that when we exit this is signalled.
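+ // Pipeline overview: each value received from a.token grants the right
+ // to fill one buffer; filled buffers are handed to the reader via
+ // a.ready, and fill() returns a token once a buffer has been drained.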
+ defer close(a.exited) + defer close(a.ready) + for { + select { + case <-a.token: + b := a.getBuffer() + if a.size < AsyncReadeBufferSize { + b.buf = b.buf[:a.size] + a.size <<= 1 + } + + if a.httpRange.Count > 0 && a.gotsize > a.httpRange.Count { + b.buf = b.buf[0:0] + b.err = io.EOF + //fmt.Printf("a.gotsize > a.httpRange.Count, err:%v\n", b.err) + a.ready <- b + return + } + + if a.in == nil { + httpRangeRemains := a.httpRange + if a.httpRange.Count > 0 { + gotNum := a.httpRange.Offset - a.oriHttpRange.Offset + if gotNum > 0 && a.httpRange.Count > gotNum { + httpRangeRemains.Count = a.httpRange.Count - gotNum + } + } + output, err := a.rangeGet(a.context, httpRangeRemains) + if err == nil { + etag := ToString(output.ETag) + if a.etag == "" { + a.etag = etag + } + if etag != a.etag { + err = fmt.Errorf("Source file is changed, expect etag:%s ,got etag:%s", a.etag, etag) + } + + // Partial Response check + var off int64 + if output.ContentRange == nil { + off = 0 + } else { + off, _, _, _ = ParseContentRange(*output.ContentRange) + } + if off != httpRangeRemains.Offset { + err = fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", httpRangeRemains.Offset, off) + } + } + if err != nil { + b.buf = b.buf[0:0] + b.err = err + if output != nil && output.Body != nil { + output.Body.Close() + } + //fmt.Printf("call getFunc fail, err:%v\n", err) + a.ready <- b + return + } + body := output.Body + if httpRangeRemains.Count > 0 { + body = NewLimitedReadCloser(output.Body, httpRangeRemains.Count) + } + a.in = body + //fmt.Printf("call getFunc done, range:%s\n", ToString(httpRangeRemains.FormatHTTPRange())) + } + + // ignore err from read + err := b.read(a.in) + a.httpRange.Offset += int64(len(b.buf)) + a.gotsize += int64(len(b.buf)) + if err != io.EOF { + b.err = nil + } + //fmt.Printf("read into buffer, size:%v, next begin:%v, err:%v\n", len(b.buf), a.httpRange.Offset, err) + a.ready <- b + if err != nil { + a.in.Close() + a.in = nil + if err == io.EOF { + return + } + } + case <-a.exit: + return + } + } + }() +} + +func (a *AsyncRangeReader) fill() (err error) { + if a.cur.isEmpty() { + if a.cur != nil { + a.putBuffer(a.cur) + a.token <- struct{}{} + a.cur = nil + } + b, ok := <-a.ready + if !ok { + // Return an error to show fill failed + if a.err == nil { + return errors.New("stream abandoned") + } + return a.err + } + a.cur = b + } + return nil +} + +// Read will return the next available data. 
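+// A hedged sketch of wiring a ReaderRangeGetFn to GetObject and reading
+// sequentially (ctx, client and the object names are assumptions):
+//
+//	getFn := func(ctx context.Context, r HTTPRange) (*ReaderRangeGetOutput, error) {
+//		res, err := client.GetObject(ctx, &GetObjectRequest{
+//			Bucket: Ptr("examplebucket"),
+//			Key:    Ptr("example.txt"),
+//			Range:  r.FormatHTTPRange(),
+//		})
+//		if err != nil {
+//			return nil, err
+//		}
+//		return &ReaderRangeGetOutput{
+//			Body:          res.Body,
+//			ETag:          res.ETag,
+//			ContentLength: res.ContentLength,
+//			ContentRange:  res.ContentRange,
+//		}, nil
+//	}
+//	ar, err := NewAsyncRangeReader(ctx, getFn, &HTTPRange{Offset: 0, Count: 0}, "", 3)
+//	if err == nil {
+//		defer ar.Close()
+//		_, _ = io.Copy(io.Discard, ar)
+//	}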
+func (a *AsyncRangeReader) Read(p []byte) (n int, err error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ defer func() {
+ a.offset += int64(n)
+ }()
+
+ // Swap buffer and maybe return error
+ err = a.fill()
+ if err != nil {
+ return 0, err
+ }
+
+ // Copy what we can
+ n = copy(p, a.cur.buffer())
+ a.cur.increment(n)
+
+ // If at end of buffer, return any error, if present
+ if a.cur.isEmpty() {
+ a.err = a.cur.err
+ return n, a.err
+ }
+ return n, nil
+}
+
+func (a *AsyncRangeReader) Offset() int64 {
+ return a.offset
+}
+
+func (a *AsyncRangeReader) Close() (err error) {
+ a.abandon()
+ if a.closed {
+ return nil
+ }
+ a.closed = true
+
+ if a.in != nil {
+ err = a.in.Close()
+ }
+ return
+}
+
+func (a *AsyncRangeReader) abandon() {
+ a.stop()
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ if a.cur != nil {
+ a.putBuffer(a.cur)
+ a.cur = nil
+ }
+ for b := range a.ready {
+ a.putBuffer(b)
+ }
+}
+
+func (a *AsyncRangeReader) stop() {
+ select {
+ case <-a.exit:
+ return
+ default:
+ }
+ a.cancel()
+ close(a.exit)
+ <-a.exited
+}
+
+// bufferPool is a global pool of buffers
+var bufferPool *sync.Pool
+var bufferPoolOnce sync.Once
+
+// TODO use pool
+func (a *AsyncRangeReader) putBuffer(b *buffer) {
+ b.buf = b.buf[0:cap(b.buf)]
+ bufferPool.Put(b.buf)
+}
+
+func (a *AsyncRangeReader) getBuffer() *buffer {
+ bufferPoolOnce.Do(func() {
+ // Initialise the buffer pool when used
+ bufferPool = &sync.Pool{
+ New: func() any {
+ //fmt.Printf("make([]byte, BufferSize)\n")
+ return make([]byte, AsyncReadeBufferSize)
+ },
+ }
+ })
+ return &buffer{
+ buf: bufferPool.Get().([]byte),
+ }
+}
+
+func readFill(r io.Reader, buf []byte) (n int, err error) {
+ var nn int
+ for n < len(buf) && err == nil {
+ nn, err = r.Read(buf[n:])
+ n += nn
+ }
+ return n, err
+}
+
+// MultiBytesReader implements the io.Reader and io.Seeker interfaces by
+// reading from a list of byte slices.
+type MultiBytesReader struct {
+ s [][]byte
+ i int64 // current reading index
+ size int64
+ rbuf int
+ rp int
+}
+
+// Len returns the number of bytes of the unread portion of the slices.
+func (r *MultiBytesReader) Len() int {
+ if r.i >= r.size {
+ return 0
+ }
+ return int(r.size - r.i)
+}
+
+// Size returns the original length of the underlying byte slices.
+func (r *MultiBytesReader) Size() int64 { return r.size }
+
+// Read implements the io.Reader interface.
+func (r *MultiBytesReader) Read(b []byte) (n int, err error) {
+ if r.i >= r.size {
+ return 0, io.EOF
+ }
+
+ var nn int
+ for n < len(b) && err == nil {
+ nn, err = r.read(b[n:])
+ n += nn
+ }
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ return n, err
+}
+
+func (r *MultiBytesReader) read(b []byte) (n int, err error) {
+ if r.i >= r.size {
+ return 0, io.EOF
+ }
+
+ //if r.rp == cap(r.s[r.rbuf]) {
+ if r.rp == len(r.s[r.rbuf]) {
+ r.rbuf++
+ r.rp = 0
+ }
+
+ if r.rbuf == len(r.s) {
+ err = io.EOF
+ return
+ } else if r.rbuf > len(r.s) {
+ return 0, fmt.Errorf("read overflow, rbuf:%d, buf len%d", r.rbuf, len(r.s))
+ }
+
+ n = copy(b, r.s[r.rbuf][r.rp:])
+ r.rp += n
+ r.i += int64(n)
+
+ return
+}
+
+// Seek implements the io.Seeker interface.
+func (r *MultiBytesReader) Seek(offset int64, whence int) (int64, error) {
+ var abs int64
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs = r.i + offset
+ case io.SeekEnd:
+ abs = r.size + offset
+ default:
+ return 0, errors.New("MultiSliceReader.Seek: invalid whence")
+ }
+ if abs < 0 {
+ return 0, errors.New("MultiSliceReader.Seek: negative position")
+ }
+ r.i = abs
+ r.updateRp()
+ return abs, nil
+}
+
+// Reset resets the Reader to be reading from b.
+func (r *MultiBytesReader) Reset(b [][]byte) {
+ n := MultiBytesReader{
+ s: b,
+ i: 0,
+ }
+ n.size = int64(r.calcSize(n.s))
+ n.updateRp()
+ *r = n
+}
+
+func (r *MultiBytesReader) calcSize(b [][]byte) int {
+ size := 0
+ for i := 0; i < len(b); i++ {
+ size += len(r.s[i])
+ }
+ return size
+}
+
+func (r *MultiBytesReader) updateRp() {
+ remains := r.i
+ rbuf := 0
+ for remains > 0 && rbuf < len(r.s) {
+ slen := int64(len(r.s[rbuf]))
+ if remains < slen {
+ break
+ }
+ rbuf++
+ remains -= slen
+ }
+ r.rbuf = rbuf
+ r.rp = int(remains)
+}
+
+// NewMultiBytesReader returns a new Reader reading from b.
+func NewMultiBytesReader(b [][]byte) *MultiBytesReader {
+ r := &MultiBytesReader{
+ s: b,
+ i: 0,
+ }
+ r.size = int64(r.calcSize(r.s))
+ r.updateRp()
+ return r
+}
+
+type RangeReader struct {
+ in io.ReadCloser // Input reader
+ closed bool // whether we have closed the underlying stream
+
+ // Range getter
+ rangeGet ReaderRangeGetFn
+ httpRange HTTPRange
+
+ // For reader
+ offset int64
+
+ oriHttpRange HTTPRange
+
+ context context.Context
+
+ // Origin file pattern
+ etag string
+ modTime *time.Time
+ totalSize int64
+}
+
+// NewRangeReader returns a reader that will read from the Reader returned by getter from the given offset.
+// The etag is used to identify the content of the object. If not set, the first returned ETag value will be used instead.
+func NewRangeReader(ctx context.Context, rangeGet ReaderRangeGetFn, httpRange *HTTPRange, etag string) (*RangeReader, error) {
+ if rangeGet == nil {
+ return nil, errors.New("nil reader supplied")
+ }
+
+ range_ := HTTPRange{}
+ if httpRange != nil {
+ range_ = *httpRange
+ }
+
+ a := &RangeReader{
+ rangeGet: rangeGet,
+ context: ctx,
+ httpRange: range_,
+ oriHttpRange: range_,
+ offset: range_.Offset,
+ etag: etag,
+ }
+
+ //fmt.Printf("NewRangeReader, range: %s, etag:%s\n", ToString(a.httpRange.FormatHTTPRange()), a.etag)
+
+ return a, nil
+}
+
+// Read will return the next available data.
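+// A minimal sketch, reusing a getFn like the one shown for AsyncRangeReader
+// (ctx and getFn are assumptions):
+//
+//	rr, err := NewRangeReader(ctx, getFn, &HTTPRange{Offset: 128, Count: 0}, "")
+//	if err == nil {
+//		defer rr.Close()
+//		buf := make([]byte, 32*1024)
+//		n, rerr := rr.Read(buf) // lazily opens the range on first call
+//		_, _ = n, rerr
+//	}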
+func (r *RangeReader) Read(p []byte) (n int, err error) { + defer func() { + r.offset += int64(n) + r.httpRange.Offset += int64(n) + }() + n, err = r.read(p) + return +} + +func (r *RangeReader) read(p []byte) (int, error) { + if r.closed { + return 0, fmt.Errorf("RangeReader is closed") + } + + // open stream + if r.in == nil { + httpRangeRemains := r.httpRange + if r.httpRange.Count > 0 { + gotNum := r.httpRange.Offset - r.oriHttpRange.Offset + if gotNum > 0 && r.httpRange.Count > gotNum { + httpRangeRemains.Count = r.httpRange.Count - gotNum + } + } + output, err := r.rangeGet(r.context, httpRangeRemains) + if err == nil { + etag := ToString(output.ETag) + if r.etag == "" { + r.etag = etag + r.modTime = output.LastModified + } + if etag != r.etag { + err = fmt.Errorf("Source file is changed, expect etag:%s ,got etag:%s", r.etag, etag) + } + + // Partial Response check + var off int64 + if output.ContentRange == nil { + off = 0 + r.totalSize = output.ContentLength + } else { + off, _, r.totalSize, _ = ParseContentRange(*output.ContentRange) + } + if off != httpRangeRemains.Offset { + err = fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", httpRangeRemains.Offset, off) + } + } + if err != nil { + if output != nil && output.Body != nil { + output.Body.Close() + } + return 0, err + } + body := output.Body + if httpRangeRemains.Count > 0 { + body = NewLimitedReadCloser(output.Body, httpRangeRemains.Count) + } + r.in = body + } + + // read from stream + // ignore error when reading from stream + n, err := r.in.Read(p) + if err != nil && err != io.EOF { + r.in.Close() + r.in = nil + err = nil + } + + return n, err +} + +func (r *RangeReader) Offset() int64 { + return r.offset +} + +func (r *RangeReader) Close() (err error) { + if r.closed { + return nil + } + r.closed = true + + if r.in != nil { + err = r.in.Close() + } + return +} + +// TeeReadNopCloser returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReadNopCloser(reader io.Reader, writers ...io.Writer) io.ReadCloser { + return &teeReadNopCloser{ + reader: reader, + writers: writers, + mark: -1, + } +} + +type teeReadNopCloser struct { + reader io.Reader + writers []io.Writer + mark int64 +} + +func (t *teeReadNopCloser) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if n > 0 { + for _, w := range t.writers { + if nn, err := w.Write(p[:n]); err != nil { + return nn, err + } + } + } + return +} + +func (t *teeReadNopCloser) Seek(offset int64, whence int) (int64, error) { + switch t := t.reader.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +func (t *teeReadNopCloser) Close() error { + return nil +} + +// IsSeekable tests if this reader supports Seek method. +func (t *teeReadNopCloser) IsSeekable() bool { + _, ok := t.reader.(io.Seeker) + return ok +} + +// MarkSupported tests if this reader supports the Mark and Reset methods. +func (t *teeReadNopCloser) MarkSupported() bool { + return t.IsSeekable() +} + +// Mark marks the current position in this reader. A subsequent call to +// the Reset method repositions this reader at the last marked position +// so that subsequent reads re-read the same bytes. 
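+//
+// Illustrative only (data, a resettable progress writer named tracker, and
+// dst are assumptions):
+//
+//	t := TeeReadNopCloser(bytes.NewReader(data), tracker)
+//	m := t.(interface {
+//		Mark()
+//		Reset() error
+//	})
+//	m.Mark()
+//	_, _ = io.Copy(dst, t) // first attempt
+//	_ = m.Reset()          // rewind the reader and reset writers before retrying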
+func (t *teeReadNopCloser) Mark() {
+ if s, ok := t.reader.(io.Seeker); ok {
+ if pos, err := s.Seek(0, io.SeekCurrent); err == nil {
+ t.mark = pos
+ }
+ }
+}
+
+// Reset repositions this stream to the position at the time
+// the Mark method was last called on this reader.
+func (t *teeReadNopCloser) Reset() error {
+ if !t.MarkSupported() {
+ return fmt.Errorf("Mark/Reset not supported")
+ }
+
+ if t.mark < 0 {
+ return fmt.Errorf("Mark is not called yet")
+ }
+
+ // seek to the last marked position
+ if s, ok := t.reader.(io.Seeker); ok {
+ if _, err := s.Seek(t.mark, io.SeekStart); err != nil {
+ return err
+ }
+ }
+
+ // reset writer
+ type reseter interface {
+ Reset()
+ }
+
+ for _, w := range t.writers {
+ if rw, ok := w.(reseter); ok {
+ rw.Reset()
+ }
+ }
+
+ return nil
+}
+
+type DiscardReadCloser struct {
+ RC io.ReadCloser
+ Discard int
+}
+
+func (drc *DiscardReadCloser) Read(b []byte) (int, error) {
+ n, err := drc.RC.Read(b)
+ if drc.Discard == 0 || n <= 0 {
+ return n, err
+ }
+
+ if n <= drc.Discard {
+ drc.Discard -= n
+ return 0, err
+ }
+
+ realLen := n - drc.Discard
+ copy(b[0:realLen], b[drc.Discard:n])
+ drc.Discard = 0
+ return realLen, err
+}
+
+func (drc *DiscardReadCloser) Close() error {
+ closer, ok := drc.RC.(io.ReadCloser)
+ if ok {
+ return closer.Close()
+ }
+ return nil
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go
new file mode 100644
index 000000000..49a8f99d1
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go
@@ -0,0 +1,44 @@
+package oss
+
+import (
+ "context"
+ "time"
+
+ "golang.org/x/time/rate"
+)
+
+const (
+ BwTokenBucketSlotRx int = iota
+ BwTokenBucketSlotTx
+ BwTokenBucketSlots
+)
+
+type BwTokenBucket struct {
+ // Byte/S
+ Bandwidth int64
+ Limiter *rate.Limiter
+}
+
+type BwTokenBuckets [BwTokenBucketSlots]*BwTokenBucket
+
+func newBwTokenBucket(bandwidth int64) *BwTokenBucket {
+ return &BwTokenBucket{
+ Bandwidth: bandwidth,
+ Limiter: newEmptyTokenBucket(bandwidth),
+ }
+}
+
+func newEmptyTokenBucket(bandwidth int64) *rate.Limiter {
+ const defaultMaxBurstSize = 4 * 1024 * 1024
+ maxBurstSize := (bandwidth * defaultMaxBurstSize) / (256 * 1024 * 1024)
+ if maxBurstSize < defaultMaxBurstSize {
+ maxBurstSize = defaultMaxBurstSize
+ }
+ tb := rate.NewLimiter(rate.Limit(bandwidth), int(maxBurstSize))
+ tb.AllowN(time.Now(), int(maxBurstSize))
+ return tb
+}
+
+func (tb *BwTokenBucket) LimitBandwidth(n int) {
+ tb.Limiter.WaitN(context.Background(), n)
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go
new file mode 100644
index 000000000..e9a7f458c
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go
@@ -0,0 +1,130 @@
+package oss
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// A LogPrinter is an interface for the SDK to log messages to.
+type LogPrinter interface {
+ Print(...any)
+}
+
+// A LogPrinterFunc is a convenience type that wraps a function so it can be
+// used as a LogPrinter.
+type LogPrinterFunc func(...any)
+
+// Print calls the wrapped function with the arguments provided
+func (f LogPrinterFunc) Print(v ...any) {
+ f(v...)
+}
+
+// Define the level of the output log
+const (
+ LogOff = iota
+ LogError
+ LogWarn
+ LogInfo
+ LogDebug
+)
+
+var logLevelTag = []string{"", "ERROR ", "WARNING ", "INFO ", "DEBUG "}
+
+// Logger interface to handle logging
+type Logger interface {
+ Debugf(format string, v ...any)
+ Infof(format string, v ...any)
+ Warnf(format string, v ...any)
+ Errorf(format string, v ...any)
+ Level() int
+}
+
+type nopLogger struct {
+}
+
+func (*nopLogger) Debugf(_ string, _ ...any) {}
+func (*nopLogger) Infof(_ string, _ ...any) {}
+func (*nopLogger) Warnf(_ string, _ ...any) {}
+func (*nopLogger) Errorf(_ string, _ ...any) {}
+func (*nopLogger) Level() int { return LogOff }
+
+// NewLogger returns a Logger
+func NewLogger(level int, printer LogPrinter) Logger {
+ if level <= LogOff {
+ return &nopLogger{}
+ }
+
+ if printer == nil {
+ printer = LogPrinterFunc(func(v ...any) {
+ log.Print(v...)
+ })
+ }
+
+ return &standardLogger{
+ level: level,
+ printer: printer,
+ }
+}
+
+type standardLogger struct {
+ level int
+ printer LogPrinter
+}
+
+func (l *standardLogger) printf(level int, format string, v ...any) {
+ if l.printer == nil {
+ return
+ }
+ l.printer.Print(logLevelTag[level], fmt.Sprintf(format, v...))
+}
+
+func (l *standardLogger) Debugf(format string, v ...any) {
+ if l.level < LogDebug {
+ return
+ }
+ l.printf(LogDebug, format, v...)
+}
+
+func (l *standardLogger) Infof(format string, v ...any) {
+ if l.level < LogInfo {
+ return
+ }
+ l.printf(LogInfo, format, v...)
+}
+
+func (l *standardLogger) Warnf(format string, v ...any) {
+ if l.level < LogWarn {
+ return
+ }
+ l.printf(LogWarn, format, v...)
+}
+
+func (l *standardLogger) Errorf(format string, v ...any) {
+ if l.level < LogError {
+ return
+ }
+ l.printf(LogError, format, v...)
+}
+
+func (l *standardLogger) Level() int {
+ return l.level
+}
+
+func ToLogLevel(s string) int {
+ s = strings.ToLower(s)
+ switch s {
+ case "error", "err":
+ return LogError
+ case "warning", "warn":
+ return LogWarn
+ case "info":
+ return LogInfo
+ case "debug", "dbg":
+ return LogDebug
+ default:
+ return LogOff
+ }
+}
+
+var _ Logger = (*nopLogger)(nil)
+var _ Logger = (*standardLogger)(nil)
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go
new file mode 100644
index 000000000..e96b54407
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go
@@ -0,0 +1,41 @@
+package oss
+
+import "io"
+
+type ProgressFunc func(increment, transferred, total int64)
+
+type progressTracker struct {
+ pr ProgressFunc
+ written int64
+ lwritten int64 // last written
+ total int64
+}
+
+// NewProgress creates a tracker with progress reporting
+func NewProgress(pr ProgressFunc, total int64) io.Writer {
+ return &progressTracker{
+ pr: pr,
+ written: 0,
+ lwritten: 0,
+ total: total,
+ }
+}
+
+func (p *progressTracker) Write(b []byte) (n int, err error) {
+ n = len(b)
+ p.written += int64(n)
+
+ // Invokes the user's callback method to report progress
+ if p.written > p.lwritten {
+ p.pr(int64(n), p.written, p.total)
+ }
+
+ return
+}
+
+func (p *progressTracker) Reset() {
+ p.lwritten = p.written
+ p.written = 0
+}
+
+var _ RequestBodyTracker = (*progressTracker)(nil)
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go
new file mode 100644
index 000000000..550577e57
--- /dev/null
+++ 
b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go
@@ -0,0 +1,79 @@
+package retry
+
+import (
+ "math"
+ "math/rand"
+ "time"
+)
+
+type EqualJitterBackoff struct {
+ baseDelay time.Duration
+ maxBackoff time.Duration
+ attemptCelling int
+}
+
+func NewEqualJJitterBackoff(baseDelay time.Duration, maxBackoff time.Duration) *EqualJitterBackoff {
+ return &EqualJitterBackoff{
+ baseDelay: baseDelay,
+ maxBackoff: maxBackoff,
+ attemptCelling: int(math.Log2(float64(math.MaxInt64 / baseDelay))),
+ }
+}
+
+func (j *EqualJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ // ceil = min(2 ^ attempts * baseDelay, maxBackoff)
+ // ceil/2 + [0.0, 1.0) * (ceil/2 + 1)
+ if attempt > j.attemptCelling {
+ attempt = j.attemptCelling
+ }
+ delayDuration := j.baseDelay * (1 << attempt)
+ if delayDuration > j.maxBackoff {
+ delayDuration = j.maxBackoff
+ }
+ half := delayDuration.Seconds() / 2
+ return floatSecondsDuration(half + rand.Float64()*float64(half+1)), nil
+}
+
+type FullJitterBackoff struct {
+ baseDelay time.Duration
+ maxBackoff time.Duration
+ attemptCelling int
+}
+
+func NewFullJitterBackoff(baseDelay time.Duration, maxBackoff time.Duration) *FullJitterBackoff {
+ return &FullJitterBackoff{
+ baseDelay: baseDelay,
+ maxBackoff: maxBackoff,
+ attemptCelling: int(math.Log2(float64(math.MaxInt64 / baseDelay))),
+ }
+}
+
+func (j *FullJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ // [0.0, 1.0) * min(2 ^ attempts * baseDelay, maxBackoff)
+ if attempt > j.attemptCelling {
+ attempt = j.attemptCelling
+ }
+ delayDuration := j.baseDelay * (1 << attempt)
+ if delayDuration > j.maxBackoff {
+ delayDuration = j.maxBackoff
+ }
+ return floatSecondsDuration(rand.Float64() * float64(delayDuration.Seconds())), nil
+}
+
+type FixedDelayBackoff struct {
+ fixedBackoff time.Duration
+}
+
+func NewFixedDelayBackoff(fixedBackoff time.Duration) *FixedDelayBackoff {
+ return &FixedDelayBackoff{
+ fixedBackoff: fixedBackoff,
+ }
+}
+
+func (j *FixedDelayBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ return j.fixedBackoff, nil
+}
+
+func floatSecondsDuration(v float64) time.Duration {
+ return time.Duration(v * float64(time.Second))
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go
new file mode 100644
index 000000000..fff7b91fc
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go
@@ -0,0 +1,103 @@
+package retry
+
+import (
+ "errors"
+ "io"
+ "net"
+ "net/url"
+ "strings"
+)
+
+type HTTPStatusCodeRetryable struct {
+}
+
+var retryErrorCodes = []int{
+ 401, // Unauthorized
+ 408, // Request Timeout
+ 429, // Rate exceeded.
+} + +func (*HTTPStatusCodeRetryable) IsErrorRetryable(err error) bool { + var v interface{ HttpStatusCode() int } + if errors.As(err, &v) { + code := v.HttpStatusCode() + if code >= 500 { + return true + } + for _, e := range retryErrorCodes { + if code == e { + return true + } + } + } + return false +} + +type ServiceErrorCodeRetryable struct { +} + +var retryServiceErrorCodes = map[string]struct{}{ + "RequestTimeTooSkewed": {}, + "BadRequest": {}, +} + +func (*ServiceErrorCodeRetryable) IsErrorRetryable(err error) bool { + var v interface{ ErrorCode() string } + if errors.As(err, &v) { + if _, ok := retryServiceErrorCodes[v.ErrorCode()]; ok { + return true + } + } + return false +} + +type ConnectionErrorRetryable struct{} + +var retriableErrorStrings = []string{ + "connection reset", + "connection refused", + "use of closed network connection", + "unexpected EOF reading trailer", + "transport connection broken", + "server closed idle connection", + "bad record MAC", + "stream error:", + "tls: use of closed connection", + "connection was forcibly closed", + "broken pipe", + "crc is inconsistent", // oss crc check error pattern +} + +var retriableErrors = []error{ + io.EOF, + io.ErrUnexpectedEOF, +} + +func (c *ConnectionErrorRetryable) IsErrorRetryable(err error) bool { + if err != nil { + switch t := err.(type) { + case *url.Error: + if t.Err != nil { + return c.IsErrorRetryable(t.Err) + } + case net.Error: + if t.Temporary() || t.Timeout() { + return true + } + } + + for _, retriableErr := range retriableErrors { + if err == retriableErr { + return true + } + } + + errString := err.Error() + for _, phrase := range retriableErrorStrings { + if strings.Contains(errString, phrase) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go new file mode 100644 index 000000000..51634ec36 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go @@ -0,0 +1,22 @@ +package retry + +import ( + "fmt" + "time" +) + +type Retryer interface { + IsErrorRetryable(error) bool + MaxAttempts() int + RetryDelay(attempt int, opErr error) (time.Duration, error) +} + +type NopRetryer struct{} + +func (NopRetryer) IsErrorRetryable(error) bool { return false } + +func (NopRetryer) MaxAttempts() int { return 1 } + +func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go new file mode 100644 index 000000000..fa8e38b77 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go @@ -0,0 +1,71 @@ +package retry + +import ( + "time" +) + +const ( + DefaultMaxAttempts = 3 + DefaultMaxBackoff = 20 * time.Second + DefaultBaseDelay = 200 * time.Millisecond +) + +var DefaultErrorRetryables = []ErrorRetryable{ + &HTTPStatusCodeRetryable{}, + &ServiceErrorCodeRetryable{}, + &ConnectionErrorRetryable{}, +} + +type Standard struct { + maxAttempts int + retryables []ErrorRetryable + backoff BackoffDelayer +} + +func NewStandard(fnOpts ...func(*RetryOptions)) *Standard { + o := RetryOptions{ + MaxAttempts: DefaultMaxAttempts, + MaxBackoff: DefaultMaxBackoff, + BaseDelay: DefaultBaseDelay, + ErrorRetryables: DefaultErrorRetryables, + } + + for _, fn := range fnOpts { + fn(&o) + } + + 
if o.MaxAttempts <= 0 { + o.MaxAttempts = DefaultMaxAttempts + } + + if o.BaseDelay <= 0 { + o.BaseDelay = DefaultBaseDelay + } + + if o.Backoff == nil { + o.Backoff = NewFullJitterBackoff(o.BaseDelay, o.MaxBackoff) + } + + return &Standard{ + maxAttempts: o.MaxAttempts, + retryables: o.ErrorRetryables, + backoff: o.Backoff, + } +} + +func (s *Standard) MaxAttempts() int { + return s.maxAttempts +} + +func (s *Standard) IsErrorRetryable(err error) bool { + for _, re := range s.retryables { + if v := re.IsErrorRetryable(err); v { + return v + } + } + return false +} + +func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { + return s.backoff.BackoffDelay(attempt, err) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go new file mode 100644 index 000000000..3fea8091d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go @@ -0,0 +1,19 @@ +package retry + +import "time" + +type RetryOptions struct { + MaxAttempts int + MaxBackoff time.Duration + BaseDelay time.Duration + Backoff BackoffDelayer + ErrorRetryables []ErrorRetryable +} + +type BackoffDelayer interface { + BackoffDelay(attempt int, err error) (time.Duration, error) +} + +type ErrorRetryable interface { + IsErrorRetryable(error) bool +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go new file mode 100644 index 000000000..684fe1428 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go @@ -0,0 +1,51 @@ +package signer + +import ( + "context" + "net/http" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" +) + +const ( + SubResource = "SubResource" + SignTime = "SignTime" +) + +type SigningContext struct { + //input + Product *string + Region *string + Bucket *string + Key *string + Request *http.Request + + SubResource []string + AdditionalHeaders []string + + Credentials *credentials.Credentials + + AuthMethodQuery bool + + // input and output + Time time.Time + ClockOffset time.Duration + + // output + SignedHeaders map[string]string + StringToSign string + + // for test + signTime *time.Time +} + +type Signer interface { + Sign(ctx context.Context, signingCtx *SigningContext) error +} + +type NopSigner struct{} + +func (*NopSigner) Sign(ctx context.Context, signingCtx *SigningContext) error { + return nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go new file mode 100644 index 000000000..ea3a1dd3a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go @@ -0,0 +1,264 @@ +package signer + +import ( + "context" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "hash" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +var requiredSignedParametersMap = map[string]struct{}{ + "acl": {}, + "bucketInfo": {}, + "location": {}, + "stat": {}, + "delete": {}, + "append": {}, + "tagging": {}, + "objectMeta": {}, + "uploads": {}, + "uploadId": {}, + "partNumber": {}, + "security-token": {}, + "position": {}, + "response-content-type": {}, + "response-content-language": {}, + "response-expires": {}, + "response-cache-control": {}, + "response-content-disposition": {}, + "response-content-encoding": {}, + "restore": {}, + "callback": {}, + 
"callback-var": {}, + "versions": {}, + "versioning": {}, + "versionId": {}, + "sequential": {}, + "continuation-token": {}, + "regionList": {}, + "cloudboxes": {}, + "symlink": {}, +} + +const ( + // headers + authorizationHeader = "Authorization" + securityTokenHeader = "x-oss-security-token" + + dateHeader = "Date" + contentTypeHeader = "Content-Type" + contentMd5Header = "Content-MD5" + ossHeaderPreifx = "x-oss-" + ossDateHeader = "x-oss-date" + + //Query + securityTokenQuery = "security-token" + expiresQuery = "Expires" + accessKeyIdQuery = "OSSAccessKeyId" + signatureQuery = "Signature" + + defaultExpiresDuration = 15 * time.Minute +) + +type SignerV1 struct { +} + +func isSubResource(list []string, key string) bool { + for _, k := range list { + if key == k { + return true + } + } + return false +} + +func (*SignerV1) calcStringToSign(date string, signingCtx *SigningContext) string { + /* + SignToString = + VERB + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + CanonicalizedOSSHeaders + + CanonicalizedResource + Signature = base64(hmac-sha1(AccessKeySecret, SignToString)) + */ + request := signingCtx.Request + + contentMd5 := request.Header.Get(contentMd5Header) + contentType := request.Header.Get(contentTypeHeader) + + //CanonicalizedOSSHeaders + var headers []string + for k := range request.Header { + lowerCaseKey := strings.ToLower(k) + if strings.HasPrefix(lowerCaseKey, ossHeaderPreifx) { + headers = append(headers, lowerCaseKey) + } + } + sort.Strings(headers) + headerItems := make([]string, len(headers)) + for i, k := range headers { + headerValues := make([]string, len(request.Header.Values(k))) + for i, v := range request.Header.Values(k) { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + strings.Join(headerValues, ",") + "\n" + } + canonicalizedOSSHeaders := strings.Join(headerItems, "") + + //CanonicalizedResource + query := request.URL.Query() + var params []string + for k := range query { + if _, ok := requiredSignedParametersMap[k]; ok { + params = append(params, k) + } else if strings.HasPrefix(k, ossHeaderPreifx) { + params = append(params, k) + } else if isSubResource(signingCtx.SubResource, k) { + params = append(params, k) + } + } + sort.Strings(params) + paramItems := make([]string, len(params)) + for i, k := range params { + v := query.Get(k) + if len(v) > 0 { + paramItems[i] = k + "=" + v + } else { + paramItems[i] = k + } + } + subResource := strings.Join(paramItems, "&") + canonicalizedResource := "/" + if signingCtx.Bucket != nil { + canonicalizedResource += *signingCtx.Bucket + "/" + } + if signingCtx.Key != nil { + canonicalizedResource += *signingCtx.Key + } + if subResource != "" { + canonicalizedResource += "?" 
+ subResource + } + + // string to Sign + stringToSign := + request.Method + "\n" + + contentMd5 + "\n" + + contentType + "\n" + + date + "\n" + + canonicalizedOSSHeaders + + canonicalizedResource + + //fmt.Printf("stringToSign:%s\n", stringToSign) + return stringToSign +} + +func (s *SignerV1) authHeader(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().Add(signingCtx.ClockOffset) + } + datetime := signingCtx.Time.UTC().Format(http.TimeFormat) + request.Header.Set(dateHeader, datetime) + + // Credentials information + if cred.SecurityToken != "" { + request.Header.Set(securityTokenHeader, cred.SecurityToken) + } + + // StringToSign + stringToSign := s.calcStringToSign(datetime, signingCtx) + signingCtx.StringToSign = stringToSign + + // Signature + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(cred.AccessKeySecret)) + if _, err := io.WriteString(h, stringToSign); err != nil { + return err + } + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + // Authorization header + request.Header.Set(authorizationHeader, fmt.Sprintf("OSS %s:%s", cred.AccessKeyID, signature)) + + return nil +} + +func (s *SignerV1) authQuery(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().UTC().Add(defaultExpiresDuration) + } + datetime := fmt.Sprintf("%v", signingCtx.Time.UTC().Unix()) + + // Credentials information + query, _ := url.ParseQuery(request.URL.RawQuery) + if cred.SecurityToken != "" { + query.Add(securityTokenQuery, cred.SecurityToken) + request.URL.RawQuery = query.Encode() + } + + // StringToSign + stringToSign := s.calcStringToSign(datetime, signingCtx) + signingCtx.StringToSign = stringToSign + + // Signature + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(cred.AccessKeySecret)) + if _, err := io.WriteString(h, stringToSign); err != nil { + return err + } + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + // Authorization query + query.Add(expiresQuery, datetime) + query.Add(accessKeyIdQuery, cred.AccessKeyID) + query.Add(signatureQuery, signature) + request.URL.RawQuery = strings.Replace(query.Encode(), "+", "%20", -1) + + return nil +} + +func (s *SignerV1) Sign(ctx context.Context, signingCtx *SigningContext) error { + if signingCtx == nil { + return fmt.Errorf("SigningContext is null.") + } + + if signingCtx.Credentials == nil || !signingCtx.Credentials.HasKeys() { + return fmt.Errorf("SigningContext.Credentials is null or empty.") + } + + if signingCtx.Request == nil { + return fmt.Errorf("SigningContext.Request is null.") + } + + if signingCtx.AuthMethodQuery { + return s.authQuery(ctx, signingCtx) + } + + return s.authHeader(ctx, signingCtx) +} + +func (*SignerV1) IsSignedHeader(additionalHeaders []string, h string) bool { + lowerCaseKey := strings.ToLower(h) + if strings.HasPrefix(lowerCaseKey, ossHeaderPreifx) || + lowerCaseKey == "date" || + lowerCaseKey == "content-type" || + lowerCaseKey == "content-md5" { + return true + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go new file mode 100644 index 000000000..a0d46f947 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go @@ -0,0 +1,390 @@ 
+package signer + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +const ( + // headers + contentSha256Header = "x-oss-content-sha256" + iso8601DatetimeFormat = "20060102T150405Z" + iso8601DateFormat = "20060102" + algorithmV4 = "OSS4-HMAC-SHA256" + + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +func toString(p *string) (v string) { + if p == nil { + return v + } + return *p +} + +func escapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func isDefaultSignedHeader(low string) bool { + if strings.HasPrefix(low, ossHeaderPreifx) || + low == "content-type" || + low == "content-md5" { + return true + } + return false +} + +func getCommonAdditionalHeaders(header http.Header, additionalHeaders []string) []string { + var keys []string + for _, k := range additionalHeaders { + lowK := strings.ToLower(k) + if isDefaultSignedHeader(lowK) { + //default signed header, skip + continue + } else if header.Get(lowK) != "" { + keys = append(keys, lowK) + } + } + sort.Strings(keys) + return keys +} + +type SignerV4 struct { +} + +func (s *SignerV4) calcStringToSign(datetime, scope, canonicalRequest string) string { + /** + StringToSign + "OSS4-HMAC-SHA256" + "\n" + + TimeStamp + "\n" + + Scope + "\n" + + Hex(SHA256Hash(Canonical Request)) + */ + hash256 := sha256.New() + hash256.Write([]byte(canonicalRequest)) + hashValue := hash256.Sum(nil) + canonicalHash := hex.EncodeToString(hashValue) + + return "OSS4-HMAC-SHA256" + "\n" + + datetime + "\n" + + scope + "\n" + + canonicalHash +} + +func (s *SignerV4) calcCanonicalRequest(signingCtx *SigningContext, additionalHeaders []string) string { + request := signingCtx.Request + /* + Canonical Request + HTTP Verb + "\n" + + Canonical URI + "\n" + + Canonical Query String + "\n" + + Canonical Headers + "\n" + + Additional Headers + "\n" + + Hashed PayLoad + */ + + //Canonical Uri + uri := "/" + if signingCtx.Bucket != nil { + uri += *signingCtx.Bucket + "/" + } + if signingCtx.Key != nil { + uri += *signingCtx.Key + } + canonicalUri := escapePath(uri, false) + + //Canonical Query + query := strings.Replace(request.URL.RawQuery, "+", "%20", -1) + values := make(map[string]string) + var params []string + for query != "" { + var key string + key, query, _ = strings.Cut(query, "&") + if key == "" { + continue + } + key, value, _ := strings.Cut(key, "=") + values[key] = value + params = append(params, key) + } + sort.Strings(params) + var buf strings.Builder + for _, k := range params { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if len(values[k]) > 0 { + buf.WriteByte('=') + buf.WriteString(values[k]) + } + } + canonicalQuery := buf.String() + + //Canonical Headers + var headers []string + buf.Reset() + addHeadersMap := make(map[string]bool) + for _, k := range additionalHeaders { + addHeadersMap[strings.ToLower(k)] = true + } + for k := range request.Header { + lowk := strings.ToLower(k) + if isDefaultSignedHeader(lowk) { + headers = append(headers, lowk) + } else if _, ok := 
addHeadersMap[lowk]; ok { + headers = append(headers, lowk) + } + } + sort.Strings(headers) + for _, k := range headers { + headerValues := make([]string, len(request.Header.Values(k))) + for i, v := range request.Header.Values(k) { + headerValues[i] = strings.TrimSpace(v) + } + buf.WriteString(k) + buf.WriteString(":") + buf.WriteString(strings.Join(headerValues, ",")) + buf.WriteString("\n") + } + canonicalHeaders := buf.String() + + //Additional Headers + canonicalAdditionalHeaders := strings.Join(additionalHeaders, ";") + + hashPayload := unsignedPayload + if val := request.Header.Get(contentSha256Header); val != "" { + hashPayload = val + } + + buf.Reset() + buf.WriteString(request.Method) + buf.WriteString("\n") + buf.WriteString(canonicalUri) + buf.WriteString("\n") + buf.WriteString(canonicalQuery) + buf.WriteString("\n") + buf.WriteString(canonicalHeaders) + buf.WriteString("\n") + buf.WriteString(canonicalAdditionalHeaders) + buf.WriteString("\n") + buf.WriteString(hashPayload) + + return buf.String() +} + +func buildScope(date, region, product string) string { + return fmt.Sprintf("%s/%s/%s/aliyun_v4_request", date, region, product) +} + +func (s *SignerV4) calcSignature(sk, date, region, product, stringToSign string) string { + hmacHash := func() hash.Hash { return sha256.New() } + + signingKey := "aliyun_v4" + sk + + h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte(signingKey)) + io.WriteString(h1, date) + h1Key := h1.Sum(nil) + + h2 := hmac.New(hmacHash, h1Key) + io.WriteString(h2, region) + h2Key := h2.Sum(nil) + + h3 := hmac.New(hmacHash, h2Key) + io.WriteString(h3, product) + h3Key := h3.Sum(nil) + + h4 := hmac.New(hmacHash, h3Key) + io.WriteString(h4, "aliyun_v4_request") + h4Key := h4.Sum(nil) + + h := hmac.New(hmacHash, h4Key) + io.WriteString(h, stringToSign) + signature := hex.EncodeToString(h.Sum(nil)) + + return signature +} + +func (s *SignerV4) authHeader(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().Add(signingCtx.ClockOffset) + } + utcTime := signingCtx.Time.UTC() + datetime := utcTime.Format(iso8601DatetimeFormat) + date := utcTime.Format(iso8601DateFormat) + request.Header.Set(ossDateHeader, datetime) + request.Header.Set(dateHeader, utcTime.Format(http.TimeFormat)) + + // Credentials information + if cred.SecurityToken != "" { + request.Header.Set(securityTokenHeader, cred.SecurityToken) + } + + // Other Headers + request.Header.Set(contentSha256Header, unsignedPayload) + + // Scope + region := toString(signingCtx.Region) + product := toString(signingCtx.Product) + scope := buildScope(date, region, product) + + additionalHeaders := getCommonAdditionalHeaders(request.Header, signingCtx.AdditionalHeaders) + + // CanonicalRequest + canonicalRequest := s.calcCanonicalRequest(signingCtx, additionalHeaders) + + // StringToSign + stringToSign := s.calcStringToSign(datetime, scope, canonicalRequest) + signingCtx.StringToSign = stringToSign + + // Signature + signature := s.calcSignature(cred.AccessKeySecret, date, region, product, stringToSign) + + // credential + var buf strings.Builder + buf.WriteString("OSS4-HMAC-SHA256 Credential=") + buf.WriteString(cred.AccessKeyID + "/" + scope) + if len(additionalHeaders) > 0 { + buf.WriteString(",AdditionalHeaders=") + buf.WriteString(strings.Join(additionalHeaders, ";")) + } + buf.WriteString(",Signature=") + buf.WriteString(signature) + + 
request.Header.Set(authorizationHeader, buf.String()) + + //fmt.Printf("canonicalRequest:\n%s\n", canonicalRequest) + + //fmt.Printf("stringToSign:\n%s\n", stringToSign) + + return nil +} + +func (s *SignerV4) authQuery(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + now := time.Now().UTC() + if signingCtx.Time.IsZero() { + signingCtx.Time = now.Add(defaultExpiresDuration) + } + if signingCtx.signTime != nil { + now = signingCtx.signTime.UTC() + } + datetime := now.Format(iso8601DatetimeFormat) + date := now.Format(iso8601DateFormat) + expires := signingCtx.Time.Unix() - now.Unix() + + // Scope + region := toString(signingCtx.Region) + product := toString(signingCtx.Product) + scope := buildScope(date, region, product) + + additionalHeaders := getCommonAdditionalHeaders(request.Header, signingCtx.AdditionalHeaders) + + // Credentials information + query, _ := url.ParseQuery(request.URL.RawQuery) + if cred.SecurityToken != "" { + query.Add("x-oss-security-token", cred.SecurityToken) + } + query.Add("x-oss-signature-version", algorithmV4) + query.Add("x-oss-date", datetime) + query.Add("x-oss-expires", fmt.Sprintf("%v", expires)) + query.Add("x-oss-credential", fmt.Sprintf("%s/%s", cred.AccessKeyID, scope)) + if len(additionalHeaders) > 0 { + query.Add("x-oss-additional-headers", strings.Join(additionalHeaders, ";")) + } + request.URL.RawQuery = query.Encode() + + // CanonicalRequest + canonicalRequest := s.calcCanonicalRequest(signingCtx, additionalHeaders) + + // StringToSign + stringToSign := s.calcStringToSign(datetime, scope, canonicalRequest) + signingCtx.StringToSign = stringToSign + + //fmt.Printf("canonicalRequest:\n%s\n", canonicalRequest) + + //fmt.Printf("stringToSign:\n%s\n", stringToSign) + + // Signature + signature := s.calcSignature(cred.AccessKeySecret, date, region, product, stringToSign) + + // Authorization query + query.Add("x-oss-signature", signature) + request.URL.RawQuery = strings.Replace(query.Encode(), "+", "%20", -1) + + return nil +} + +func (s *SignerV4) Sign(ctx context.Context, signingCtx *SigningContext) error { + if signingCtx == nil { + return fmt.Errorf("SigningContext is null.") + } + + if signingCtx.Credentials == nil || !signingCtx.Credentials.HasKeys() { + return fmt.Errorf("SigningContext.Credentials is null or empty.") + } + + if signingCtx.Request == nil { + return fmt.Errorf("SigningContext.Request is null.") + } + if signingCtx.AuthMethodQuery { + return s.authQuery(ctx, signingCtx) + } + return s.authHeader(ctx, signingCtx) +} + +func (s *SignerV4) IsSignedHeader(additionalHeaders []string, h string) bool { + return isDefaultSignedHeader(strings.ToLower(h)) || ContainsStr(additionalHeaders, h) +} + +// ContainsStr Used to check if the string is in the slice +func ContainsStr(slice []string, str string) bool { + for _, item := range slice { + if strings.ToLower(str) == strings.ToLower(item) { + return true + } + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go new file mode 100644 index 000000000..9827c96f6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go @@ -0,0 +1,15 @@ +package oss + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. 
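+//
+// For example (illustrative):
+//
+//	ps := SliceOfPtrs("a", "b") // []*string, each element pointing at a copy
+//	_ = *ps[0]                  // "a"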
+func SliceOfPtrs[T any](vv ...T) []*T {
+	slc := make([]*T, len(vv))
+	for i := range vv {
+		slc[i] = Ptr(vv[i])
+	}
+	return slc
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go
new file mode 100644
index 000000000..0d24e99ed
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go
@@ -0,0 +1,88 @@
+package transport
+
+import (
+	"context"
+	"net"
+	"time"
+)
+
+// Dialer wraps net.Dialer with a per-connection read/write timeout and
+// optional post-read/post-write hooks.
+type Dialer struct {
+	net.Dialer
+	// Read/Write timeout
+	timeout   time.Duration
+	postRead  []func(n int, err error)
+	postWrite []func(n int, err error)
+}
+
+func newDialer(cfg *Config) *Dialer {
+	dialer := &Dialer{
+		Dialer: net.Dialer{
+			Timeout:   *cfg.ConnectTimeout,
+			KeepAlive: *cfg.KeepAliveTimeout,
+		},
+		timeout:   *cfg.ReadWriteTimeout,
+		postRead:  cfg.PostRead,
+		postWrite: cfg.PostWrite,
+	}
+	return dialer
+}
+
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+	return d.DialContext(context.Background(), network, address)
+}
+
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	c, err := d.Dialer.DialContext(ctx, network, address)
+	if err != nil {
+		return c, err
+	}
+
+	timeout := d.timeout
+	if u, ok := ctx.Value("OpReadWriteTimeout").(*time.Duration); ok {
+		timeout = *u
+	}
+
+	t := &timeoutConn{
+		Conn:    c,
+		timeout: timeout,
+		dialer:  d,
+	}
+	return t, t.nudgeDeadline()
+}
+
+// timeoutConn is a net.Conn whose read/write deadline slides forward after
+// each successful I/O; its post hooks can feed progress tracking or rate limiting.
+type timeoutConn struct {
+	net.Conn
+	timeout time.Duration
+	dialer  *Dialer
+}
+
+func (c *timeoutConn) nudgeDeadline() error {
+	if c.timeout > 0 {
+		return c.SetDeadline(time.Now().Add(c.timeout))
+	}
+	return nil
+}
+
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+	n, err = c.Conn.Read(b)
+	for _, fn := range c.dialer.postRead {
+		fn(n, err)
+	}
+	if err == nil && n > 0 && c.timeout > 0 {
+		err = c.nudgeDeadline()
+	}
+	return n, err
+}
+
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+	n, err = c.Conn.Write(b)
+	for _, fn := range c.dialer.postWrite {
+		fn(n, err)
+	}
+	if err == nil && n > 0 && c.timeout > 0 {
+		err = c.nudgeDeadline()
+	}
+	return n, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go
new file mode 100644
index 000000000..3be398345
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go
@@ -0,0 +1,177 @@
+package transport
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// Defaults for the Transport
+var (
+	DefaultConnectTimeout        = 5 * time.Second
+	DefaultReadWriteTimeout      = 10 * time.Second
+	DefaultIdleConnectionTimeout = 50 * time.Second
+	DefaultExpectContinueTimeout = 1 * time.Second
+	DefaultKeepAliveTimeout      = 30 * time.Second
+
+	DefaultMaxConnections = 100
+
+	// Default to TLS 1.2 for all HTTPS requests.
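+	// It can be raised per transport via the TLSMinVersion option below.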
+ DefaultTLSMinVersion uint16 = tls.VersionTLS12 +) + +var DefaultConfig = Config{ + ConnectTimeout: &DefaultConnectTimeout, + ReadWriteTimeout: &DefaultReadWriteTimeout, + IdleConnectionTimeout: &DefaultIdleConnectionTimeout, + KeepAliveTimeout: &DefaultKeepAliveTimeout, +} + +type Config struct { + ConnectTimeout *time.Duration + ReadWriteTimeout *time.Duration + IdleConnectionTimeout *time.Duration + KeepAliveTimeout *time.Duration + EnabledRedirect *bool + + PostRead []func(n int, err error) + PostWrite []func(n int, err error) +} + +func newTransportCustom(cfg *Config, fns ...func(*http.Transport)) http.RoundTripper { + tr := &http.Transport{ + DialContext: newDialer(cfg).DialContext, + TLSHandshakeTimeout: *cfg.ConnectTimeout, + IdleConnTimeout: *cfg.IdleConnectionTimeout, + MaxConnsPerHost: DefaultMaxConnections, + ExpectContinueTimeout: DefaultExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: DefaultTLSMinVersion, + }, + } + + for _, fn := range fns { + fn(tr) + } + + return tr +} + +func (c *Config) mergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func (c *Config) copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.mergeIn(c) + + for _, cfg := range cfgs { + dst.mergeIn(cfg) + } + + return dst +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.ConnectTimeout != nil { + dst.ConnectTimeout = other.ConnectTimeout + } + + if other.ReadWriteTimeout != nil { + dst.ReadWriteTimeout = other.ReadWriteTimeout + } + + if other.IdleConnectionTimeout != nil { + dst.IdleConnectionTimeout = other.IdleConnectionTimeout + } + + if other.KeepAliveTimeout != nil { + dst.KeepAliveTimeout = other.KeepAliveTimeout + } + + if other.EnabledRedirect != nil { + dst.EnabledRedirect = other.EnabledRedirect + } + + if other.PostRead != nil { + dst.PostRead = make([]func(n int, err error), len(other.PostRead)) + copy(dst.PostRead, other.PostRead) + } + + if other.PostWrite != nil { + dst.PostWrite = make([]func(n int, err error), len(other.PostWrite)) + copy(dst.PostWrite, other.PostWrite) + } +} + +func InsecureSkipVerify(enabled bool) func(*http.Transport) { + return func(t *http.Transport) { + if t.TLSClientConfig != nil { + t.TLSClientConfig.InsecureSkipVerify = enabled + } else { + t.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: enabled, + } + } + } +} + +func MaxConnections(value int) func(*http.Transport) { + return func(t *http.Transport) { + t.MaxConnsPerHost = value + } +} + +func ExpectContinueTimeout(value time.Duration) func(*http.Transport) { + return func(t *http.Transport) { + t.ExpectContinueTimeout = value + } +} + +func TLSMinVersion(value int) func(*http.Transport) { + return func(t *http.Transport) { + if t.TLSClientConfig != nil { + t.TLSClientConfig.MinVersion = uint16(value) + } else { + t.TLSClientConfig = &tls.Config{ + MinVersion: uint16(value), + } + } + } +} + +func HttpProxy(fixedURL *url.URL) func(*http.Transport) { + return func(t *http.Transport) { + t.Proxy = http.ProxyURL(fixedURL) + } +} + +func ProxyFromEnvironment() func(*http.Transport) { + return func(t *http.Transport) { + t.Proxy = http.ProxyFromEnvironment + } +} + +func NewHttpClient(cfg *Config, fns ...func(*http.Transport)) *http.Client { + cfg = DefaultConfig.copy(cfg) + client := &http.Client{ + Transport: newTransportCustom(cfg, fns...), + //Disalbe Redirect + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + if 
cfg.EnabledRedirect != nil && *cfg.EnabledRedirect { + client.CheckRedirect = nil + } + + return client +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go new file mode 100644 index 000000000..a0c737f73 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go @@ -0,0 +1,162 @@ +package oss + +import ( + "context" + "io" + "net/http" +) + +type OperationMetadata struct { + values map[any][]any +} + +func (m OperationMetadata) Get(key any) any { + if m.values == nil { + return nil + } + v := m.values[key] + if len(v) == 0 { + return nil + } + return v[0] +} + +func (m OperationMetadata) Values(key any) []any { + if m.values == nil { + return nil + } + return m.values[key] +} + +func (m *OperationMetadata) Add(key, value any) { + if m.values == nil { + m.values = map[any][]any{} + } + m.values[key] = append(m.values[key], value) +} + +func (m *OperationMetadata) Set(key, value any) { + if m.values == nil { + m.values = map[any][]any{} + } + m.values[key] = []any{value} +} + +func (m OperationMetadata) Has(key any) bool { + if m.values == nil { + return false + } + _, ok := m.values[key] + return ok +} + +func (m OperationMetadata) Clone() OperationMetadata { + vs := make(map[any][]any, len(m.values)) + for k, v := range m.values { + vv := make([]any, len(v)) + copy(vv, v) + vs[k] = vv + } + return OperationMetadata{ + values: vs, + } +} + +type RequestCommon struct { + Headers map[string]string + Parameters map[string]string + Payload io.Reader +} + +type RequestCommonInterface interface { + GetCommonFileds() (map[string]string, map[string]string, io.Reader) +} + +func (r *RequestCommon) GetCommonFileds() (map[string]string, map[string]string, io.Reader) { + return r.Headers, r.Parameters, r.Payload +} + +type ResultCommon struct { + Status string + StatusCode int + Headers http.Header + OpMetadata OperationMetadata +} + +type ResultCommonInterface interface { + CopyIn(status string, statusCode int, headers http.Header, meta OperationMetadata) +} + +func (r *ResultCommon) CopyIn(status string, statusCode int, headers http.Header, meta OperationMetadata) { + r.Status = status + r.StatusCode = statusCode + r.Headers = headers + r.OpMetadata = meta +} + +type OperationInput struct { + OpName string + Method string + Headers map[string]string + Parameters map[string]string + Body io.Reader + + Bucket *string + Key *string + + OpMetadata OperationMetadata +} + +type OperationOutput struct { + Input *OperationInput + + Status string + StatusCode int + Headers http.Header + Body io.ReadCloser + + OpMetadata OperationMetadata + + httpRequest *http.Request +} + +type RequestBodyTracker interface { + io.Writer + Reset() +} + +type DownloadAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) +} + +type UploadAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) + InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) + UploadPart(ctx context.Context, request *UploadPartRequest, optFns 
...func(*Options)) (*UploadPartResult, error) + CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) + AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) +} + +type OpenFileAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) +} + +type AppendFileAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + AppendObject(ctx context.Context, request *AppendObjectRequest, optFns ...func(*Options)) (*AppendObjectResult, error) +} + +type CopyAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + CopyObject(ctx context.Context, request *CopyObjectRequest, optFns ...func(*Options)) (*CopyObjectResult, error) + InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) + UploadPartCopy(ctx context.Context, request *UploadPartCopyRequest, optFns ...func(*Options)) (*UploadPartCopyResult, error) + CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) + AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) + GetObjectTagging(ctx context.Context, request *GetObjectTaggingRequest, optFns ...func(*Options)) (*GetObjectTaggingResult, error) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go new file mode 100644 index 000000000..28db8dd72 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go @@ -0,0 +1,768 @@ +package oss + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "sort" + "strconv" + "sync" + "sync/atomic" +) + +type UploaderOptions struct { + PartSize int64 + + ParallelNum int + + LeavePartsOnError bool + + EnableCheckpoint bool + + CheckpointDir string + + ClientOptions []func(*Options) +} + +type Uploader struct { + options UploaderOptions + client UploadAPIClient + featureFlags FeatureFlagsType + isEncryptionClient bool +} + +// NewUploader creates a new Uploader instance to upload objects. +// Pass In additional functional options to customize the uploader's behavior. 
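+//
+// A minimal usage sketch; the bucket, key, and file path are placeholders:
+//
+//	u := NewUploader(client)
+//	result, err := u.UploadFile(context.TODO(), &PutObjectRequest{
+//		Bucket: Ptr("example-bucket"),
+//		Key:    Ptr("example-key"),
+//	}, "/path/to/local-file")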
+func NewUploader(c UploadAPIClient, optFns ...func(*UploaderOptions)) *Uploader { + options := UploaderOptions{ + PartSize: DefaultUploadPartSize, + ParallelNum: DefaultUploadParallel, + LeavePartsOnError: false, + } + + for _, fn := range optFns { + fn(&options) + } + + u := &Uploader{ + client: c, + options: options, + isEncryptionClient: false, + } + + //Get Client Feature + switch t := c.(type) { + case *Client: + u.featureFlags = t.options.FeatureFlags + case *EncryptionClient: + u.featureFlags = t.Unwrap().options.FeatureFlags + u.isEncryptionClient = true + } + + return u +} + +type UploadResult struct { + UploadId *string + + ETag *string + + VersionId *string + + HashCRC64 *string + + ResultCommon +} + +type UploadError struct { + Err error + UploadId string + Path string +} + +func (m *UploadError) Error() string { + var extra string + if m.Err != nil { + extra = fmt.Sprintf(", cause: %s", m.Err.Error()) + } + return fmt.Sprintf("upload failed, upload id: %s%s", m.UploadId, extra) +} + +func (m *UploadError) Unwrap() error { + return m.Err +} + +func (u *Uploader) UploadFrom(ctx context.Context, request *PutObjectRequest, body io.Reader, optFns ...func(*UploaderOptions)) (*UploadResult, error) { + // Uploader wrapper + delegate, err := u.newDelegate(ctx, request, optFns...) + if err != nil { + return nil, err + } + + delegate.body = body + if err = delegate.applySource(); err != nil { + return nil, err + } + + return delegate.upload() +} + +func (u *Uploader) UploadFile(ctx context.Context, request *PutObjectRequest, filePath string, optFns ...func(*UploaderOptions)) (*UploadResult, error) { + // Uploader wrapper + delegate, err := u.newDelegate(ctx, request, optFns...) + if err != nil { + return nil, err + } + + // Source + if err = delegate.checkSource(filePath); err != nil { + return nil, err + } + + var file *os.File + if file, err = delegate.openReader(); err != nil { + return nil, err + } + delegate.body = file + + if err = delegate.applySource(); err != nil { + return nil, err + } + + if err = delegate.checkCheckpoint(); err != nil { + return nil, err + } + + if err = delegate.adjustSource(); err != nil { + return nil, err + } + + result, err := delegate.upload() + + return result, delegate.closeReader(file, err) +} + +type uploaderDelegate struct { + base *Uploader + options UploaderOptions + client UploadAPIClient + context context.Context + request *PutObjectRequest + + body io.Reader + readerPos int64 + totalSize int64 + hashCRC64 uint64 + transferred int64 + + // Source's Info, from file or reader + filePath string + fileInfo os.FileInfo + + // for resumable upload + uploadId string + partNumber int32 + cseContext *EncryptionMultiPartContext + uploadedParts []Part + + partPool byteSlicePool + + checkpoint *uploadCheckpoint +} + +type uploadIdInfo struct { + uploadId string + startNum int32 + cseContext *EncryptionMultiPartContext +} + +func (u *Uploader) newDelegate(ctx context.Context, request *PutObjectRequest, optFns ...func(*UploaderOptions)) (*uploaderDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if request.Bucket == nil { + return nil, NewErrParamNull("request.Bucket") + } + + if request.Key == nil { + return nil, NewErrParamNull("request.Key") + } + + d := uploaderDelegate{ + base: u, + options: u.options, + client: u.client, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&d.options) + } + + if d.options.ParallelNum <= 0 { + d.options.ParallelNum = DefaultUploadParallel + } + if 
d.options.PartSize <= 0 { + d.options.PartSize = DefaultUploadPartSize + } + + if _, ok := d.request.Parameters["sequential"]; ok { + d.options.ParallelNum = 1 + } + + return &d, nil +} + +func (u *uploaderDelegate) checkSource(filePath string) error { + if filePath == "" { + return NewErrParamRequired("filePath") + } + + // if !FileExists(filePath) { + // return fmt.Errorf("File not exists, %v", filePath) + // } + + info, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("File not exists, %v", filePath) + } + return err + } + + u.filePath = filePath + u.fileInfo = info + + return nil +} + +func (u *uploaderDelegate) applySource() error { + if u.body == nil { + return NewErrParamNull("the body is null") + } + + totalSize := GetReaderLen(u.body) + + //Part Size + partSize := u.options.PartSize + if totalSize > 0 { + for totalSize/partSize >= int64(MaxUploadParts) { + partSize += u.options.PartSize + } + } + + u.totalSize = totalSize + u.options.PartSize = partSize + + return nil +} + +func (u *uploaderDelegate) adjustSource() error { + // resume from upload id + if u.uploadId != "" { + // if the body supports seek + r, ok := u.body.(io.Seeker) + // not support + if !ok { + u.uploadId = "" + return nil + } + + // if upload id is valid + paginator := NewListPartsPaginator(u.client, &ListPartsRequest{ + Bucket: u.request.Bucket, + Key: u.request.Key, + UploadId: Ptr(u.uploadId), + }) + + // find consecutive sequence from min part number + var ( + checkPartNumber int32 = 1 + updateCRC64 bool = ((u.base.featureFlags & FeatureEnableCRC64CheckUpload) > 0) + hashCRC64 uint64 = 0 + page *ListPartsResult + err error + uploadedParts []Part + ) + outerLoop: + + for paginator.HasNext() { + page, err = paginator.NextPage(u.context, u.options.ClientOptions...) 
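+			// A listing failure abandons the resume and falls back to a fresh upload.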
+ if err != nil { + u.uploadId = "" + return nil + } + for _, p := range page.Parts { + if p.PartNumber != checkPartNumber || + p.Size != u.options.PartSize { + break outerLoop + } + checkPartNumber++ + uploadedParts = append(uploadedParts, p) + if updateCRC64 && p.HashCRC64 != nil { + value, _ := strconv.ParseUint(ToString(p.HashCRC64), 10, 64) + hashCRC64 = CRC64Combine(hashCRC64, value, uint64(p.Size)) + } + } + } + + partNumber := checkPartNumber - 1 + newOffset := int64(partNumber) * u.options.PartSize + if _, err := r.Seek(newOffset, io.SeekStart); err != nil { + u.uploadId = "" + return nil + } + + cseContext, err := u.resumeCSEContext(page) + if err != nil { + u.uploadId = "" + return nil + } + + u.partNumber = partNumber + u.readerPos = newOffset + u.hashCRC64 = hashCRC64 + u.cseContext = cseContext + u.uploadedParts = uploadedParts + } + return nil +} + +func (d *uploaderDelegate) checkCheckpoint() error { + if d.options.EnableCheckpoint { + d.checkpoint = newUploadCheckpoint(d.request, d.filePath, d.options.CheckpointDir, d.fileInfo, d.options.PartSize) + if err := d.checkpoint.load(); err != nil { + return err + } + + if d.checkpoint.Loaded { + d.uploadId = d.checkpoint.Info.Data.UploadInfo.UploadId + } + d.options.LeavePartsOnError = true + } + return nil +} + +func (d *uploaderDelegate) openReader() (*os.File, error) { + file, err := os.Open(d.filePath) + if err != nil { + return nil, err + } + + d.body = file + return file, nil +} + +func (d *uploaderDelegate) closeReader(file *os.File, err error) error { + if file != nil { + file.Close() + } + + if d.checkpoint != nil && err == nil { + d.checkpoint.remove() + } + + d.body = nil + d.checkpoint = nil + + return err +} + +func (d *uploaderDelegate) resumeCSEContext(result *ListPartsResult) (*EncryptionMultiPartContext, error) { + if !d.base.isEncryptionClient { + return nil, nil + } + sc, ok := d.client.(*EncryptionClient) + if !ok { + return nil, fmt.Errorf("Not EncryptionClient") + } + + envelope, err := getEnvelopeFromListParts(result) + if err != nil { + return nil, err + } + + cc, err := sc.defualtCCBuilder.ContentCipherEnv(envelope) + if err != nil { + return nil, err + } + + cseContext := &EncryptionMultiPartContext{ + ContentCipher: cc, + PartSize: ToInt64(result.ClientEncryptionPartSize), + DataSize: ToInt64(result.ClientEncryptionDataSize), + } + + if !cseContext.Valid() { + return nil, fmt.Errorf("EncryptionMultiPartContext is invalid") + } + + return cseContext, nil +} + +func (u *uploaderDelegate) upload() (*UploadResult, error) { + if u.totalSize >= 0 && u.totalSize < u.options.PartSize { + return u.singlePart() + } + return u.multiPart() +} + +func (u *uploaderDelegate) singlePart() (*UploadResult, error) { + request := &PutObjectRequest{} + copyRequest(request, u.request) + request.Body = u.body + if request.ContentType == nil { + request.ContentType = u.getContentType() + } + + result, err := u.client.PutObject(u.context, request, u.options.ClientOptions...) 
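+	// The body fits in a single PutObject call; any failure is wrapped with an empty upload id.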
+ + if err != nil { + return nil, u.wrapErr("", err) + } + + return &UploadResult{ + ETag: result.ETag, + VersionId: result.VersionId, + HashCRC64: result.HashCRC64, + ResultCommon: result.ResultCommon, + }, nil +} + +func (u *uploaderDelegate) nextReader() (io.ReadSeeker, int, func(), error) { + type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker + } + switch r := u.body.(type) { + case readerAtSeeker: + var err error + + n := u.options.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + if bytesLeft <= u.options.PartSize { + err = io.EOF + n = bytesLeft + } + } + + reader := io.NewSectionReader(r, u.readerPos, n) + cleanup := func() {} + + u.readerPos += n + + return reader, int(n), cleanup, err + + default: + if u.partPool == nil { + u.partPool = newByteSlicePool(u.options.PartSize) + u.partPool.ModifyCapacity(u.options.ParallelNum + 1) + } + + part, err := u.partPool.Get(u.context) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFill(r, *part) + u.readerPos += int64(n) + + cleanup := func() { + u.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err + } +} + +type uploaderChunk struct { + partNum int32 + size int + body io.ReadSeeker + cleanup func() +} + +type uploadPartCRC struct { + partNumber int32 + size int + hashCRC64 *string +} + +type uploadPartCRCs []uploadPartCRC + +func (slice uploadPartCRCs) Len() int { + return len(slice) +} +func (slice uploadPartCRCs) Less(i, j int) bool { + return slice[i].partNumber < slice[j].partNumber +} +func (slice uploadPartCRCs) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +type saveErr struct { + Err error +} + +func (e saveErr) Error() string { + return fmt.Sprintf("saveErr: %v", e.Err) +} + +func (e saveErr) Unwrap() error { return e.Err } + +func (u *uploaderDelegate) multiPart() (*UploadResult, error) { + release := func() { + if u.partPool != nil { + u.partPool.Close() + } + } + defer release() + + var ( + wg sync.WaitGroup + mu sync.Mutex + parts UploadParts + errValue atomic.Value + crcParts uploadPartCRCs + enableCRC = (u.base.featureFlags & FeatureEnableCRC64CheckUpload) > 0 + ) + + // Init the multipart + uploadIdInfo, err := u.getUploadId() + if err != nil { + return nil, u.wrapErr("", err) + } + //fmt.Printf("getUploadId result: %v, %#v\n", uploadId, err) + uploadId := uploadIdInfo.uploadId + startPartNum := uploadIdInfo.startNum + + // Update Checkpoint + if u.checkpoint != nil { + u.checkpoint.Info.Data.UploadInfo.UploadId = uploadId + u.checkpoint.dump() + } + + saveErrFn := func(e error) { + if e == nil { + return + } + errValue.Store(saveErr{Err: e}) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(saveErr) + return e.Unwrap() + } + + // readChunk runs in worker goroutines to pull chunks off of the ch channel + readChunkFn := func(ch chan uploaderChunk) { + defer wg.Done() + for { + data, ok := <-ch + if !ok { + break + } + + if getErrFn() == nil { + upResult, err := u.client.UploadPart( + u.context, + &UploadPartRequest{ + Bucket: u.request.Bucket, + Key: u.request.Key, + UploadId: Ptr(uploadId), + PartNumber: data.partNum, + Body: data.body, + CSEMultiPartContext: uploadIdInfo.cseContext, + RequestPayer: u.request.RequestPayer, + }, + u.options.ClientOptions...) 
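+				// Collect the part ETag (and CRC when enabled) under the mutex shared by the workers.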
+ //fmt.Printf("UploadPart result: %#v, %#v\n", upResult, err) + + if err == nil { + mu.Lock() + parts = append(parts, UploadPart{ETag: upResult.ETag, PartNumber: data.partNum}) + if enableCRC { + crcParts = append(crcParts, + uploadPartCRC{partNumber: data.partNum, hashCRC64: upResult.HashCRC64, size: data.size}) + } + if u.request.ProgressFn != nil { + u.transferred += int64(data.size) + u.request.ProgressFn(int64(data.size), u.transferred, u.totalSize) + } + mu.Unlock() + } else { + saveErrFn(err) + } + } + data.cleanup() + } + } + + ch := make(chan uploaderChunk, u.options.ParallelNum) + for i := 0; i < u.options.ParallelNum; i++ { + wg.Add(1) + go readChunkFn(ch) + } + + // Read and queue the parts + var ( + qnum int32 = startPartNum + qerr error = nil + ) + + // consume uploaded parts + if u.readerPos > 0 { + for _, p := range u.uploadedParts { + parts = append(parts, UploadPart{PartNumber: p.PartNumber, ETag: p.ETag}) + } + if u.request.ProgressFn != nil { + u.transferred = u.readerPos + u.request.ProgressFn(u.readerPos, u.transferred, u.totalSize) + } + } + + for getErrFn() == nil && qerr == nil { + var ( + reader io.ReadSeeker + nextChunkLen int + cleanup func() + ) + + reader, nextChunkLen, cleanup, qerr = u.nextReader() + // check err + if qerr != nil && qerr != io.EOF { + cleanup() + saveErrFn(qerr) + break + } + + // No need to upload empty part + if nextChunkLen == 0 { + cleanup() + break + } + + qnum++ + //fmt.Printf("send chunk: %d\n", qnum) + ch <- uploaderChunk{body: reader, partNum: qnum, cleanup: cleanup, size: nextChunkLen} + } + + // Close the channel, wait for workers + close(ch) + wg.Wait() + + // Complete upload + var cmResult *CompleteMultipartUploadResult + if err = getErrFn(); err == nil { + sort.Sort(parts) + cmRequest := &CompleteMultipartUploadRequest{} + copyRequest(cmRequest, u.request) + cmRequest.UploadId = Ptr(uploadId) + cmRequest.CompleteMultipartUpload = &CompleteMultipartUpload{Parts: parts} + cmResult, err = u.client.CompleteMultipartUpload(u.context, cmRequest, u.options.ClientOptions...) + } + //fmt.Printf("CompleteMultipartUpload cmResult: %#v, %#v\n", cmResult, err) + + if err != nil { + //Abort + if !u.options.LeavePartsOnError { + abortRequest := &AbortMultipartUploadRequest{} + copyRequest(abortRequest, u.request) + abortRequest.UploadId = Ptr(uploadId) + _, _ = u.client.AbortMultipartUpload(u.context, abortRequest, u.options.ClientOptions...) + } + return nil, u.wrapErr(uploadId, err) + } + + if enableCRC { + caclCRC := fmt.Sprint(u.combineCRC(crcParts)) + if err = checkResponseHeaderCRC64(caclCRC, cmResult.Headers); err != nil { + return nil, u.wrapErr(uploadId, err) + } + } + + return &UploadResult{ + UploadId: Ptr(uploadId), + ETag: cmResult.ETag, + VersionId: cmResult.VersionId, + HashCRC64: cmResult.HashCRC64, + ResultCommon: cmResult.ResultCommon, + }, nil +} + +func (u *uploaderDelegate) getUploadId() (info uploadIdInfo, err error) { + if u.uploadId != "" { + return uploadIdInfo{ + uploadId: u.uploadId, + startNum: u.partNumber, + cseContext: u.cseContext, + }, nil + } + + // if not exist or fail, create a new upload id + request := &InitiateMultipartUploadRequest{} + copyRequest(request, u.request) + if request.ContentType == nil { + request.ContentType = u.getContentType() + } + + if u.base.isEncryptionClient { + request.CSEPartSize = &u.options.PartSize + request.CSEDataSize = &u.totalSize + } + + result, err := u.client.InitiateMultipartUpload(u.context, request, u.options.ClientOptions...) 
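+	// A fresh upload id starts at startNum 0; the part counter is incremented before each send, so the first part number is 1.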
+ if err != nil { + return info, err + } + + return uploadIdInfo{ + uploadId: *result.UploadId, + startNum: 0, + cseContext: result.CSEMultiPartContext, + }, nil +} + +func (u *uploaderDelegate) getContentType() *string { + if u.filePath != "" { + if contentType := TypeByExtension(u.filePath); contentType != "" { + return Ptr(contentType) + } + } + return nil +} + +func (u *uploaderDelegate) wrapErr(uploadId string, err error) error { + return &UploadError{ + UploadId: uploadId, + Path: fmt.Sprintf("oss://%s/%s", *u.request.Bucket, *u.request.Key), + Err: err} +} + +func (u *uploaderDelegate) combineCRC(crcs uploadPartCRCs) uint64 { + if len(crcs) == 0 { + return 0 + } + sort.Sort(crcs) + crc := u.hashCRC64 + for _, c := range crcs { + if c.hashCRC64 == nil { + return 0 + } + if value, err := strconv.ParseUint(*c.hashCRC64, 10, 64); err == nil { + crc = CRC64Combine(crc, value, uint64(c.size)) + } else { + break + } + } + return crc +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go new file mode 100644 index 000000000..98c11840a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go @@ -0,0 +1,405 @@ +package oss + +import ( + "bytes" + "context" + "encoding" + "errors" + "fmt" + "io" + "net/http" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "time" +) + +func init() { + for i := 0; i < len(noEscape); i++ { + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } + + defaultUserAgent = fmt.Sprintf("%s/%s (%s/%s/%s;%s)", SdkName, Version(), runtime.GOOS, + "-", runtime.GOARCH, runtime.Version()) +} + +var defaultUserAgent string +var noEscape [256]bool + +func sleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// getNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. +// gets the current time in Unix time, in seconds. +func getNowSec() int64 { + return time.Now().Unix() +} + +// getNowGMT gets the current time in GMT format. 
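+// The returned string uses the http.TimeFormat layout, e.g. "Mon, 02 Jan 2006 15:04:05 GMT".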
+func getNowGMT() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +func escapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + } + return false +} + +func setTimeReflectValue(dst reflect.Value, value time.Time) (err error) { + dst0 := dst + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + if dst.CanAddr() { + pv := dst.Addr() + if pv.CanInterface() { + if val, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return val.UnmarshalText([]byte(value.Format(time.RFC3339))) + } + } + } + return errors.New("cannot unmarshal into " + dst0.Type().String()) +} + +func setReflectValue(dst reflect.Value, data string) (err error) { + dst0 := dst + src := []byte(data) + + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + switch dst.Kind() { + case reflect.Invalid: + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if len(src) == 0 { + dst.SetInt(0) + return nil + } + itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if len(src) == 0 { + dst.SetUint(0) + return nil + } + utmp, err := strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Bool: + if len(src) == 0 { + dst.SetBool(false) + return nil + } + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + } + return nil +} + +func setMapStringReflectValue(dst reflect.Value, key any, data any) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + switch dst.Kind() { + case reflect.Invalid: + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Map: + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + mapValue := reflect.ValueOf(data) + mapKey := reflect.ValueOf(key) + dst.SetMapIndex(mapKey, mapValue) + } + return nil +} + +func isContextError(ctx context.Context, perr *error) bool { + if ctxErr := ctx.Err(); ctxErr != nil { + if *perr == nil { + *perr = ctxErr + } + return true + } + return false +} + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + n, err := io.Copy(dst, 
src)
+	if err != nil {
+		return n, err
+	}
+
+	_, err = src.Seek(curPos, io.SeekStart)
+	if err != nil {
+		return n, err
+	}
+
+	return n, nil
+}
+
+func ParseOffsetAndSizeFromHeaders(headers http.Header) (offset, size int64) {
+	return parseOffsetAndSizeFromHeaders(headers)
+}
+
+func parseOffsetAndSizeFromHeaders(headers http.Header) (offset, size int64) {
+	size = -1
+	var contentLength = headers.Get("Content-Length")
+	if len(contentLength) != 0 {
+		var err error
+		if size, err = strconv.ParseInt(contentLength, 10, 64); err != nil {
+			return 0, -1
+		}
+	}
+
+	var contentRange = headers.Get("Content-Range")
+	if len(contentRange) == 0 {
+		return 0, size
+	}
+
+	if !strings.HasPrefix(contentRange, "bytes ") {
+		return 0, -1
+	}
+
+	// start offset
+	dash := strings.IndexRune(contentRange, '-')
+	if dash < 0 {
+		return 0, -1
+	}
+	ret, err := strconv.ParseInt(contentRange[6:dash], 10, 64)
+	if err != nil {
+		return 0, -1
+	}
+	offset = ret
+
+	// total size
+	slash := strings.IndexRune(contentRange, '/')
+	if slash < 0 {
+		return 0, -1
+	}
+	tsize := contentRange[slash+1:]
+	if tsize != "*" {
+		ret, err = strconv.ParseInt(tsize, 10, 64)
+		if err != nil {
+			return 0, -1
+		}
+		size = ret
+	}
+
+	return offset, size
+}
+
+func minInt64(a, b int64) int64 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt64(a, b int64) int64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+// ParseContentRange parses a ContentRange from a Content-Range: header.
+// It only accepts the bytes 22-33/42 and bytes 22-33/* formats.
+func ParseContentRange(s string) (from int64, to int64, total int64, err error) {
+	if !strings.HasPrefix(s, "bytes ") {
+		return from, to, total, errors.New("invalid content range")
+	}
+
+	slash := strings.IndexRune(s, '/')
+	if slash < 0 {
+		return from, to, total, errors.New("invalid content range")
+	}
+
+	dash := strings.IndexRune(s, '-')
+	if dash < 0 {
+		return from, to, total, errors.New("invalid content range")
+	}
+
+	if slash < dash {
+		return from, to, total, errors.New("invalid content range")
+	}
+
+	// from
+	ret, err := strconv.ParseInt(s[6:dash], 10, 64)
+	if err != nil {
+		return from, to, total, errors.New("invalid content range")
+	}
+	from = ret
+
+	// to
+	ret, err = strconv.ParseInt(s[dash+1:slash], 10, 64)
+	if err != nil {
+		return from, to, total, errors.New("invalid content range")
+	}
+	to = ret
+
+	// total
+	last := s[slash+1:]
+	if last == "*" {
+		total = -1
+	} else {
+		ret, err = strconv.ParseInt(last, 10, 64)
+		if err != nil {
+			return from, to, total, errors.New("invalid content range")
+		}
+		total = ret
+	}
+
+	return from, to, total, nil
+}
+
+// ParseRange parses an HTTPRange from a Range: header.
+// It only accepts single ranges.
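+//
+// For example, "bytes=500-999" yields &HTTPRange{Offset: 500, Count: 500};
+// with no end given ("bytes=100-"), Count is left at 0.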
+func ParseRange(s string) (r *HTTPRange, err error) { + const preamble = "bytes=" + if !strings.HasPrefix(s, preamble) { + return nil, errors.New("range: header invalid: doesn't start with " + preamble) + } + s = s[len(preamble):] + if strings.ContainsRune(s, ',') { + return nil, errors.New("range: header invalid: contains multiple ranges which isn't supported") + } + dash := strings.IndexRune(s, '-') + if dash < 0 { + return nil, errors.New("range: header invalid: contains no '-'") + } + start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:]) + o := HTTPRange{Offset: 0, Count: 0} + if start != "" { + o.Offset, err = strconv.ParseInt(start, 10, 64) + if err != nil || o.Offset < 0 { + return nil, errors.New("range: header invalid: bad start") + } + } + if end != "" { + e, err := strconv.ParseInt(end, 10, 64) + if err != nil || e < 0 { + return nil, errors.New("range: header invalid: bad end") + } + o.Count = e - o.Offset + 1 + } + return &o, nil +} + +// FileExists returns whether the given file exists or not +func FileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return (info != nil && !info.IsDir()) +} + +// DirExists returns whether the given directory exists or not +func DirExists(dir string) bool { + info, err := os.Stat(dir) + if os.IsNotExist(err) { + return false + } + return (info != nil && info.IsDir()) +} + +// EmptyFile changes the size of the named file to zero. +func EmptyFile(filename string) bool { + err := os.Truncate(filename, 0) + return err == nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go new file mode 100644 index 000000000..c65feaf85 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go @@ -0,0 +1,95 @@ +package oss + +import ( + "io" + "reflect" + "time" +) + +func copyRequest(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +func copyOfRequest(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + if dst.Kind() == reflect.String { + dst.SetString(e.String()) + } else { + dst.Set(reflect.New(e)) + } + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + dst.Set(tempValue) + } + } + if dst.Kind() != reflect.String && src.Elem().IsValid() { + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if 
src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go
new file mode 100644
index 000000000..68aee396d
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go
@@ -0,0 +1,140 @@
+package oss
+
+import (
+	"hash"
+	"hash/crc64"
+)
+
+// hashCRC64 represents the partial evaluation of a checksum.
+type hashCRC64 struct {
+	init uint64
+	crc  uint64
+	tab  *crc64.Table
+}
+
+// NewCRC64 creates a new hash.Hash64 computing the CRC64 checksum
+// seeded with init, using the crc64.ECMA polynomial.
+func NewCRC64(init uint64) hash.Hash64 {
+	return &hashCRC64{
+		init: init,
+		crc:  init,
+		tab:  crc64.MakeTable(crc64.ECMA),
+	}
+}
+
+// Size returns the number of bytes Sum will return.
+func (d *hashCRC64) Size() int {
+	return crc64.Size
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (d *hashCRC64) BlockSize() int {
+	return 1
+}
+
+// Reset resets the hash to its initial state.
+func (d *hashCRC64) Reset() {
+	d.crc = d.init
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (d *hashCRC64) Write(p []byte) (n int, err error) {
+	d.crc = crc64.Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+// Sum64 returns the current CRC64 value.
+func (d *hashCRC64) Sum64() uint64 {
+	return d.crc
+}
+
+// Sum appends the current CRC64 value, big-endian, to in.
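+// For example, a CRC of 0x0102030405060708 appends the bytes
+// 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08.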
+func (d *hashCRC64) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} + +var _ RequestBodyTracker = (*hashCRC64)(nil) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go new file mode 100644 index 000000000..450bc01d8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go @@ -0,0 +1,595 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": 
"application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": 
"video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + ".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", + ".323": "text/h323", + ".aab": "application/x-authoware-bin", + ".aam": "application/x-authoware-map", + ".aas": "application/x-authoware-seg", + ".acx": "application/internet-property-stream", + ".als": 
"audio/X-Alpha5", + ".amc": "application/x-mpeg", + ".ani": "application/octet-stream", + ".asd": "application/astound", + ".asf": "video/x-ms-asf", + ".asn": "application/astound", + ".asp": "application/x-asap", + ".asr": "video/x-ms-asf", + ".asx": "video/x-ms-asf", + ".avb": "application/octet-stream", + ".awb": "audio/amr-wb", + ".axs": "application/olescript", + ".bas": "text/plain", + ".bin ": "application/octet-stream", + ".bld": "application/bld", + ".bld2": "application/bld2", + ".bpk": "application/octet-stream", + ".c": "text/plain", + ".cal": "image/x-cals", + ".cat": "application/vnd.ms-pkiseccat", + ".ccn": "application/x-cnc", + ".cco": "application/x-cocoa", + ".cer": "application/x-x509-ca-cert", + ".cgi": "magnus-internal/cgi", + ".chat": "application/x-chat", + ".clp": "application/x-msclip", + ".cmx": "image/x-cmx", + ".co": "application/x-cult3d-object", + ".cod": "image/cis-cod", + ".conf": "text/plain", + ".cpp": "text/plain", + ".crd": "application/x-mscardfile", + ".crl": "application/pkix-crl", + ".crt": "application/x-x509-ca-cert", + ".csm": "chemical/x-csml", + ".csml": "chemical/x-csml", + ".cur": "application/octet-stream", + ".dcm": "x-lml/x-evm", + ".dcx": "image/x-dcx", + ".der": "application/x-x509-ca-cert", + ".dhtml": "text/html", + ".dot": "application/msword", + ".dwf": "drawing/x-dwf", + ".dwg": "application/x-autocad", + ".dxf": "application/x-autocad", + ".ebk": "application/x-expandedbook", + ".emb": "chemical/x-embl-dl-nucleotide", + ".embl": "chemical/x-embl-dl-nucleotide", + ".epub": "application/epub+zip", + ".eri": "image/x-eri", + ".es": "audio/echospeech", + ".esl": "audio/echospeech", + ".etc": "application/x-earthtime", + ".evm": "x-lml/x-evm", + ".evy": "application/envoy", + ".fh4": "image/x-freehand", + ".fh5": "image/x-freehand", + ".fhc": "image/x-freehand", + ".fif": "application/fractals", + ".flr": "x-world/x-vrml", + ".fm": "application/x-maker", + ".fpx": "image/x-fpx", + ".fvi": "video/isivideo", + ".gau": "chemical/x-gaussian-input", + ".gca": "application/x-gca-compressed", + ".gdb": "x-lml/x-gdb", + ".gps": "application/x-gps", + ".h": "text/plain", + ".hdm": "text/x-hdml", + ".hdml": "text/x-hdml", + ".hlp": "application/winhlp", + ".hta": "application/hta", + ".htc": "text/x-component", + ".hts": "text/html", + ".htt": "text/webviewhtml", + ".ifm": "image/gif", + ".ifs": "image/ifs", + ".iii": "application/x-iphone", + ".imy": "audio/melody", + ".ins": "application/x-internet-signup", + ".ips": "application/x-ipscript", + ".ipx": "application/x-ipix", + ".isp": "application/x-internet-signup", + ".it": "audio/x-mod", + ".itz": "audio/x-mod", + ".ivr": "i-world/i-vrml", + ".j2k": "image/j2k", + ".jam": "application/x-jam", + ".java": "text/plain", + ".jfif": "image/pipeg", + ".jpz": "image/jpeg", + ".jwc": "application/jwc", + ".kjx": "application/x-kjx", + ".lak": "x-lml/x-lak", + ".lcc": "application/fastman", + ".lcl": "application/x-digitalloca", + ".lcr": "application/x-digitalloca", + ".lgh": "application/lgh", + ".lml": "x-lml/x-lml", + ".lmlpack": "x-lml/x-lmlpack", + ".log": "text/plain", + ".lsf": "video/x-la-asf", + ".lsx": "video/x-la-asf", + ".m13": "application/x-msmediaview", + ".m14": "application/x-msmediaview", + ".m15": "audio/x-mod", + ".m3url": "audio/x-mpegurl", + ".m4b": "audio/mp4a-latm", + ".ma1": "audio/ma1", + ".ma2": "audio/ma2", + ".ma3": "audio/ma3", + ".ma5": "audio/ma5", + ".map": "magnus-internal/imagemap", + ".mbd": "application/mbedlet", + ".mct": "application/x-mascot", + ".mdb": 
"application/x-msaccess", + ".mdz": "audio/x-mod", + ".mel": "text/x-vmel", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mi": "application/x-mif", + ".mil": "image/x-cals", + ".mio": "audio/x-mio", + ".mmf": "application/x-skt-lbs", + ".mng": "video/x-mng", + ".mny": "application/x-msmoney", + ".moc": "application/x-mocha", + ".mocha": "application/x-mocha", + ".mod": "audio/x-mod", + ".mof": "application/x-yumekara", + ".mol": "chemical/x-mdl-molfile", + ".mop": "chemical/x-mopac-input", + ".mpa": "video/mpeg", + ".mpc": "application/vnd.mpohun.certificate", + ".mpg4": "video/mp4", + ".mpn": "application/vnd.mophun.application", + ".mpp": "application/vnd.ms-project", + ".mps": "application/x-mapserver", + ".mpv2": "video/mpeg", + ".mrl": "text/x-mrml", + ".mrm": "application/x-mrm", + ".msg": "application/vnd.ms-outlook", + ".mts": "application/metastream", + ".mtx": "application/metastream", + ".mtz": "application/metastream", + ".mvb": "application/x-msmediaview", + ".mzv": "application/metastream", + ".nar": "application/zip", + ".nbmp": "image/nbmp", + ".ndb": "x-lml/x-ndb", + ".ndwn": "application/ndwn", + ".nif": "application/x-nif", + ".nmz": "application/x-scream", + ".nokia-op-logo": "image/vnd.nok-oplogo-color", + ".npx": "application/x-netfpx", + ".nsnd": "audio/nsnd", + ".nva": "application/x-neva1", + ".nws": "message/rfc822", + ".oom": "application/x-AtlasMate-Plugin", + ".p10": "application/pkcs10", + ".p12": "application/x-pkcs12", + ".p7b": "application/x-pkcs7-certificates", + ".p7c": "application/x-pkcs7-mime", + ".p7m": "application/x-pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/x-pkcs7-signature", + ".pac": "audio/x-pac", + ".pae": "audio/x-epac", + ".pan": "application/x-pan", + ".pcx": "image/x-pcx", + ".pda": "image/x-pda", + ".pfr": "application/font-tdpfr", + ".pfx": "application/x-pkcs12", + ".pko": "application/ynd.ms-pkipko", + ".pm": "application/x-perl", + ".pma": "application/x-perfmon", + ".pmc": "application/x-perfmon", + ".pmd": "application/x-pmd", + ".pml": "application/x-perfmon", + ".pmr": "application/x-perfmon", + ".pmw": "application/x-perfmon", + ".pnz": "image/png", + ".pot,": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pqf": "application/x-cprplayer", + ".pqi": "application/cprplayer", + ".prc": "application/x-prc", + ".prf": "application/pics-rules", + ".prop": "text/plain", + ".proxy": "application/x-ns-proxy-autoconfig", + ".ptlk": "application/listenup", + ".pub": "application/x-mspublisher", + ".pvx": "video/x-pv-pvx", + ".qcp": "audio/vnd.qcelp", + ".r3t": "text/vnd.rn-realtext3d", + ".rar": "application/octet-stream", + ".rc": "text/plain", + ".rf": "image/vnd.rn-realflash", + ".rlf": "application/x-richlink", + ".rmf": "audio/x-rmf", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmvb": "audio/x-pn-realaudio", + ".rnx": "application/vnd.rn-realplayer", + ".rp": "image/vnd.rn-realpix", + ".rt": "text/vnd.rn-realtext", + ".rte": "x-lml/x-gps", + ".rtg": "application/metastream", + ".rv": "video/vnd.rn-realvideo", + ".rwc": "application/x-rogerwilco", + ".s3m": "audio/x-mod", + ".s3z": "audio/x-mod", + ".sca": "application/x-supercard", + ".scd": "application/x-msschedule", + ".sct": "text/scriptlet", + ".sdf": "application/e-score", + ".sea": "application/x-stuffit", + ".setpay": "application/set-payment-initiation", + ".setreg": "application/set-registration-initiation", + ".shtml": "text/html", + ".shtm": "text/html", + ".shw": 
"application/presentations", + ".si6": "image/si6", + ".si7": "image/vnd.stiwap.sis", + ".si9": "image/vnd.lgtwap.sis", + ".slc": "application/x-salsa", + ".smd": "audio/x-smd", + ".smp": "application/studiom", + ".smz": "audio/x-smd", + ".spc": "application/x-pkcs7-certificates", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".sdp": "application/sdp", + ".spt": "application/x-spt", + ".sst": "application/vnd.ms-pkicertstore", + ".stk": "application/hyperstudio", + ".stl": "application/vnd.ms-pkistl", + ".stm": "text/html", + ".svf": "image/vnd", + ".svh": "image/svh", + ".svr": "x-world/x-svr", + ".swfl": "application/x-shockwave-flash", + ".tad": "application/octet-stream", + ".talk": "text/x-speech", + ".taz": "application/x-tar", + ".tbp": "application/x-timbuktu", + ".tbt": "application/x-timbuktu", + ".tgz": "application/x-compressed", + ".thm": "application/vnd.eri.thm", + ".tki": "application/x-tkined", + ".tkined": "application/x-tkined", + ".toc": "application/toc", + ".toy": "image/toy", + ".trk": "x-lml/x-gps", + ".trm": "application/x-msterminal", + ".tsi": "audio/tsplayer", + ".tsp": "application/dsptype", + ".ttf": "application/octet-stream", + ".ttz": "application/t-time", + ".uls": "text/iuls", + ".ult": "audio/x-mod", + ".uu": "application/x-uuencode", + ".uue": "application/x-uuencode", + ".vcf": "text/x-vcard", + ".vdo": "video/vdo", + ".vib": "audio/vib", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".vmi": "application/x-dreamcast-vms-info", + ".vms": "application/x-dreamcast-vms", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq", + ".vre": "x-world/x-vream", + ".vrt": "x-world/x-vrt", + ".vrw": "x-world/x-vream", + ".vts": "workbook/formulaone", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".web": "application/vnd.xara", + ".wi": "image/wavelet", + ".wis": "application/x-InstallShield", + ".wks": "application/vnd.ms-works", + ".wmd": "application/x-ms-wmd", + ".wmf": "application/x-msmetafile", + ".wmlscript": "text/vnd.wap.wmlscript", + ".wmz": "application/x-ms-wmz", + ".wpng": "image/x-up-wpng", + ".wps": "application/vnd.ms-works", + ".wpt": "x-lml/x-gps", + ".wri": "application/x-mswrite", + ".wrz": "x-world/x-vrml", + ".ws": "text/vnd.wap.wmlscript", + ".wsc": "application/vnd.wap.wmlscriptc", + ".wv": "video/wavelet", + ".wxl": "application/x-wxl", + ".x-gzip": "application/x-gzip", + ".xaf": "x-world/x-vrml", + ".xar": "application/vnd.xara", + ".xdm": "application/x-xdma", + ".xdma": "application/x-xdma", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xhtm": "application/xhtml+xml", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xll": "application/x-excel", + ".xlm": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xm": "audio/x-mod", + ".xmz": "audio/x-mod", + ".xof": "x-world/x-vrml", + ".xpi": "application/x-xpinstall", + ".xsit": "text/xml", + ".yz1": "application/x-yz1", + ".z": "application/x-compress", + ".zac": "application/x-zaurus-zac", + ".json": "application/json", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// gets the file's MIME type for HTTP header Content-Type +func TypeByExtension(filePath string) string { + ext := path.Ext(filePath) + typ := mime.TypeByExtension(ext) + if typ == "" { + typ = extToMimeType[strings.ToLower(ext)] + } else { + if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") { + typ = removeCharsetInMimeType(typ) + } + } + return typ +} + +// Remove charset from mime type +func removeCharsetInMimeType(typ string) (str string) { + temArr := strings.Split(typ, ";") + var builder strings.Builder + for i, s := range temArr { + tmpStr := strings.Trim(s, " ") + if strings.Contains(tmpStr, "charset=") { + continue + } + if i == 0 { + builder.WriteString(s) + } else { + builder.WriteString("; " + s) + } + } + return builder.String() +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go new file mode 100644 index 000000000..55ea6bb64 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go @@ -0,0 +1,248 @@ +package oss + +import ( + "context" + "fmt" + "sync" +) + +type byteSlicePool interface { + Get(context.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // pass + } + + select { + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. + // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. 
This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case _ = <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the channel is full when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// Allows us to reap allocations that are returned and are no longer needed.
+	}
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+ } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go new file mode 100644 index 000000000..e91f1c6be --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go @@ -0,0 +1,84 @@ +package oss + +import ( + "net/url" + "strings" +) + +func isValidRegion(region string) bool { + for _, v := range region { + if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') { + return false + } + } + return region != "" +} + +func isValidEndpoint(endpoint *url.URL) bool { + return (endpoint != nil) +} + +func isValidBucketName(bucketName *string) bool { + if bucketName == nil { + return false + } + + nameLen := len(*bucketName) + if nameLen < 3 || nameLen > 63 { + return false + } + + if (*bucketName)[0] == '-' || (*bucketName)[nameLen-1] == '-' { + return false + } + + for _, v := range *bucketName { + if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') { + return false + } + } + return true +} + +func isValidObjectName(objectName *string) bool { + if objectName == nil || len(*objectName) == 0 { + return false + } + return true +} + +func isValidRange(r *string) bool { + if _, err := ParseRange(*r); err != nil { + return false + } + return true +} + +var supportedMethod = map[string]struct{}{ + "GET": {}, + "PUT": {}, + "HEAD": {}, + "POST": {}, + "DELETE": {}, + "OPTIONS": {}, +} + +func isValidMethod(method string) bool { + if _, ok := supportedMethod[method]; ok { + return true + } + return false +} + +var supportedCopyDirective = map[string]struct{}{ + "COPY": {}, + "REPLACE": {}, +} + +func isValidCopyDirective(value string) bool { + upper := strings.ToUpper(value) + if _, ok := supportedCopyDirective[upper]; ok { + return true + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go new file mode 100644 index 000000000..0f2dd3057 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go @@ -0,0 +1,34 @@ +package oss + +import ( + "bytes" + "fmt" + "sync" +) + +const ( + major = "1" + minor = "2" + patch = "3" + tag = "" + + SdkName = "alibabacloud-go-sdk-v2" +) + +var once sync.Once +var version string + +func Version() string { + once.Do(func() { + ver := 
fmt.Sprintf("%s.%s.%s", major, minor, patch) + verBuilder := bytes.NewBufferString(ver) + if tag != "" && tag != "-" { + _, err := verBuilder.WriteString(tag) + if err != nil { + verBuilder = bytes.NewBufferString(ver) + } + } + version = verBuilder.String() + }) + return version +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go new file mode 100644 index 000000000..34aaa48f8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go @@ -0,0 +1,246 @@ +package oss + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "strings" + "unicode" + "unicode/utf8" +) + +type XmlDecoderLite struct { + reader io.Reader + attributePrefix string + useRawToken bool +} + +func NewXmlDecoderLite(r io.Reader) *XmlDecoderLite { + return &XmlDecoderLite{ + reader: r, + attributePrefix: "+@", + useRawToken: true, + } +} + +func (dec *XmlDecoderLite) Decode(root *XmlNode) error { + return dec.decodeXML(root) +} + +type XmlNode struct { + Children []*XmlChildren + Data []string +} + +type XmlChildren struct { + K string + V []*XmlNode +} + +func (n *XmlNode) addChild(s string, c *XmlNode) { + if n.Children == nil { + n.Children = make([]*XmlChildren, 0) + } + for _, childEntry := range n.Children { + if childEntry.K == s { + childEntry.V = append(childEntry.V, c) + return + } + } + n.Children = append(n.Children, &XmlChildren{K: s, V: []*XmlNode{c}}) +} + +func (n *XmlNode) value() any { + if len(n.Children) > 0 { + return n.GetMap() + } + if n.Data != nil { + return n.Data[0] + } + return nil +} + +func (n *XmlNode) GetMap() map[string]any { + node := map[string]any{} + for _, kv := range n.Children { + label := kv.K + children := kv.V + if len(children) > 1 { + vals := make([]any, 0) + for _, child := range children { + vals = append(vals, child.value()) + } + node[label] = vals + } else { + node[label] = children[0].value() + } + } + return node +} + +type element struct { + parent *element + n *XmlNode + label string +} + +func (dec *XmlDecoderLite) decodeXML(root *XmlNode) error { + xmlDec := xml.NewDecoder(dec.reader) + + started := false + + // Create first element from the root node + elem := &element{ + parent: nil, + n: root, + } + + getToken := func() (xml.Token, error) { + if dec.useRawToken { + return xmlDec.RawToken() + } + return xmlDec.Token() + } + + for { + t, e := getToken() + if e != nil && !errors.Is(e, io.EOF) { + return e + } + if t == nil { + break + } + + switch se := t.(type) { + case xml.StartElement: + elem = &element{ + parent: elem, + n: &XmlNode{}, + label: se.Name.Local, + } + + for _, a := range se.Attr { + elem.n.addChild(dec.attributePrefix+a.Name.Local, &XmlNode{Data: []string{a.Value}}) + } + case xml.CharData: + newBit := trimNonGraphic(string(se)) + if !started && len(newBit) > 0 { + return fmt.Errorf("invalid XML: Encountered chardata [%v] outside of XML node", newBit) + } + + if len(newBit) > 0 { + elem.n.Data = append(elem.n.Data, newBit) + } + case xml.EndElement: + if elem.parent != nil { + elem.parent.n.addChild(elem.label, elem.n) + } + elem = elem.parent + } + started = true + } + + return nil +} + +func trimNonGraphic(s string) string { + if s == "" { + return s + } + + var first *int + var last int + for i, r := range []rune(s) { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) { + continue + } + + if first == nil { + f := i + first = &f + last = i + } else { + last = i + } + } + + if first == nil { + return "" + } + + return 
string([]rune(s)[*first : last+1])
+}
+
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+)
+
+// escapeXml writes to p the properly escaped XML equivalent
+// of the plain text data s.
+func escapeXml(s string) string {
+	var p strings.Builder
+	var esc []byte
+	hextable := "0123456789ABCDEF"
+	escPattern := []byte("&#x00;")
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			esc = escNL
+		case '\r':
+			esc = escCR
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				if r >= 0x00 && r < 0x20 {
+					escPattern[3] = hextable[r>>4]
+					escPattern[4] = hextable[r&0x0f]
+					esc = escPattern
+				} else {
+					esc = escFFFD
+				}
+				break
+			}
+			continue
+		}
+		p.WriteString(s[last : i-width])
+		p.Write(esc)
+		last = i
+	}
+	p.WriteString(s[last:])
+	return p.String()
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
diff --git a/vendor/github.com/aliyun/credentials-go/LICENSE b/vendor/github.com/aliyun/credentials-go/LICENSE
new file mode 100644
index 000000000..0c44dcefe
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
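The vendored file that follows is credentials-go's internal HTTP helper; every provider added later in this patch (CLI profile, CloudSSO, ECS RAM role) funnels its metadata and STS calls through its Request/Do pair. Below is a minimal sketch of the call shape, using only names visible in the vendored source; the endpoint shown is the ECS metadata address those providers target, and since the package sits under internal/ it is importable only from inside the vendored module, so this is illustrative rather than something application code can run:

```go
package main

import (
	"fmt"
	"time"

	// internal/ package: importable only from within aliyun/credentials-go
	// itself; shown here purely to illustrate the call shape.
	httputil "github.com/aliyun/credentials-go/credentials/internal/http"
)

func main() {
	req := &httputil.Request{
		Method:         "GET",
		Protocol:       "http",
		Host:           "100.100.100.200", // ECS instance metadata endpoint
		Path:           "/latest/meta-data/ram/security-credentials/",
		Headers:        map[string]string{},
		ConnectTimeout: 1 * time.Second,
		ReadTimeout:    1 * time.Second,
	}

	// Do assembles the URL, applies timeout/proxy settings, and returns
	// the status code, headers, and raw body in a Response.
	res, err := httputil.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.StatusCode, string(res.Body))
}
```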
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go new file mode 100644 index 000000000..3d5ed0f2d --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go @@ -0,0 +1,145 @@ +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/alibabacloud-go/debug/debug" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type Request struct { + Method string // http request method + URL string // http url + Protocol string // http or https + Host string // http host + ReadTimeout time.Duration + ConnectTimeout time.Duration + Proxy string // http proxy + Form map[string]string // http form + Body []byte // request body for JSON or stream + Path string + Queries map[string]string + Headers map[string]string +} + +func (req *Request) BuildRequestURL() string { + httpUrl := fmt.Sprintf("%s://%s%s", req.Protocol, req.Host, req.Path) + if req.URL != "" { + httpUrl = req.URL + } + + querystring := utils.GetURLFormedMap(req.Queries) + if querystring != "" { + httpUrl = httpUrl + "?" + querystring + } + + return fmt.Sprintf("%s %s", req.Method, httpUrl) +} + +type Response struct { + StatusCode int + Headers map[string]string + Body []byte +} + +var newRequest = http.NewRequest + +type do func(req *http.Request) (*http.Response, error) + +var hookDo = func(fn do) do { + return fn +} + +var debuglog = debug.Init("credential") + +func Do(req *Request) (res *Response, err error) { + querystring := utils.GetURLFormedMap(req.Queries) + // do request + httpUrl := fmt.Sprintf("%s://%s%s?%s", req.Protocol, req.Host, req.Path, querystring) + if req.URL != "" { + httpUrl = req.URL + } + + var body io.Reader + if req.Method == "GET" { + body = strings.NewReader("") + } else if req.Body != nil { + body = bytes.NewReader(req.Body) + } else { + body = strings.NewReader(utils.GetURLFormedMap(req.Form)) + } + + httpRequest, err := newRequest(req.Method, httpUrl, body) + if err != nil { + return + } + + if req.Form != nil { + httpRequest.Header["Content-Type"] = []string{"application/x-www-form-urlencoded"} + } + + for key, value := range req.Headers { + if value != "" { + debuglog("> %s: %s", key, value) + httpRequest.Header.Set(key, value) + } + } + + httpClient := &http.Client{} + + if req.ReadTimeout != 0 { + httpClient.Timeout = req.ReadTimeout + req.ConnectTimeout + } + + transport := http.DefaultTransport.(*http.Transport).Clone() + if req.Proxy != "" { + var proxy *url.URL + proxy, err = url.Parse(req.Proxy) + if err != nil { + return + } + transport.Proxy = http.ProxyURL(proxy) + } + + if req.ConnectTimeout != 0 { + transport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: req.ConnectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } + } + + httpClient.Transport = transport + + httpResponse, err := hookDo(httpClient.Do)(httpRequest) + if err != nil { + return + } + + defer httpResponse.Body.Close() + + responseBody, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return + } + res = &Response{ + StatusCode: httpResponse.StatusCode, + Headers: make(map[string]string), + Body: responseBody, + } + for key, v := range httpResponse.Header { + res.Headers[key] = v[0] + } + + return +} diff --git 
a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go new file mode 100644 index 000000000..a94088c6b --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go @@ -0,0 +1,18 @@ +package utils + +import ( + "os" + "runtime" +) + +var getOS = func() string { + return runtime.GOOS +} + +func GetHomePath() string { + if getOS() == "windows" { + return os.Getenv("USERPROFILE") + } + + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go new file mode 100644 index 000000000..432395cf4 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go @@ -0,0 +1,36 @@ +package utils + +import ( + "context" + "net" + "time" +) + +// Runtime is for setting timeout, proxy and host +type Runtime struct { + ReadTimeout int + ConnectTimeout int + Proxy string + Host string + STSEndpoint string +} + +// NewRuntime returns a Runtime +func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { + return &Runtime{ + ReadTimeout: readTimeout, + ConnectTimeout: connectTimeout, + Proxy: proxy, + Host: host, + } +} + +// Timeout is for connect Timeout +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go new file mode 100644 index 000000000..fffee1eda --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go @@ -0,0 +1,204 @@ +package utils + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + mathrand "math/rand" + "net/url" + "os" + "runtime" + "strconv" + "sync/atomic" + "time" +) + +type uuid [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) { + return fn +} + +var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { + return fn +} + +// GetUUID returns a uuid +func GetUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +// RandStringBytes returns a rand string +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[mathrand.Intn(len(letterBytes))] + } + return string(b) +} + +// ShaHmac1 return a string which has been hashed +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +// Sha256WithRsa return a string which has been hashed with Rsa +func Sha256WithRsa(source, secret string) string { + decodeString, err := 
base64.StdEncoding.DecodeString(secret)
+	if err != nil {
+		panic(err)
+	}
+	private, err := x509.ParsePKCS8PrivateKey(decodeString)
+	if err != nil {
+		panic(err)
+	}
+
+	h := crypto.Hash.New(crypto.SHA256)
+	h.Write([]byte(source))
+	hashed := h.Sum(nil)
+	signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey),
+		crypto.SHA256, hashed)
+	if err != nil {
+		panic(err)
+	}
+
+	return base64.StdEncoding.EncodeToString(signature)
+}
+
+// GetMD5Base64 returns the base64-encoded MD5 digest of bytes
+func GetMD5Base64(bytes []byte) (base64Value string) {
+	md5Ctx := md5.New()
+	md5Ctx.Write(bytes)
+	md5Value := md5Ctx.Sum(nil)
+	base64Value = base64.StdEncoding.EncodeToString(md5Value)
+	return
+}
+
+// GetTimeInFormatISO8601 returns the current GMT time in ISO 8601 format
+func GetTimeInFormatISO8601() (timeStr string) {
+	gmt := time.FixedZone("GMT", 0)
+
+	return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
+}
+
+// GetURLFormedMap returns the URL-encoded form of the map
+func GetURLFormedMap(source map[string]string) (urlEncoded string) {
+	urlEncoder := url.Values{}
+	for key, value := range source {
+		urlEncoder.Add(key, value)
+	}
+	urlEncoded = urlEncoder.Encode()
+	return
+}
+
+func newUUID() uuid {
+	ns := uuid{}
+	safeRandom(ns[:])
+	u := newFromHash(md5.New(), ns, RandStringBytes(16))
+	u[6] = (u[6] & 0x0f) | (byte(2) << 4)
+	u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+
+	return u
+}
+
+func newFromHash(h hash.Hash, ns uuid, name string) uuid {
+	u := uuid{}
+	h.Write(ns[:])
+	h.Write([]byte(name))
+	copy(u[:], h.Sum(nil))
+
+	return u
+}
+
+func safeRandom(dest []byte) {
+	if _, err := hookRead(rand.Read)(dest); err != nil {
+		panic(err)
+	}
+}
+
+func (u uuid) String() string {
+	buf := make([]byte, 36)
+
+	hex.Encode(buf[0:8], u[0:4])
+	buf[8] = '-'
+	hex.Encode(buf[9:13], u[4:6])
+	buf[13] = '-'
+	hex.Encode(buf[14:18], u[6:8])
+	buf[18] = '-'
+	hex.Encode(buf[19:23], u[8:10])
+	buf[23] = '-'
+	hex.Encode(buf[24:], u[10:])
+
+	return string(buf)
+}
+
+var processStartTime int64 = time.Now().UnixNano() / 1e6
+var seqId int64 = 0
+
+func getGID() uint64 {
+	// https://blog.sgmansfield.com/2015/12/goroutine-ids/
+	b := make([]byte, 64)
+	b = b[:runtime.Stack(b, false)]
+	b = bytes.TrimPrefix(b, []byte("goroutine "))
+	b = b[:bytes.IndexByte(b, ' ')]
+	n, _ := strconv.ParseUint(string(b), 10, 64)
+	return n
+}
+
+func GetNonce() (uuidHex string) {
+	routineId := getGID()
+	currentTime := time.Now().UnixNano() / 1e6
+	seq := atomic.AddInt64(&seqId, 1)
+	randNum := mathrand.Int63()
+	msg := fmt.Sprintf("%d-%d-%d-%d-%d", processStartTime, routineId, currentTime, seq, randNum)
+	h := md5.New()
+	h.Write([]byte(msg))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// Get first non-empty value
+func GetDefaultString(values ...string) string {
+	for _, v := range values {
+		if v != "" {
+			return v
+		}
+	}
+
+	return ""
+}
+
+// Rollback restores the environment variables remembered by Memory
+type Rollback func()
+
+func Memory(keys ...string) Rollback {
+	// remember the environment variables
+	m := make(map[string]string)
+	for _, key := range keys {
+		m[key] = os.Getenv(key)
+	}
+
+	return func() {
+		for _, key := range keys {
+			os.Setenv(key, m[key])
+		}
+	}
+}
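To make the helper signatures above concrete, a small sketch follows; all values are made up, and internal/utils is likewise importable only from inside the vendored module, so treat this as illustration only:

```go
package main

import (
	"fmt"
	"os"

	// internal package; illustrative only
	"github.com/aliyun/credentials-go/credentials/internal/utils"
)

func main() {
	form := map[string]string{"Action": "AssumeRole", "RoleSessionName": "demo"} // made-up form values

	fmt.Println(utils.GetURLFormedMap(form))            // "Action=AssumeRole&RoleSessionName=demo"
	fmt.Println(utils.GetTimeInFormatISO8601())         // current GMT time, "2006-01-02T15:04:05Z" layout
	fmt.Println(utils.GetMD5Base64([]byte("payload")))  // base64-encoded MD5 digest
	fmt.Println(utils.GetDefaultString("", "fallback")) // first non-empty argument wins

	restore := utils.Memory("ALIBABA_CLOUD_PROFILE") // snapshot the variable (test helper)
	os.Setenv("ALIBABA_CLOUD_PROFILE", "scratch")
	restore() // put the remembered value back
}
```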
"encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type CLIProfileCredentialsProvider struct { + profileFile string + profileName string + innerProvider CredentialsProvider +} + +type CLIProfileCredentialsProviderBuilder struct { + provider *CLIProfileCredentialsProvider +} + +func (b *CLIProfileCredentialsProviderBuilder) WithProfileFile(profileFile string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileFile = profileFile + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) WithProfileName(profileName string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) Build() (provider *CLIProfileCredentialsProvider, err error) { + // 优先级: + // 1. 使用显示指定的 profileFile + // 2. 使用环境变量(ALIBABA_CLOUD_CONFIG_FILE)指定的 profileFile + // 3. 兜底使用 path.Join(homeDir, ".aliyun/config") 作为 profileFile + if b.provider.profileFile == "" { + b.provider.profileFile = os.Getenv("ALIBABA_CLOUD_CONFIG_FILE") + } + // 优先级: + // 1. 使用显示指定的 profileName + // 2. 使用环境变量(ALIBABA_CLOUD_PROFILE)制定的 profileName + // 3. 使用 CLI 配置中的当前 profileName + if b.provider.profileName == "" { + b.provider.profileName = os.Getenv("ALIBABA_CLOUD_PROFILE") + } + + if strings.ToLower(os.Getenv("ALIBABA_CLOUD_CLI_PROFILE_DISABLED")) == "true" { + err = errors.New("the CLI profile is disabled") + return + } + + provider = b.provider + return +} + +func NewCLIProfileCredentialsProviderBuilder() *CLIProfileCredentialsProviderBuilder { + return &CLIProfileCredentialsProviderBuilder{ + provider: &CLIProfileCredentialsProvider{}, + } +} + +type profile struct { + Name string `json:"name"` + Mode string `json:"mode"` + AccessKeyID string `json:"access_key_id"` + AccessKeySecret string `json:"access_key_secret"` + SecurityToken string `json:"sts_token"` + RegionID string `json:"region_id"` + RoleArn string `json:"ram_role_arn"` + RoleSessionName string `json:"ram_session_name"` + DurationSeconds int `json:"expired_seconds"` + StsRegion string `json:"sts_region"` + EnableVpc bool `json:"enable_vpc"` + SourceProfile string `json:"source_profile"` + RoleName string `json:"ram_role_name"` + OIDCTokenFile string `json:"oidc_token_file"` + OIDCProviderARN string `json:"oidc_provider_arn"` + Policy string `json:"policy"` + ExternalId string `json:"external_id"` + SignInUrl string `json:"cloud_sso_sign_in_url"` + AccountId string `json:"cloud_sso_account_id"` + AccessConfig string `json:"cloud_sso_access_config"` + AccessToken string `json:"access_token"` + AccessTokenExpire int64 `json:"cloud_sso_access_token_expire"` +} + +type configuration struct { + Current string `json:"current"` + Profiles []*profile `json:"profiles"` +} + +func newConfigurationFromPath(cfgPath string) (conf *configuration, err error) { + bytes, err := ioutil.ReadFile(cfgPath) + if err != nil { + err = fmt.Errorf("reading aliyun cli config from '%s' failed %v", cfgPath, err) + return + } + + conf = &configuration{} + + err = json.Unmarshal(bytes, conf) + if err != nil { + err = fmt.Errorf("unmarshal aliyun cli config from '%s' failed: %s", cfgPath, string(bytes)) + return + } + + if conf.Profiles == nil || len(conf.Profiles) == 0 { + err = fmt.Errorf("no any configured profiles in '%s'", cfgPath) + return + } + + return +} + +func (conf *configuration) getProfile(name string) (profile *profile, err error) { + for _, p := range conf.Profiles { + if p.Name == name { + profile = p 
+ return + } + } + + err = fmt.Errorf("unable to get profile with '%s'", name) + return +} + +func (provider *CLIProfileCredentialsProvider) getCredentialsProvider(conf *configuration, profileName string) (credentialsProvider CredentialsProvider, err error) { + p, err := conf.getProfile(profileName) + if err != nil { + return + } + + switch p.Mode { + case "AK": + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + case "StsToken": + credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + WithSecurityToken(p.SecurityToken). + Build() + case "RamRoleArn": + previousProvider, err1 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + if err1 != nil { + return nil, err1 + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "EcsRamRole": + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(p.RoleName).Build() + case "OIDC": + credentialsProvider, err = NewOIDCCredentialsProviderBuilder(). + WithOIDCTokenFilePath(p.OIDCTokenFile). + WithOIDCProviderARN(p.OIDCProviderARN). + WithRoleArn(p.RoleArn). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithDurationSeconds(p.DurationSeconds). + WithRoleSessionName(p.RoleSessionName). + WithPolicy(p.Policy). + Build() + case "ChainableRamRoleArn": + previousProvider, err1 := provider.getCredentialsProvider(conf, p.SourceProfile) + if err1 != nil { + err = fmt.Errorf("get source profile failed: %s", err1.Error()) + return + } + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "CloudSSO": + credentialsProvider, err = NewCloudSSOCredentialsProviderBuilder(). + WithSignInUrl(p.SignInUrl). + WithAccountId(p.AccountId). + WithAccessConfig(p.AccessConfig). + WithAccessToken(p.AccessToken). + WithAccessTokenExpire(p.AccessTokenExpire). 
Build()
+	default:
+		err = fmt.Errorf("unsupported profile mode '%s'", p.Mode)
+	}
+
+	return
+}
+
+// Defaults to GetHomePath; declared as a variable so tests can mock it.
+var getHomePath = utils.GetHomePath
+
+func (provider *CLIProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) {
+	if provider.innerProvider == nil {
+		cfgPath := provider.profileFile
+		if cfgPath == "" {
+			homeDir := getHomePath()
+			if homeDir == "" {
+				err = fmt.Errorf("cannot found home dir")
+				return
+			}
+
+			cfgPath = path.Join(homeDir, ".aliyun/config.json")
+		}
+
+		conf, err1 := newConfigurationFromPath(cfgPath)
+		if err1 != nil {
+			err = err1
+			return
+		}
+
+		if provider.profileName == "" {
+			provider.profileName = conf.Current
+		}
+
+		provider.innerProvider, err = provider.getCredentialsProvider(conf, provider.profileName)
+		if err != nil {
+			return
+		}
+	}
+
+	innerCC, err := provider.innerProvider.GetCredentials()
+	if err != nil {
+		return
+	}
+
+	providerName := innerCC.ProviderName
+	if providerName == "" {
+		providerName = provider.innerProvider.GetProviderName()
+	}
+
+	cc = &Credentials{
+		AccessKeyId:     innerCC.AccessKeyId,
+		AccessKeySecret: innerCC.AccessKeySecret,
+		SecurityToken:   innerCC.SecurityToken,
+		ProviderName:    fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName),
+	}
+
+	return
+}
+
+func (provider *CLIProfileCredentialsProvider) GetProviderName() string {
+	return "cli_profile"
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go
new file mode 100644
index 000000000..7bc29b243
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go
@@ -0,0 +1,216 @@
+package providers
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	httputil "github.com/aliyun/credentials-go/credentials/internal/http"
+)
+
+type CloudSSOCredentialsProvider struct {
+	signInUrl         string
+	accountId         string
+	accessConfig      string
+	accessToken       string
+	accessTokenExpire int64
+
+	lastUpdateTimestamp int64
+	expirationTimestamp int64
+	sessionCredentials  *sessionCredentials
+	// for http options
+	httpOptions *HttpOptions
+}
+
+type CloudSSOCredentialsProviderBuilder struct {
+	provider *CloudSSOCredentialsProvider
+}
+
+type cloudCredentialOptions struct {
+	AccountId             string `json:"AccountId"`
+	AccessConfigurationId string `json:"AccessConfigurationId"`
+}
+
+type cloudCredentials struct {
+	AccessKeyId     string `json:"AccessKeyId"`
+	AccessKeySecret string `json:"AccessKeySecret"`
+	SecurityToken   string `json:"SecurityToken"`
+	Expiration      string `json:"Expiration"`
+}
+
+type cloudCredentialResponse struct {
+	CloudCredential *cloudCredentials `json:"CloudCredential"`
+	RequestId       string            `json:"RequestId"`
+}
+
+func NewCloudSSOCredentialsProviderBuilder() *CloudSSOCredentialsProviderBuilder {
+	return &CloudSSOCredentialsProviderBuilder{
+		provider: &CloudSSOCredentialsProvider{},
+	}
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithSignInUrl(signInUrl string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.signInUrl = signInUrl
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithAccountId(accountId string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.accountId = accountId
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithAccessConfig(accessConfig string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.accessConfig = accessConfig
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) 
WithAccessToken(accessToken string) *CloudSSOCredentialsProviderBuilder { + b.provider.accessToken = accessToken + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithAccessTokenExpire(accessTokenExpire int64) *CloudSSOCredentialsProviderBuilder { + b.provider.accessTokenExpire = accessTokenExpire + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *CloudSSOCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) Build() (provider *CloudSSOCredentialsProvider, err error) { + if b.provider.accessToken == "" || b.provider.accessTokenExpire == 0 || b.provider.accessTokenExpire-time.Now().Unix() <= 0 { + err = errors.New("CloudSSO access token is empty or expired, please re-login with cli") + return + } + + if b.provider.signInUrl == "" || b.provider.accountId == "" || b.provider.accessConfig == "" { + err = errors.New("CloudSSO sign in url or account id or access config is empty") + return + } + + provider = b.provider + return +} + +func (provider *CloudSSOCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + url, err := url.Parse(provider.signInUrl) + if err != nil { + return nil, err + } + + req := &httputil.Request{ + Method: "POST", + Protocol: url.Scheme, + Host: url.Host, + Path: "/cloud-credentials", + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + body := cloudCredentialOptions{ + AccountId: provider.accountId, + AccessConfigurationId: provider.accessConfig, + } + + bodyBytes, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal options: %w", err) + } + + req.Body = bodyBytes + + // set headers + req.Headers["Accept"] = "application/json" + req.Headers["Content-Type"] = "application/json" + req.Headers["Authorization"] = fmt.Sprintf("Bearer %s", provider.accessToken) + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token from sso failed: " + err = errors.New(message + string(res.Body)) + return + } + var data cloudCredentialResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get session token from sso failed, json.Unmarshal fail: %s", err.Error()) + return + } + if data.CloudCredential == nil { + err = fmt.Errorf("get session token from sso failed, fail to get credentials") + return + } + + if data.CloudCredential.AccessKeyId == "" || data.CloudCredential.AccessKeySecret == "" || data.CloudCredential.SecurityToken == "" { + err = fmt.Errorf("refresh session token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: data.CloudCredential.AccessKeyId, + AccessKeySecret: data.CloudCredential.AccessKeySecret, + SecurityToken: data.CloudCredential.SecurityToken, + Expiration: data.CloudCredential.Expiration, + } + return +} + +func (provider *CloudSSOCredentialsProvider) 
needUpdateCredential() (result bool) {
+	if provider.expirationTimestamp == 0 {
+		return true
+	}
+
+	return provider.expirationTimestamp-time.Now().Unix() <= 180
+}
+
+func (provider *CloudSSOCredentialsProvider) GetCredentials() (cc *Credentials, err error) {
+	if provider.sessionCredentials == nil || provider.needUpdateCredential() {
+		sessionCredentials, err1 := provider.getCredentials()
+		if err1 != nil {
+			return nil, err1
+		}
+
+		provider.sessionCredentials = sessionCredentials
+		expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration)
+		if err2 != nil {
+			return nil, err2
+		}
+
+		provider.lastUpdateTimestamp = time.Now().Unix()
+		provider.expirationTimestamp = expirationTime.Unix()
+	}
+
+	cc = &Credentials{
+		AccessKeyId:     provider.sessionCredentials.AccessKeyId,
+		AccessKeySecret: provider.sessionCredentials.AccessKeySecret,
+		SecurityToken:   provider.sessionCredentials.SecurityToken,
+		ProviderName:    provider.GetProviderName(),
+	}
+	return
+}
+
+func (provider *CloudSSOCredentialsProvider) GetProviderName() string {
+	return "cloud_sso"
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go
new file mode 100644
index 000000000..26592fd22
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go
@@ -0,0 +1,22 @@
+package providers
+
+// Next version of the Credentials package:
+// - split out the bearer token
+// - move from passing config around to a real credentials provider model
+// - drop the GetAccessKeyId()/GetAccessKeySecret()/GetSecurityToken() methods and keep only GetCredentials()
+
+// The credentials struct
+type Credentials struct {
+	AccessKeyId     string
+	AccessKeySecret string
+	SecurityToken   string
+	ProviderName    string
+}
+
+// The credentials provider interface, return credentials and provider name
+type CredentialsProvider interface {
+	// Get credentials
+	GetCredentials() (*Credentials, error)
+	// Get credentials provider name
+	GetProviderName() string
+}
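The two-method CredentialsProvider interface above is the extension point everything else in this patch plugs into: satisfying it takes only a credentials getter and a name. A minimal sketch with an invented type (staticSketchProvider is not part of the SDK; the key values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aliyun/credentials-go/credentials/providers"
)

// staticSketchProvider is an invented example type, not part of the SDK.
type staticSketchProvider struct{ ak, sk string }

// GetCredentials returns the fixed key pair, tagging it with the provider name.
func (p *staticSketchProvider) GetCredentials() (*providers.Credentials, error) {
	return &providers.Credentials{
		AccessKeyId:     p.ak,
		AccessKeySecret: p.sk,
		ProviderName:    p.GetProviderName(),
	}, nil
}

func (p *staticSketchProvider) GetProviderName() string { return "static_sketch" }

func main() {
	var cp providers.CredentialsProvider = &staticSketchProvider{ak: "LTAI-example", sk: "secret-example"}
	cc, _ := cp.GetCredentials()
	fmt.Println(cc.AccessKeyId, cc.ProviderName)
}
```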
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go
new file mode 100644
index 000000000..597625f6f
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go
@@ -0,0 +1,113 @@
+package providers
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+type DefaultCredentialsProvider struct {
+	providerChain    []CredentialsProvider
+	lastUsedProvider CredentialsProvider
+}
+
+func NewDefaultCredentialsProvider() (provider *DefaultCredentialsProvider) {
+	providers := []CredentialsProvider{}
+
+	// Add static ak or sts credentials provider
+	envProvider, err := NewEnvironmentVariableCredentialsProviderBuilder().Build()
+	if err == nil {
+		providers = append(providers, envProvider)
+	}
+
+	// oidc check
+	oidcProvider, err := NewOIDCCredentialsProviderBuilder().Build()
+	if err == nil {
+		providers = append(providers, oidcProvider)
+	}
+
+	// cli credentials provider
+	cliProfileProvider, err := NewCLIProfileCredentialsProviderBuilder().Build()
+	if err == nil {
+		providers = append(providers, cliProfileProvider)
+	}
+
+	// profile credentials provider
+	profileProvider, err := NewProfileCredentialsProviderBuilder().Build()
+	if err == nil {
+		providers = append(providers, profileProvider)
+	}
+
+	// Add IMDS
+	ecsRamRoleProvider, err := NewECSRAMRoleCredentialsProviderBuilder().Build()
+	if err == nil {
+		providers = append(providers, ecsRamRoleProvider)
+	}
+
+	// credentials uri
+	if os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") != "" {
+		credentialsUriProvider, err := NewURLCredentialsProviderBuilder().Build()
+		if err == nil {
+			providers = append(providers, credentialsUriProvider)
+		}
+	}
+
+	return &DefaultCredentialsProvider{
+		providerChain: providers,
+	}
+}
+
+func (provider *DefaultCredentialsProvider) GetCredentials() (cc *Credentials, err error) {
+	if provider.lastUsedProvider != nil {
+		inner, err1 := provider.lastUsedProvider.GetCredentials()
+		if err1 != nil {
+			err = err1
+			return
+		}
+
+		providerName := inner.ProviderName
+		if providerName == "" {
+			providerName = provider.lastUsedProvider.GetProviderName()
+		}
+
+		cc = &Credentials{
+			AccessKeyId:     inner.AccessKeyId,
+			AccessKeySecret: inner.AccessKeySecret,
+			SecurityToken:   inner.SecurityToken,
+			ProviderName:    fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName),
+		}
+		return
+	}
+
+	errors := []string{}
+	for _, p := range provider.providerChain {
+		provider.lastUsedProvider = p
+		inner, errInLoop := p.GetCredentials()
+		if errInLoop != nil {
+			errors = append(errors, errInLoop.Error())
+			// on error, fall through to the next provider in the chain
+			continue
+		}
+
+		if inner != nil {
+			providerName := inner.ProviderName
+			if providerName == "" {
+				providerName = p.GetProviderName()
+			}
+			cc = &Credentials{
+				AccessKeyId:     inner.AccessKeyId,
+				AccessKeySecret: inner.AccessKeySecret,
+				SecurityToken:   inner.SecurityToken,
+				ProviderName:    fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName),
+			}
+			return
+		}
+	}
+
+	err = fmt.Errorf("unable to get credentials from any of the providers in the chain: %s", strings.Join(errors, ", "))
+	return
+}
+
+func (provider *DefaultCredentialsProvider) GetProviderName() string {
+	return "default"
+}
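The default chain above tries environment variables, OIDC, the CLI profile, the profile file, IMDS, and (when ALIBABA_CLOUD_CREDENTIALS_URI is set) a credentials URI, in that order, caching whichever provider succeeds in lastUsedProvider for later calls. A sketch of typical consumption; the resulting ProviderName is prefixed with "default/", e.g. "default/cli_profile":

```go
package main

import (
	"log"

	"github.com/aliyun/credentials-go/credentials/providers"
)

func main() {
	provider := providers.NewDefaultCredentialsProvider()

	cc, err := provider.GetCredentials()
	if err != nil {
		// Every link in the chain failed; err aggregates their messages.
		log.Fatal(err)
	}

	// ProviderName reports which chain link supplied the credentials.
	log.Printf("provider=%s accessKeyId=%s", cc.ProviderName, cc.AccessKeyId)
}
```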
strings.ToLower(os.Getenv("ALIBABA_CLOUD_ECS_METADATA_DISABLED")) == "true" { + err = errors.New("IMDS credentials is disabled") + return + } + + // set the default roleName + if builder.provider.roleName == "" { + builder.provider.roleName = os.Getenv("ALIBABA_CLOUD_ECS_METADATA") + } + + if !builder.provider.disableIMDSv1 { + builder.provider.disableIMDSv1 = strings.ToLower(os.Getenv("ALIBABA_CLOUD_IMDSV1_DISABLED")) == "true" + } + + provider = builder.provider + return +} + +type ecsRAMRoleResponse struct { + Code *string `json:"Code"` + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + LastUpdated *string `json:"LastUpdated"` + Expiration *string `json:"Expiration"` +} + +func (provider *ECSRAMRoleCredentialsProvider) needUpdateCredential() bool { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *ECSRAMRoleCredentialsProvider) getRoleName() (roleName string, err error) { + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/", + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := provider.getMetadataToken() + if err != nil { + return "", err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken + } + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("get role name failed: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("get role name failed: %s %d", req.BuildRequestURL(), res.StatusCode) + return + } + + roleName = strings.TrimSpace(string(res.Body)) + return +} + +func (provider *ECSRAMRoleCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + roleName := provider.roleName + if roleName == "" { + roleName, err = provider.getRoleName() + if err != nil { + return + } + } + + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/" + roleName, + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := provider.getMetadataToken() + if err != nil { + return nil, err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken +
} + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + return + } + + var data ecsRAMRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + + if data.AccessKeyId == nil || data.AccessKeySecret == nil || data.SecurityToken == nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get credentials") + return + } + + if *data.Code != "Success" { + err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.AccessKeyId, + AccessKeySecret: *data.AccessKeySecret, + SecurityToken: *data.SecurityToken, + Expiration: *data.Expiration, + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.session == nil || provider.needUpdateCredential() { + session, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.session = session + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", session.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.session.AccessKeyId, + AccessKeySecret: provider.session.AccessKeySecret, + SecurityToken: provider.session.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetProviderName() string { + return "ecs_ram_role" +} + +func (provider *ECSRAMRoleCredentialsProvider) getMetadataToken() (metadataToken string, err error) { + // PUT http://100.100.100.200/latest/api/token + req := &httputil.Request{ + Method: "PUT", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/api/token", + Headers: map[string]string{ + "X-aliyun-ecs-metadata-token-ttl-seconds": strconv.Itoa(defaultMetadataTokenDuration), + }, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, _err := httpDo(req) + if _err != nil { + if provider.disableIMDSv1 { + err = fmt.Errorf("get metadata token failed: %s", _err.Error()) + } + return + } + if res.StatusCode != 200 { + if provider.disableIMDSv1 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + } + return + } + metadataToken = string(res.Body) + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go new file mode 100644 index 000000000..27fe33b9e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go @@ -0,0 +1,55 @@ +package providers + +import ( + "fmt" + "os" +) + +type EnvironmentVariableCredentialsProvider 
struct { +} + +type EnvironmentVariableCredentialsProviderBuilder struct { + provider *EnvironmentVariableCredentialsProvider +} + +func NewEnvironmentVariableCredentialsProviderBuilder() *EnvironmentVariableCredentialsProviderBuilder { + return &EnvironmentVariableCredentialsProviderBuilder{ + provider: &EnvironmentVariableCredentialsProvider{}, + } +} + +func (builder *EnvironmentVariableCredentialsProviderBuilder) Build() (provider *EnvironmentVariableCredentialsProvider, err error) { + provider = builder.provider + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + accessKeyId := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + + if accessKeyId == "" { + err = fmt.Errorf("unable to get credentials from environment variables, Access key ID must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_ID)") + return + } + + accessKeySecret := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + + if accessKeySecret == "" { + err = fmt.Errorf("unable to get credentials from environment variables, Access key secret must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_SECRET)") + return + } + + securityToken := os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + + cc = &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + ProviderName: provider.GetProviderName(), + } + + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetProviderName() string { + return "env" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go new file mode 100644 index 000000000..6839abd3e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go @@ -0,0 +1,7 @@ +package providers + +import ( + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +var httpDo = httputil.Do diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go new file mode 100644 index 000000000..ae7194c24 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go @@ -0,0 +1,278 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type OIDCCredentialsProvider struct { + oidcProviderARN string + oidcTokenFilePath string + roleArn string + roleSessionName string + durationSeconds int + policy string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions +} + +type OIDCCredentialsProviderBuilder struct { + provider *OIDCCredentialsProvider +} + +func NewOIDCCredentialsProviderBuilder() *OIDCCredentialsProviderBuilder { + return &OIDCCredentialsProviderBuilder{ + provider: &OIDCCredentialsProvider{}, + } +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCProviderARN(oidcProviderArn string) *OIDCCredentialsProviderBuilder { + b.provider.oidcProviderARN = oidcProviderArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCTokenFilePath(oidcTokenFilePath string) *OIDCCredentialsProviderBuilder { +
b.provider.oidcTokenFilePath = oidcTokenFilePath + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleArn(roleArn string) *OIDCCredentialsProviderBuilder { + b.provider.roleArn = roleArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *OIDCCredentialsProviderBuilder { + b.provider.roleSessionName = roleSessionName + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *OIDCCredentialsProviderBuilder { + b.provider.durationSeconds = durationSeconds + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithStsRegionId(regionId string) *OIDCCredentialsProviderBuilder { + b.provider.stsRegionId = regionId + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *OIDCCredentialsProviderBuilder { + b.provider.enableVpc = enableVpc + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithPolicy(policy string) *OIDCCredentialsProviderBuilder { + b.provider.policy = policy + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithSTSEndpoint(stsEndpoint string) *OIDCCredentialsProviderBuilder { + b.provider.stsEndpoint = stsEndpoint + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *OIDCCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *OIDCCredentialsProviderBuilder) Build() (provider *OIDCCredentialsProvider, err error) { + if b.provider.roleSessionName == "" { + b.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + + if b.provider.oidcTokenFilePath == "" { + b.provider.oidcTokenFilePath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE") + } + + if b.provider.oidcTokenFilePath == "" { + err = errors.New("the OIDCTokenFilePath is empty") + return + } + + if b.provider.oidcProviderARN == "" { + b.provider.oidcProviderARN = os.Getenv("ALIBABA_CLOUD_OIDC_PROVIDER_ARN") + } + + if b.provider.oidcProviderARN == "" { + err = errors.New("the OIDCProviderARN is empty") + return + } + + if b.provider.roleArn == "" { + b.provider.roleArn = os.Getenv("ALIBABA_CLOUD_ROLE_ARN") + } + + if b.provider.roleArn == "" { + err = errors.New("the RoleArn is empty") + return + } + + if b.provider.durationSeconds == 0 { + b.provider.durationSeconds = 3600 + } + + if b.provider.durationSeconds < 900 { + err = errors.New("the Assume Role session duration should be in the range of 15min - max duration seconds") + } + + if b.provider.stsEndpoint == "" { + if !b.provider.enableVpc { + b.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if b.provider.enableVpc { + prefix = "sts-vpc" + } + if b.provider.stsRegionId != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, b.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + b.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = b.provider + return +} + +func (provider *OIDCCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ + Method: "POST", + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = 
time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRoleWithOIDC" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + req.Queries = queries + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + bodyForm["OIDCProviderArn"] = provider.oidcProviderARN + token, err := ioutil.ReadFile(provider.oidcTokenFilePath) + if err != nil { + return + } + + bodyForm["OIDCToken"] = string(token) + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // set headers + req.Headers["Accept-Encoding"] = "identity" + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token failed: " + err = errors.New(message + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get oidc sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("get oidc sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *OIDCCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *OIDCCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *OIDCCredentialsProvider) GetProviderName() string { + return "oidc_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go new file mode 100644 index 000000000..c26548e3e --- /dev/null +++ 
b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go @@ -0,0 +1,169 @@ +package providers + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/aliyun/credentials-go/credentials/internal/utils" + "gopkg.in/ini.v1" +) + +type ProfileCredentialsProvider struct { + profileName string + innerProvider CredentialsProvider +} + +type ProfileCredentialsProviderBuilder struct { + provider *ProfileCredentialsProvider +} + +func NewProfileCredentialsProviderBuilder() (builder *ProfileCredentialsProviderBuilder) { + return &ProfileCredentialsProviderBuilder{ + provider: &ProfileCredentialsProvider{}, + } +} + +func (b *ProfileCredentialsProviderBuilder) WithProfileName(profileName string) *ProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *ProfileCredentialsProviderBuilder) Build() (provider *ProfileCredentialsProvider, err error) { + // Priority: + // 1. use the explicitly specified profileName + // 2. use the profileName specified by the ALIBABA_CLOUD_PROFILE environment variable + // 3. fall back to "default" as the profileName + b.provider.profileName = utils.GetDefaultString(b.provider.profileName, os.Getenv("ALIBABA_CLOUD_PROFILE"), "default") + + provider = b.provider + return +} + +func (provider *ProfileCredentialsProvider) getCredentialsProvider(ini *ini.File) (credentialsProvider CredentialsProvider, err error) { + section, err := ini.GetSection(provider.profileName) + if err != nil { + err = errors.New("ERROR: Can not load section" + err.Error()) + return + } + + value, err := section.GetKey("type") + if err != nil { + err = errors.New("ERROR: Can not find credential type" + err.Error()) + return + } + + switch value.String() { + case "access_key": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + if err1 != nil || err2 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + + if value1.String() == "" || value2.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). + Build() + case "ecs_ram_role": + value1, err1 := section.GetKey("role_name") + if err1 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(value1.String()).Build() + case "ram_role_arn": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + value3, err3 := section.GetKey("role_arn") + value4, err4 := section.GetKey("role_session_name") + if err1 != nil || err2 != nil || err3 != nil || err4 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + previous, err5 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). + Build() + if err5 != nil { + err = errors.New("get previous credentials provider failed") + return + } + rawPolicy, _ := section.GetKey("policy") + policy := "" + if rawPolicy != nil { + policy = rawPolicy.String() + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previous). + WithRoleArn(value3.String()). + WithRoleSessionName(value4.String()). + WithPolicy(policy). + WithDurationSeconds(3600).
+ Build() + default: + err = errors.New("ERROR: Failed to get credential") + } + return +} + +func (provider *ProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.innerProvider == nil { + sharedCfgPath := os.Getenv("ALIBABA_CLOUD_CREDENTIALS_FILE") + if sharedCfgPath == "" { + homeDir := getHomePath() + if homeDir == "" { + err = fmt.Errorf("cannot find home dir") + return + } + + sharedCfgPath = path.Join(homeDir, ".alibabacloud/credentials") + } + + ini, err1 := ini.Load(sharedCfgPath) + if err1 != nil { + err = errors.New("ERROR: Can not open file" + err1.Error()) + return + } + + provider.innerProvider, err = provider.getCredentialsProvider(ini) + if err != nil { + return + } + } + + innerCC, err := provider.innerProvider.GetCredentials() + if err != nil { + return + } + + providerName := innerCC.ProviderName + if providerName == "" { + providerName = provider.innerProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: innerCC.AccessKeyId, + AccessKeySecret: innerCC.AccessKeySecret, + SecurityToken: innerCC.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + + return +} + +func (provider *ProfileCredentialsProvider) GetProviderName() string { + return "profile" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go new file mode 100644 index 000000000..969e271ec --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go @@ -0,0 +1,375 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type assumedRoleUser struct { +} + +type credentials struct { + SecurityToken *string `json:"SecurityToken"` + Expiration *string `json:"Expiration"` + AccessKeySecret *string `json:"AccessKeySecret"` + AccessKeyId *string `json:"AccessKeyId"` +} + +type assumeRoleResponse struct { + RequestID *string `json:"RequestId"` + AssumedRoleUser *assumedRoleUser `json:"AssumedRoleUser"` + Credentials *credentials `json:"Credentials"` +} + +type sessionCredentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration string +} + +type HttpOptions struct { + Proxy string + // Connection timeout, in milliseconds. + ConnectTimeout int + // Read timeout, in milliseconds.
+ ReadTimeout int +} + +type RAMRoleARNCredentialsProvider struct { + // for previous credentials + accessKeyId string + accessKeySecret string + securityToken string + credentialsProvider CredentialsProvider + + roleArn string + roleSessionName string + durationSeconds int + policy string + externalId string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 + lastUpdateTimestamp int64 + previousProviderName string + sessionCredentials *sessionCredentials +} + +type RAMRoleARNCredentialsProviderBuilder struct { + provider *RAMRoleARNCredentialsProvider +} + +func NewRAMRoleARNCredentialsProviderBuilder() *RAMRoleARNCredentialsProviderBuilder { + return &RAMRoleARNCredentialsProviderBuilder{ + provider: &RAMRoleARNCredentialsProvider{}, + } +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithSecurityToken(securityToken string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithCredentialsProvider(credentialsProvider CredentialsProvider) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.credentialsProvider = credentialsProvider + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleArn(roleArn string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleArn = roleArn + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsRegionId(regionId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsRegionId = regionId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.enableVpc = enableVpc + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsEndpoint(endpoint string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsEndpoint = endpoint + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleSessionName = roleSessionName + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithPolicy(policy string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.policy = policy + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithExternalId(externalId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.externalId = externalId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.durationSeconds = durationSeconds + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) Build() (provider 
*RAMRoleARNCredentialsProvider, err error) { + if builder.provider.credentialsProvider == nil { + if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" && builder.provider.securityToken != "" { + builder.provider.credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + WithSecurityToken(builder.provider.securityToken). + Build() + if err != nil { + return + } + } else if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" { + builder.provider.credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + Build() + if err != nil { + return + } + } else { + err = errors.New("must specify a previous credentials provider to assume role") + return + } + } + + if builder.provider.roleArn == "" { + if roleArn := os.Getenv("ALIBABA_CLOUD_ROLE_ARN"); roleArn != "" { + builder.provider.roleArn = roleArn + } else { + err = errors.New("the RoleArn is empty") + return + } + } + + if builder.provider.roleSessionName == "" { + if roleSessionName := os.Getenv("ALIBABA_CLOUD_ROLE_SESSION_NAME"); roleSessionName != "" { + builder.provider.roleSessionName = roleSessionName + } else { + builder.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + } + + // duration seconds + if builder.provider.durationSeconds == 0 { + // default to 3600 + builder.provider.durationSeconds = 3600 + } + + if builder.provider.durationSeconds < 900 { + err = errors.New("session duration should be in the range of 900s - max session duration") + return + } + + // sts endpoint + if builder.provider.stsEndpoint == "" { + if !builder.provider.enableVpc { + builder.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if builder.provider.enableVpc { + prefix = "sts-vpc" + } + if builder.provider.stsRegionId != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, builder.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + builder.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = builder.provider + return +} + +func (provider *RAMRoleARNCredentialsProvider) getCredentials(cc *Credentials) (session *sessionCredentials, err error) { + method := "POST" + req := &httputil.Request{ + Method: method, + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRole" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + queries["SignatureMethod"] = "HMAC-SHA1" + queries["SignatureVersion"] = "1.0" + queries["SignatureNonce"] = utils.GetNonce() + queries["AccessKeyId"] = cc.AccessKeyId + + if cc.SecurityToken != "" { + queries["SecurityToken"] = cc.SecurityToken + } + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + if provider.externalId != "" { + bodyForm["ExternalId"] = provider.externalId + } + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = 
strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // calculate signature + signParams := make(map[string]string) + for key, value := range queries { + signParams[key] = value + } + for key, value := range bodyForm { + signParams[key] = value + } + + stringToSign := utils.GetURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = method + "&%2F&" + stringToSign + secret := cc.AccessKeySecret + "&" + queries["Signature"] = utils.ShaHmac1(stringToSign, secret) + + req.Queries = queries + + // set headers + req.Headers["Accept-Encoding"] = "identity" + req.Headers["Content-Type"] = "application/x-www-form-urlencoded" + req.Headers["x-acs-credentials-provider"] = cc.ProviderName + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = errors.New("refresh session token failed: " + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *RAMRoleARNCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + // get the credentials from the upstream (previous) provider first + previousCredentials, err1 := provider.credentialsProvider.GetCredentials() + if err1 != nil { + return nil, err1 + } + sessionCredentials, err2 := provider.getCredentials(previousCredentials) + if err2 != nil { + return nil, err2 + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err != nil { + return nil, err + } + + provider.expirationTimestamp = expirationTime.Unix() + provider.lastUpdateTimestamp = time.Now().Unix() + provider.previousProviderName = previousCredentials.ProviderName + provider.sessionCredentials = sessionCredentials + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId,
+ AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), provider.previousProviderName), + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) GetProviderName() string { + return "ram_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go new file mode 100644 index 000000000..bd3660ccc --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go @@ -0,0 +1,67 @@ +package providers + +import ( + "errors" + "os" +) + +type StaticAKCredentialsProvider struct { + accessKeyId string + accessKeySecret string +} + +type StaticAKCredentialsProviderBuilder struct { + provider *StaticAKCredentialsProvider +} + +func NewStaticAKCredentialsProviderBuilder() *StaticAKCredentialsProviderBuilder { + return &StaticAKCredentialsProviderBuilder{ + provider: &StaticAKCredentialsProvider{}, + } +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) Build() (provider *StaticAKCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticAKCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticAKCredentialsProvider) GetProviderName() string { + return "static_ak" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go new file mode 100644 index 000000000..ad5715187 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go @@ -0,0 +1,83 @@ +package providers + +import ( + "errors" + "os" +) + +type StaticSTSCredentialsProvider struct { + accessKeyId string + accessKeySecret string + securityToken string +} + +type StaticSTSCredentialsProviderBuilder struct { + provider *StaticSTSCredentialsProvider +} + +func NewStaticSTSCredentialsProviderBuilder() *StaticSTSCredentialsProviderBuilder { + return &StaticSTSCredentialsProviderBuilder{ + provider: &StaticSTSCredentialsProvider{}, + } +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret 
string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithSecurityToken(securityToken string) *StaticSTSCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) Build() (provider *StaticSTSCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + if builder.provider.securityToken == "" { + builder.provider.securityToken = os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + } + + if builder.provider.securityToken == "" { + err = errors.New("the security token is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticSTSCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + SecurityToken: provider.securityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticSTSCredentialsProvider) GetProviderName() string { + return "static_sts" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go new file mode 100644 index 000000000..ccd877d16 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go @@ -0,0 +1,152 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type URLCredentialsProvider struct { + url string + // for sts + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 +} + +type URLCredentialsProviderBuilder struct { + provider *URLCredentialsProvider +} + +func NewURLCredentialsProviderBuilder() *URLCredentialsProviderBuilder { + return &URLCredentialsProviderBuilder{ + provider: &URLCredentialsProvider{}, + } +} + +func (builder *URLCredentialsProviderBuilder) WithUrl(url string) *URLCredentialsProviderBuilder { + builder.provider.url = url + return builder +} + +func (builder *URLCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *URLCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *URLCredentialsProviderBuilder) Build() (provider *URLCredentialsProvider, err error) { + + if builder.provider.url == "" { + builder.provider.url = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + + if builder.provider.url == "" { + err = errors.New("the url is empty") + return + } + + provider = builder.provider + return +} + +type urlResponse struct { + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + Expiration *string `json:"Expiration"` +} + +func (provider *URLCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ 
+ Method: "GET", + URL: provider.url, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("get credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + var resp urlResponse + err = json.Unmarshal(res.Body, &resp) + if err != nil { + err = fmt.Errorf("get credentials from %s failed with error, json unmarshal fail: %s", req.BuildRequestURL(), err.Error()) + return + } + + if resp.AccessKeyId == nil || resp.AccessKeySecret == nil || resp.SecurityToken == nil || resp.Expiration == nil { + err = fmt.Errorf("refresh credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + session = &sessionCredentials{ + AccessKeyId: *resp.AccessKeyId, + AccessKeySecret: *resp.AccessKeySecret, + SecurityToken: *resp.SecurityToken, + Expiration: *resp.Expiration, + } + return +} + +func (provider *URLCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *URLCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *URLCredentialsProvider) GetProviderName() string { + return "credential_uri" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f18a1706a..8f2287c0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -112,6 +112,22 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid +# github.com/alibabacloud-go/debug v1.0.1 +## explicit; go 1.18 +github.com/alibabacloud-go/debug/debug +# github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 +## explicit; go 1.18 +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport +# github.com/aliyun/credentials-go v1.4.7 +## explicit; go 1.14 
+github.com/aliyun/credentials-go/credentials/internal/http +github.com/aliyun/credentials-go/credentials/internal/utils +github.com/aliyun/credentials-go/credentials/providers # github.com/aws/aws-sdk-go-v2 v1.33.0 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws From bd120997b104316cf11dfbe4cdf1f1dd6b854f02 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Sun, 3 Aug 2025 10:38:27 +0700 Subject: [PATCH 02/21] feat: implement FileStat method for OSS storage --- pbm/storage/oss/oss.go | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 71a99f4a8..e8c57251a 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -1,11 +1,14 @@ package oss import ( + "context" "fmt" "io" + "path" "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" + "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" ) @@ -52,7 +55,27 @@ func (o *OSS) SourceReader(name string) (io.ReadCloser, error) { } func (o *OSS) FileStat(name string) (storage.FileInfo, error) { - return storage.FileInfo{}, nil + inf := storage.FileInfo{} + + res, err := o.ossCli.HeadObject(context.Background(), &oss.HeadObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + }) + if err != nil { + var serr *oss.ServiceError + if errors.As(err, &serr) && serr.Code == "NoSuchKey" { + return inf, storage.ErrNotExist + } + return inf, errors.Wrap(err, "get OSS object header") + } + + inf.Name = name + inf.Size = res.ContentLength + if inf.Size == 0 { + return inf, storage.ErrEmpty + } + + return inf, nil } func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { From 6249fdd02bf9e97850b42df5f14d2e8999cf06d9 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Sun, 3 Aug 2025 10:52:09 +0700 Subject: [PATCH 03/21] feat: implement Delete method for OSS storage --- pbm/storage/oss/oss.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index e8c57251a..74a1d63e1 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -54,6 +54,7 @@ return nil, nil } +// FileStat returns file info. It returns an error if the file is empty or does not exist. func (o *OSS) FileStat(name string) (storage.FileInfo, error) { inf := storage.FileInfo{} @@ -78,14 +79,28 @@ func (o *OSS) FileStat(name string) (storage.FileInfo, error) { return inf, nil } +// List scans path with prefix and returns all files with given suffix. +// Both prefix and suffix can be omitted. func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { return nil, nil } +// Delete deletes given file. +// It returns storage.ErrNotExist if a file doesn't exist.
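+// A minimal usage sketch (the backup-file name is hypothetical, and this
+// assumes the pbm/errors wrapper re-exports the standard errors.Is):
+//
+//	if err := stg.Delete("2025-08-03T06:19:59Z.pbm.json"); err != nil && !errors.Is(err, storage.ErrNotExist) {
+//		return errors.Wrap(err, "delete backup file")
+//	}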
func (o *OSS) Delete(name string) error { + key := path.Join(o.cfg.Prefix, name) + _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + }) + if err != nil { + return errors.Wrapf(err, "delete %s/%s file from OSS", o.cfg.Bucket, key) + } return nil } +// Copy makes a copy of the src object/file under the dst name func (o *OSS) Copy(src, dst string) error { return nil } From 3a23edda8825a6a40165c70e0a6268a3cfcccf13 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Sun, 3 Aug 2025 10:52:24 +0700 Subject: [PATCH 04/21] feat: add support for OSS storage type in ParseType function --- pbm/storage/storage.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 117f0c51f..335f412ac 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -65,6 +65,8 @@ return Blackhole case string(GCS): return GCS + case string(OSS): + return OSS default: return Undefined } From 2261920de78e8e92148981e7c20d9d2c651f9737 Mon Sep 17 00:00:00 2001 From: heryheming Date: Fri, 15 Aug 2025 17:15:46 +0700 Subject: [PATCH 05/21] feat: add function List --- pbm/storage/oss/oss.go | 46 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 74a1d63e1..fd4f7c194 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "path" + "strings" "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" @@ -81,10 +82,53 @@ func (o *OSS) FileStat(name string) (storage.FileInfo, error) { // List scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. + func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { - return nil, nil + + prfx := path.Join(o.cfg.Prefix, prefix) + if prfx != "" && !strings.HasSuffix(prfx, "/") { + prfx += "/" + } + + var files []storage.FileInfo + var marker *string + for { + res, err := o.ossCli.ListObjects(context.Background(), &oss.ListObjectsRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Prefix: oss.Ptr(prfx), + Marker: marker, + }) + if err != nil { + return nil, errors.Wrap(err, "list OSS objects") + } + for _, obj := range res.Contents { + var key string + if obj.Key != nil { + key = *obj.Key + } + if suffix != "" && !strings.HasSuffix(key, suffix) { + continue + } + if key == "" || strings.HasSuffix(key, "/") { + continue + } + name := strings.TrimPrefix(key, o.cfg.Prefix) + name = strings.TrimPrefix(name, "/") + files = append(files, storage.FileInfo{ + Name: name, + Size: obj.Size, + }) + } + if res.IsTruncated { + marker = res.NextMarker + continue + } + break + } + return files, nil } + // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exist.
func (o *OSS) Delete(name string) error { From 7301e2c5bbcccacea64f5b9ebdaed91ff38edd93 Mon Sep 17 00:00:00 2001 From: heryheming Date: Mon, 18 Aug 2025 15:45:53 +0700 Subject: [PATCH 06/21] fix: Update configureClient --- pbm/storage/oss/client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 2da86d864..02b2bb65d 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -128,8 +128,14 @@ func (c *cred) GetCredentials(ctx context.Context) (osscred.Credentials, error) } func configureClient(config *Config) (*oss.Client, error) { - if config.Region == "" { - return nil, fmt.Errorf("oss region is required") + if config == nil { + return nil, fmt.Errorf("config is nil") + } + if config.Retryer == nil { + config.Retryer = &Retryer{MaxAttempts: 3, MaxBackoff: defaultRetryerMaxBackoff, BaseDelay: defaultRetryBaseDelay} + } + if config.Region == "" || config.Bucket == "" || config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("Missing required OSS config: %+v", config) } cred, err := newCred(config) From 296a2d3e6d14b2201ae9ea2d0d785f0e5d479da9 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 18 Aug 2025 17:46:10 +0700 Subject: [PATCH 07/21] feat: implement the remaining storage function for oss Signed-off-by: Imre Nagi --- pbm/storage/oss/client.go | 17 +++-- pbm/storage/oss/oss.go | 155 ++++++++++++++++++++++++++++++++------ 2 files changed, 143 insertions(+), 29 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 02b2bb65d..ee2bed042 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -7,7 +7,6 @@ import ( "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" - "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" "github.com/aliyun/credentials-go/credentials/providers" ) @@ -61,11 +60,15 @@ func (cfg *Config) Cast() error { cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff } } + if cfg.MaxUploadParts <= 0 { + cfg.MaxUploadParts = maxPart + } return nil } const ( defaultSessionExpiration = 3600 + maxPart = int32(10000) ) func newCred(config *Config) (*cred, error) { @@ -147,12 +150,12 @@ func configureClient(config *Config) (*oss.Client, error) { WithRegion(config.Region). WithCredentialsProvider(cred). WithSignatureVersion(oss.SignatureVersionV4). - WithRetryMaxAttempts(config.Retryer.MaxAttempts). - WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { - ro.MaxAttempts = config.Retryer.MaxAttempts - ro.MaxBackoff = config.Retryer.MaxBackoff - ro.BaseDelay = config.Retryer.BaseDelay - })). + // WithRetryMaxAttempts(config.Retryer.MaxAttempts). + // WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { + // ro.MaxAttempts = config.Retryer.MaxAttempts + // ro.MaxBackoff = config.Retryer.MaxBackoff + // ro.BaseDelay = config.Retryer.BaseDelay + // })). 
WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) if config.EndpointURL != "" { diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index fd4f7c194..010bdffd9 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -1,6 +1,7 @@ package oss import ( + "bytes" "context" "fmt" "io" @@ -48,11 +49,114 @@ func (o *OSS) Type() storage.Type { } func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error { - return nil + opts := storage.GetDefaultOpts() + for _, opt := range options { + if err := opt(opts); err != nil { + return errors.Wrap(err, "processing options for save") + } + } + + // TODO move it somewhere + defaultPartSize := int64(10 * 1024 * 1024) // 10MB + minPartSize := int64(5 * 1024 * 1024) // 5MB + + partSize := storage.ComputePartSize( + opts.Size, + defaultPartSize, + minPartSize, + int64(o.cfg.MaxUploadParts), + int64(o.cfg.UploadPartSize), + ) + + if o.log != nil && opts.UseLogger { + o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + opts.Size, + storage.PrettySize(opts.Size), + partSize, + storage.PrettySize(partSize)) + } + + key := path.Join(o.cfg.Prefix, name) + + // Use multipart upload + initResult, err := o.ossCli.InitiateMultipartUpload(context.Background(), &oss.InitiateMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + }) + if err != nil { + return errors.Wrap(err, "initiate multipart upload") + } + uploadID := initResult.UploadId + + var completeParts []oss.UploadPart + partNumber := int32(1) + buf := make([]byte, partSize) + + for { + n, err := data.Read(buf) + if n > 0 { + uploadPartResult, uerr := o.ossCli.UploadPart(context.Background(), &oss.UploadPartRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + PartNumber: partNumber, + Body: bytes.NewReader(buf[:n]), + }) + if uerr != nil { + _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + }) + return errors.Wrap(uerr, "upload part") + } + completeParts = append(completeParts, oss.UploadPart{ + ETag: uploadPartResult.ETag, + PartNumber: partNumber, + }) + partNumber++ + } + + if err == io.EOF { + break + } + if err != nil { + _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + }) + return errors.Wrap(err, "read chunk") + } + } + + _, err = o.ossCli.CompleteMultipartUpload(context.Background(), &oss.CompleteMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + CompleteMultipartUpload: &oss.CompleteMultipartUpload{ + Parts: completeParts, + }, + }) + + return errors.Wrap(err, "complete multipart upload") } func (o *OSS) SourceReader(name string) (io.ReadCloser, error) { - return nil, nil + res, err := o.ossCli.GetObject(context.Background(), &oss.GetObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + }) + if err != nil { + var serr *oss.ServiceError + if errors.As(err, &serr) && serr.Code == "NoSuchKey" { + return nil, storage.ErrNotExist + } + return nil, errors.Wrap(err, "get object") + } + + return res.Body, nil } // FileStat returns file info. It returns an error if the file is empty or does not exist.
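The Save implementation above drives the standard OSS multipart sequence by hand: initiate, upload parts from a fixed-size buffer, complete, and abort on any failure. A condensed sketch of that call order using the same alibabacloud-oss-go-sdk-v2 calls that appear in the patch — io.ReadFull is substituted for the manual Read loop, and the function and variable names here are illustrative, not part of the patch:

package main

import (
	"bytes"
	"context"
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

// multipartUpload streams src to bucket/key part by part, aborting the
// pending upload if any step fails so no orphaned parts are left behind.
func multipartUpload(cli *oss.Client, bucket, key string, src io.Reader, partSize int64) error {
	ctx := context.Background()

	init, err := cli.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucket),
		Key:    oss.Ptr(key),
	})
	if err != nil {
		return err
	}

	// best-effort cleanup of the pending upload on failure
	abort := func() {
		_, _ = cli.AbortMultipartUpload(ctx, &oss.AbortMultipartUploadRequest{
			Bucket: oss.Ptr(bucket), Key: oss.Ptr(key), UploadId: init.UploadId,
		})
	}

	var parts []oss.UploadPart
	buf := make([]byte, partSize)
	for n := int32(1); ; n++ {
		r, rerr := io.ReadFull(src, buf)
		if r > 0 {
			up, uerr := cli.UploadPart(ctx, &oss.UploadPartRequest{
				Bucket: oss.Ptr(bucket), Key: oss.Ptr(key),
				UploadId: init.UploadId, PartNumber: n,
				Body: bytes.NewReader(buf[:r]),
			})
			if uerr != nil {
				abort()
				return uerr
			}
			parts = append(parts, oss.UploadPart{ETag: up.ETag, PartNumber: n})
		}
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			break // source drained; the last (possibly short) part is already sent
		}
		if rerr != nil {
			abort()
			return rerr
		}
	}

	// the upload only becomes a visible object once Complete succeeds
	_, err = cli.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
		Bucket: oss.Ptr(bucket), Key: oss.Ptr(key),
		UploadId:                init.UploadId,
		CompleteMultipartUpload: &oss.CompleteMultipartUpload{Parts: parts},
	})
	return err
}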
@@ -82,45 +186,46 @@ func (o *OSS) FileStat(name string) (storage.FileInfo, error) { // List scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. - func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { - prfx := path.Join(o.cfg.Prefix, prefix) if prfx != "" && !strings.HasSuffix(prfx, "/") { prfx += "/" } var files []storage.FileInfo - var marker *string + var continuationToken *string for { - res, err := o.ossCli.ListObjects(context.Background(), &oss.ListObjectsRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Prefix: oss.Ptr(prfx), - Marker: marker, + res, err := o.ossCli.ListObjectsV2(context.Background(), &oss.ListObjectsV2Request{ + Bucket: oss.Ptr(o.cfg.Bucket), + Prefix: oss.Ptr(prfx), + ContinuationToken: continuationToken, }) if err != nil { return nil, errors.Wrap(err, "list OSS objects") } for _, obj := range res.Contents { - var key string + key := "" if obj.Key != nil { key = *obj.Key } - if suffix != "" && !strings.HasSuffix(key, suffix) { + + f := strings.TrimPrefix(key, prfx) + if len(f) == 0 { continue } - if key == "" || strings.HasSuffix(key, "/") { - continue + if f[0] == '/' { + f = f[1:] + } + + if strings.HasSuffix(f, suffix) { + files = append(files, storage.FileInfo{ + Name: f, + Size: obj.Size, + }) } - name := strings.TrimPrefix(key, o.cfg.Prefix) - name = strings.TrimPrefix(name, "/") - files = append(files, storage.FileInfo{ - Name: name, - Size: obj.Size, - }) } if res.IsTruncated { - marker = res.NextMarker + continuationToken = res.NextContinuationToken continue } break @@ -128,7 +233,6 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { return files, nil } - // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exists. 
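// (Note: OSS DeleteObject itself reports success even when the key is
// absent, so honoring the not-exists contract requires an explicit
// existence check; the delete call alone cannot distinguish the two.)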
func (o *OSS) Delete(name string) error { @@ -146,5 +250,12 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src objec/file under dst name func (o *OSS) Copy(src, dst string) error { - return nil + _, err := o.ossCli.CopyObject(context.Background(), &oss.CopyObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), + SourceBucket: oss.Ptr(o.cfg.Bucket), + SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), + }) + + return errors.Wrap(err, "copy object") } From 5c9ad8167f57491f12702aef26343ad29a783eae Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 18 Aug 2025 20:35:53 +0700 Subject: [PATCH 08/21] refactor(PBM-1588): move default part size constants to client.go --- pbm/storage/oss/client.go | 45 +++++++++++++++++++++++---------------- pbm/storage/oss/oss.go | 4 ---- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index ee2bed042..dfed28da9 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -7,15 +7,20 @@ import ( "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" "github.com/aliyun/credentials-go/credentials/providers" ) const ( defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb + minPartSize int64 = 5 * 1024 * 1024 // 5MB defaultS3Region = "ap-southeast-5" + maxPart int32 = 10000 - defaultRetryBaseDelay = 30 * time.Millisecond - defaultRetryerMaxBackoff = 300 * time.Second + defaultRetryMaxAttempts = 5 + defaultRetryBaseDelay = 30 * time.Millisecond + defaultRetryerMaxBackoff = 300 * time.Second + defaultSessionDurationSeconds = 3600 ) //nolint:lll @@ -53,12 +58,21 @@ func (cfg *Config) Cast() error { cfg.Region = defaultS3Region } if cfg.Retryer != nil { + if cfg.Retryer.MaxAttempts == 0 { + cfg.Retryer.MaxAttempts = defaultRetryMaxAttempts + } if cfg.Retryer.BaseDelay == 0 { cfg.Retryer.BaseDelay = defaultRetryBaseDelay } if cfg.Retryer.MaxBackoff == 0 { cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff } + } else { + cfg.Retryer = &Retryer{ + MaxAttempts: defaultRetryMaxAttempts, + MaxBackoff: defaultRetryerMaxBackoff, + BaseDelay: defaultRetryBaseDelay, + } } if cfg.MaxUploadParts <= 0 { cfg.MaxUploadParts = maxPart @@ -66,11 +80,6 @@ func (cfg *Config) Cast() error { return nil } -const ( - defaultSessionExpiration = 3600 - maxPart = int32(10000) -) - func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error @@ -101,7 +110,7 @@ func newCred(config *Config) (*cred, error) { WithCredentialsProvider(internalProvider). WithRoleArn(config.Credentials.RoleARN). WithRoleSessionName(config.Credentials.SessionName). - WithDurationSeconds(defaultSessionExpiration). + WithDurationSeconds(defaultSessionDurationSeconds). 
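			// One-hour STS sessions; the credentials provider is assumed
			// to refresh the role credentials before expiry, so uploads
			// longer than a session are not interrupted.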
Build() if err != nil { return nil, fmt.Errorf("ram role credential provider: %w", err) @@ -134,10 +143,10 @@ func configureClient(config *Config) (*oss.Client, error) { if config == nil { return nil, fmt.Errorf("config is nil") } - if config.Retryer == nil { - config.Retryer = &Retryer{MaxAttempts: 3, MaxBackoff: defaultRetryerMaxBackoff, BaseDelay: defaultRetryBaseDelay} - } - if config.Region == "" || config.Bucket == "" || config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" { + + if config.Region == "" || config.Bucket == "" || + config.Credentials.AccessKeyID == "" || + config.Credentials.AccessKeySecret == "" { return nil, fmt.Errorf("Missing required OSS config: %+v", config) } @@ -150,12 +159,12 @@ func configureClient(config *Config) (*oss.Client, error) { WithRegion(config.Region). WithCredentialsProvider(cred). WithSignatureVersion(oss.SignatureVersionV4). - // WithRetryMaxAttempts(config.Retryer.MaxAttempts). - // WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { - // ro.MaxAttempts = config.Retryer.MaxAttempts - // ro.MaxBackoff = config.Retryer.MaxBackoff - // ro.BaseDelay = config.Retryer.BaseDelay - // })). + WithRetryMaxAttempts(config.Retryer.MaxAttempts). + WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { + ro.MaxAttempts = config.Retryer.MaxAttempts + ro.MaxBackoff = config.Retryer.MaxBackoff + ro.BaseDelay = config.Retryer.BaseDelay + })). WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) if config.EndpointURL != "" { diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 010bdffd9..bd074a915 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -56,10 +56,6 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } - // TODO move it somewhere - defaultPartSize := int64(10 * 1024 * 1024) // 10MB - minPartSize := int64(5 * 1024 * 1024) // 5MB - partSize := storage.ComputePartSize( opts.Size, defaultPartSize, From 3b299a25d575a300acfb7d0e74bffda7a9c5f266 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 09:04:45 +0700 Subject: [PATCH 09/21] fix: add missing cast for oss Signed-off-by: Imre Nagi --- pbm/config/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index b4027d72a..64644337c 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -308,6 +308,8 @@ func (s *StorageConf) Cast() error { return s.Filesystem.Cast() case storage.S3: return s.S3.Cast() + case storage.OSS: + return s.OSS.Cast() case storage.GCS: return nil case storage.Azure: // noop From 710853e61e056035c549cd40aefb8293d8af1361 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 09:57:28 +0700 Subject: [PATCH 10/21] feat: add OSS credential masking and clone method for Config --- pbm/config/config.go | 13 +++++++++++++ pbm/storage/oss/client.go | 9 +++++++++ 2 files changed, 22 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 64644337c..02849bc11 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -161,6 +161,17 @@ func (c *Config) String() string { c.Storage.GCS.Credentials.HMACSecret = "***" } } + if c.Storage.OSS != nil { + if c.Storage.OSS.Credentials.AccessKeyID != "" { + c.Storage.OSS.Credentials.AccessKeyID = "***" + } + if c.Storage.OSS.Credentials.AccessKeySecret != "" { + c.Storage.OSS.Credentials.AccessKeySecret = "***" + } + if c.Storage.OSS.Credentials.SecurityToken != "" { + c.Storage.OSS.Credentials.SecurityToken = "***" + } + } b, 
err := yaml.Marshal(c) if err != nil { @@ -249,6 +260,8 @@ func (s *StorageConf) Clone() *StorageConf { rv.Azure = s.Azure.Clone() case storage.GCS: rv.GCS = s.GCS.Clone() + case storage.OSS: + rv.OSS = s.OSS.Clone() case storage.Blackhole: // no config } diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index dfed28da9..ff3b56e7a 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -80,6 +80,15 @@ func (cfg *Config) Cast() error { return nil } +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + rv := *cfg + return &rv +} + func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error From 4b178275f8142a829dad18b49103c2aa19e65b58 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 10:02:59 +0700 Subject: [PATCH 11/21] fix: update ConnectTimeout type to time.Duration and set default value --- pbm/storage/oss/client.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index ff3b56e7a..f25229767 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -21,6 +21,7 @@ const ( defaultRetryBaseDelay = 30 * time.Millisecond defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 + defaultConnectTimeout = 5 * time.Second ) //nolint:lll @@ -34,9 +35,9 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` - ConnectTimeout int `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` - UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` - MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` + UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` + MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` } type Retryer struct { @@ -57,6 +58,9 @@ func (cfg *Config) Cast() error { if cfg.Region == "" { cfg.Region = defaultS3Region } + if cfg.ConnectTimeout == 0 { + cfg.ConnectTimeout = defaultConnectTimeout + } if cfg.Retryer != nil { if cfg.Retryer.MaxAttempts == 0 { cfg.Retryer.MaxAttempts = defaultRetryMaxAttempts @@ -174,7 +178,7 @@ func configureClient(config *Config) (*oss.Client, error) { ro.MaxBackoff = config.Retryer.MaxBackoff ro.BaseDelay = config.Retryer.BaseDelay })). 
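	// With ConnectTimeout now a time.Duration, how a YAML value decodes
	// depends on the unmarshaler: a stock decoder reads a bare integer as
	// nanoseconds, so configs likely need duration-typed values (or a
	// custom parser) instead of the plain seconds used before.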
- WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) + WithConnectTimeout(config.ConnectTimeout) if config.EndpointURL != "" { ossConfig = ossConfig.WithEndpoint(config.EndpointURL) From 16ca10f4d8c679d31d8000f9d29b18adcded3b21 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 15:43:49 +0700 Subject: [PATCH 12/21] fix: use multipart upload and copy for upload Signed-off-by: Imre Nagi --- pbm/storage/oss/oss.go | 97 +++++++++--------------------------------- 1 file changed, 20 insertions(+), 77 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index bd074a915..824447847 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -1,7 +1,6 @@ package oss import ( - "bytes" "context" "fmt" "io" @@ -56,87 +55,31 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } + if opts.Size > 0 { + o.log.Debug("uploading %s with size %d", name, opts.Size) + } else { + o.log.Debug("uploading %s", name) + } + partSize := storage.ComputePartSize( opts.Size, - defaultPartSize, - minPartSize, - int64(o.cfg.MaxUploadParts), - int64(o.cfg.UploadPartSize), + oss.DefaultPartSize, + oss.MinPartSize, + int64(oss.MaxUploadParts), + int64(oss.DefaultUploadPartSize), ) - if o.log != nil && opts.UseLogger { - o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", - name, - opts.Size, - storage.PrettySize(opts.Size), - partSize, - storage.PrettySize(partSize)) - } - - key := path.Join(o.cfg.Prefix, name) - - // Use multipart upload - initResult, err := o.ossCli.InitiateMultipartUpload(context.Background(), &oss.InitiateMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), + uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { + uo.PartSize = partSize + uo.LeavePartsOnError = true }) - if err != nil { - return errors.Wrap(err, "initiate multipart upload") - } - uploadID := initResult.UploadId - var completeParts []oss.UploadPart - partNumber := int32(1) - buf := make([]byte, partSize) - - for { - n, err := data.Read(buf) - if n > 0 { - uploadPartResult, uerr := o.ossCli.UploadPart(context.Background(), &oss.UploadPartRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - PartNumber: partNumber, - Body: bytes.NewReader(buf[:n]), - }) - if uerr != nil { - _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - }) - return errors.Wrap(uerr, "upload part") - } - completeParts = append(completeParts, oss.UploadPart{ - ETag: uploadPartResult.ETag, - PartNumber: partNumber, - }) - partNumber++ - } - - if err == io.EOF { - break - } - if err != nil { - _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - }) - return errors.Wrap(err, "read chunk") - } - } - - _, err = o.ossCli.CompleteMultipartUpload(context.Background(), &oss.CompleteMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - CompleteMultipartUpload: &oss.CompleteMultipartUpload{ - Parts: completeParts, - }, - }) + _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + }, data) - return errors.Wrap(err, "complete multipart upload") + return errors.Wrap(err, "put object") } func (o *OSS) SourceReader(name 
string) (io.ReadCloser, error) { @@ -246,12 +189,12 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src objec/file under dst name func (o *OSS) Copy(src, dst string) error { - _, err := o.ossCli.CopyObject(context.Background(), &oss.CopyObjectRequest{ + uploader := oss.NewCopier(o.ossCli) + _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), SourceBucket: oss.Ptr(o.cfg.Bucket), SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), }) - return errors.Wrap(err, "copy object") } From b06b1236f77181ebac2c7cbe13ebae6ab37a1076 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 16:24:25 +0700 Subject: [PATCH 13/21] fix: update part size parameters in OSS Save method --- pbm/storage/oss/oss.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 824447847..144f99b1d 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -63,15 +63,14 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error partSize := storage.ComputePartSize( opts.Size, - oss.DefaultPartSize, + defaultPartSize, oss.MinPartSize, - int64(oss.MaxUploadParts), - int64(oss.DefaultUploadPartSize), + int64(o.cfg.MaxUploadParts), + int64(o.cfg.UploadPartSize), ) uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize - uo.LeavePartsOnError = true }) _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ From 723d4b9d0d265d4ada987c52988a9e8f48bf589a Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 18:53:07 +0700 Subject: [PATCH 14/21] chore: add unit tests for ComputePartSize function --- pbm/storage/storage_test.go | 99 +++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 pbm/storage/storage_test.go diff --git a/pbm/storage/storage_test.go b/pbm/storage/storage_test.go new file mode 100644 index 000000000..74809190c --- /dev/null +++ b/pbm/storage/storage_test.go @@ -0,0 +1,99 @@ +package storage_test + +import ( + "testing" + + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +func TestComputePartSize(t *testing.T) { + const ( + _ = iota + KB = 1 << (10 * iota) + MB + GB + ) + + const ( + defaultSize = 10 * MB + minSize = 5 * MB + maxParts = 10000 + ) + + tests := []struct { + name string + fileSize int64 + userSize int64 + want int64 + }{ + { + name: "default", + fileSize: 0, + userSize: 0, + want: defaultSize, + }, + { + name: "user size provided", + fileSize: 0, + userSize: 20 * MB, + want: 20 * MB, + }, + { + name: "user size less than min", + fileSize: 0, + userSize: 4 * MB, + want: minSize, + }, + { + name: "file size requires larger part size", + fileSize: 100 * GB, + userSize: 0, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "file size requires larger part size than user size", + fileSize: 100 * GB, + userSize: 10 * MB, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "file size does not require larger part size", + fileSize: 50 * GB, + userSize: 0, + want: defaultSize, + }, + { + name: "file size with user size", + fileSize: 50 * GB, + userSize: 12 * MB, + want: 12 * MB, + }, + { + name: "zero file size", + fileSize: 0, + userSize: 0, + want: defaultSize, + }, + { + name: "zero user size", + fileSize: 100 * GB, + userSize: 0, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "negative user size", + fileSize: 0, + userSize: -1, + want: defaultSize, + 
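			// a negative hint degrades to the default, same as zero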
}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := storage.ComputePartSize(tt.fileSize, defaultSize, minSize, maxParts, tt.userSize) + if got != tt.want { + t.Errorf("ComputePartSize() = %v, want %v", got, tt.want) + } + }) + } +} From 15477fff6de6f3d4c646d8a4bd7d32051f804bec Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Wed, 20 Aug 2025 05:54:16 +0700 Subject: [PATCH 15/21] fix: remove redundant path.Join call in Delete method and correct comment in Copy method --- pbm/storage/oss/oss.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 144f99b1d..636f935c3 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -175,7 +175,6 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { // It returns storage.ErrNotExist if a file doesn't exists. func (o *OSS) Delete(name string) error { key := path.Join(o.cfg.Prefix, name) - path.Join(o.cfg.Prefix, name) _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(key), @@ -186,7 +185,7 @@ func (o *OSS) Delete(name string) error { return nil } -// Copy makes a copy of the src objec/file under dst name +// Copy makes a copy of the src object/file under dst name func (o *OSS) Copy(src, dst string) error { uploader := oss.NewCopier(o.ossCli) _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ From 6acd6a3573035384be70979ae62f9e13a43221a4 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Thu, 28 Aug 2025 06:18:30 +0700 Subject: [PATCH 16/21] feat: add server-side encryption support and update upload part size handling --- pbm/storage/oss/client.go | 15 ++++++++-- pbm/storage/oss/oss.go | 60 ++++++++++++++++++++++++++++++++------- 2 files changed, 61 insertions(+), 14 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index f25229767..27cd41b47 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,8 +12,6 @@ import ( ) const ( - defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb - minPartSize int64 = 5 * 1024 * 1024 // 5MB defaultS3Region = "ap-southeast-5" maxPart int32 = 10000 @@ -36,8 +34,16 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` - UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` + UploadPartSize int64 `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + + ServerSideEncryption *SSE `bson:"serverSideEncryption,omitempty" json:"serverSideEncryption,omitempty" yaml:"serverSideEncryption,omitempty"` +} + +type SSE struct { + EncryptionMethod string `bson:"encryptionMethod,omitempty" json:"encryptionMethod,omitempty" yaml:"encryptionMethod,omitempty"` + EncryptionAlgorithm string `bson:"encryptionAlgorithm,omitempty" json:"encryptionAlgorithm,omitempty" yaml:"encryptionAlgorithm,omitempty"` + EncryptionKeyID string `bson:"encryptionKeyId,omitempty" json:"encryptionKeyId,omitempty" yaml:"encryptionKeyId,omitempty"` } type Retryer struct { @@ -81,6 +87,9 @@ func (cfg *Config) Cast() error { if cfg.MaxUploadParts <= 0 { cfg.MaxUploadParts = maxPart } + if cfg.UploadPartSize <= 0 { + 
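		// Fall back to the SDK default part size. The exact value is
		// SDK-defined (assumed ~6 MiB in alibabacloud-oss-go-sdk-v2).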
cfg.UploadPartSize = oss.DefaultUploadPartSize + } return nil } diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 636f935c3..5589a8469 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -16,6 +16,12 @@ import ( var _ storage.Storage = &OSS{} +const ( + ServerSideEncryptionAes256 = "AES256" + ServerSideEncryptionKMS = "KMS" + ServerSideEncryptionSM4 = "SM4" +) + func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { if err := cfg.Cast(); err != nil { return nil, fmt.Errorf("cast config: %w", err) @@ -61,9 +67,28 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error o.log.Debug("uploading %s", name) } + req := &oss.PutObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + } + + if o.cfg.ServerSideEncryption != nil { + sse := o.cfg.ServerSideEncryption + switch sse.EncryptionMethod { + case ServerSideEncryptionSM4: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionSM4) + case ServerSideEncryptionKMS: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionKMS) + req.ServerSideDataEncryption = oss.Ptr(sse.EncryptionAlgorithm) + req.ServerSideEncryptionKeyId = oss.Ptr(sse.EncryptionKeyID) + default: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionAes256) + } + } + partSize := storage.ComputePartSize( opts.Size, - defaultPartSize, + o.cfg.UploadPartSize, oss.MinPartSize, int64(o.cfg.MaxUploadParts), int64(o.cfg.UploadPartSize), @@ -72,11 +97,7 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize }) - - _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), - }, data) + _, err := uploader.UploadFrom(context.Background(), req, data) return errors.Wrap(err, "put object") } @@ -101,10 +122,12 @@ func (o *OSS) SourceReader(name string) (io.ReadCloser, error) { func (o *OSS) FileStat(name string) (storage.FileInfo, error) { inf := storage.FileInfo{} - res, err := o.ossCli.HeadObject(context.Background(), &oss.HeadObjectRequest{ + req := &oss.HeadObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), - }) + } + + res, err := o.ossCli.HeadObject(context.Background(), req) if err != nil { var serr *oss.ServiceError if errors.As(err, &serr) && serr.Code == "NoSuchKey" { @@ -187,12 +210,27 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src object/file under dst name func (o *OSS) Copy(src, dst string) error { - uploader := oss.NewCopier(o.ossCli) - _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ + req := &oss.CopyObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), SourceBucket: oss.Ptr(o.cfg.Bucket), SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), - }) + } + + if o.cfg.ServerSideEncryption != nil { + sse := o.cfg.ServerSideEncryption + switch sse.EncryptionMethod { + case ServerSideEncryptionSM4: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionSM4) + case ServerSideEncryptionKMS: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionKMS) + req.ServerSideDataEncryption = oss.Ptr(sse.EncryptionAlgorithm) + req.ServerSideEncryptionKeyId = oss.Ptr(sse.EncryptionKeyID) + default: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionAes256) + } + } + copier := oss.NewCopier(o.ossCli, func(co *oss.CopierOptions) {}) + _, 
err := copier.Copy(context.Background(), req) return errors.Wrap(err, "copy object") } From b7f5927c561d40af93a15a19fbfbea9971ebc889 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Wed, 24 Sep 2025 10:26:36 +0700 Subject: [PATCH 17/21] fix: implement missing storage method --- pbm/storage/oss/oss.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 5589a8469..8d3169b70 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -234,3 +234,7 @@ func (o *OSS) Copy(src, dst string) error { _, err := copier.Copy(context.Background(), req) return errors.Wrap(err, "copy object") } + +func (o *OSS) DownloadStat() storage.DownloadStat { + return storage.DownloadStat{} +} From 311ba918c6c6f2d96daed02b32951c2867898413 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Wed, 24 Sep 2025 14:36:43 +0700 Subject: [PATCH 18/21] feat: use NewSplitMergeMW Signed-off-by: Imre Nagi --- pbm/storage/oss/client.go | 17 +++++++++++++++++ pbm/storage/oss/oss.go | 6 +++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 27cd41b47..dc8f1abab 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -20,6 +20,7 @@ const ( defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 defaultConnectTimeout = 5 * time.Second + defaultMaxObjSizeGB = 48800 // 48.8 TB ) //nolint:lll @@ -36,6 +37,7 @@ type Config struct { ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` UploadPartSize int64 `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` ServerSideEncryption *SSE `bson:"serverSideEncryption,omitempty" json:"serverSideEncryption,omitempty" yaml:"serverSideEncryption,omitempty"` } @@ -99,9 +101,24 @@ func (cfg *Config) Clone() *Config { } rv := *cfg + if cfg.MaxObjSizeGB != nil { + v := *cfg.MaxObjSizeGB + rv.MaxObjSizeGB = &v + } + if cfg.Retryer != nil { + v := *cfg.Retryer + rv.Retryer = &v + } return &rv } +func (cfg *Config) GetMaxObjSizeGB() float64 { + if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { + return *cfg.MaxObjSizeGB + } + return defaultMaxObjSizeGB +} + func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 8d3169b70..0101a6ec8 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -22,7 +22,7 @@ const ( ServerSideEncryptionSM4 = "SM4" ) -func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { +func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { if err := cfg.Cast(); err != nil { return nil, fmt.Errorf("cast config: %w", err) } @@ -38,8 +38,8 @@ func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { log: l, ossCli: client, } - - return o, nil + + return storage.NewSplitMergeMW(o, cfg.GetMaxObjSizeGB()), nil } type OSS struct { From 6788bf5f3f7a85745d48cb2bf9b6fb5d9fc75b49 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 09:46:48 +0700 Subject: [PATCH 19/21] fix: resolve review comments --- pbm/storage/oss/client.go | 8 ++++++-- pbm/storage/oss/oss.go | 19 +++++++++++-------- 2 files changed, 17 
insertions(+), 10 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index dc8f1abab..3f6fc6b6a 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,7 +12,7 @@ import ( ) const ( - defaultS3Region = "ap-southeast-5" + defaultS3Region = "us-east-1" maxPart int32 = 10000 defaultRetryMaxAttempts = 5 @@ -20,7 +20,7 @@ const ( defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 defaultConnectTimeout = 5 * time.Second - defaultMaxObjSizeGB = 48800 // 48.8 TB + defaultMaxObjSizeGB = 48700 // 48.8 TB ) //nolint:lll @@ -109,6 +109,10 @@ func (cfg *Config) Clone() *Config { v := *cfg.Retryer rv.Retryer = &v } + if cfg.ServerSideEncryption != nil { + a := *cfg.ServerSideEncryption + rv.ServerSideEncryption = &a + } return &rv } diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 0101a6ec8..590dbe4f0 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -38,7 +38,7 @@ func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { log: l, ossCli: client, } - + return storage.NewSplitMergeMW(o, cfg.GetMaxObjSizeGB()), nil } @@ -61,12 +61,6 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } - if opts.Size > 0 { - o.log.Debug("uploading %s with size %d", name, opts.Size) - } else { - o.log.Debug("uploading %s", name) - } - req := &oss.PutObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), @@ -88,12 +82,21 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error partSize := storage.ComputePartSize( opts.Size, - o.cfg.UploadPartSize, + oss.DefaultUploadPartSize, oss.MinPartSize, int64(o.cfg.MaxUploadParts), int64(o.cfg.UploadPartSize), ) + if o.log != nil && opts.UseLogger { + o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + opts.Size, + storage.PrettySize(opts.Size), + partSize, + storage.PrettySize(partSize)) + } + uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize }) From 8667b86b9974cdf19633272e10002bd54c140bc1 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 10:08:26 +0700 Subject: [PATCH 20/21] fix: call FileStat before deleting file --- pbm/storage/oss/oss.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 590dbe4f0..fe1a88bf5 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -200,6 +200,10 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exists. 
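// (The FileStat probe below exists because OSS DeleteObject succeeds even
// for absent keys; without a prior HEAD request the not-found case would
// be indistinguishable from a successful delete. The check is inherently
// racy -- the object can disappear between the probe and the delete.)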
func (o *OSS) Delete(name string) error { + if _, err := o.FileStat(name); err == storage.ErrNotExist { + return err + } + key := path.Join(o.cfg.Prefix, name) _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), From fb43864ab8ead69695e7f7be6b791ea07b076898 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 10:09:40 +0700 Subject: [PATCH 21/21] fix: change incorrect default var name --- pbm/storage/oss/client.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 3f6fc6b6a..7b897edd5 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,8 +12,8 @@ import ( ) const ( - defaultS3Region = "us-east-1" - maxPart int32 = 10000 + defaultOSSRegion = "us-east-1" + maxPart int32 = 10000 defaultRetryMaxAttempts = 5 defaultRetryBaseDelay = 30 * time.Millisecond @@ -64,7 +64,7 @@ type Credentials struct { func (cfg *Config) Cast() error { if cfg.Region == "" { - cfg.Region = defaultS3Region + cfg.Region = defaultOSSRegion } if cfg.ConnectTimeout == 0 { cfg.ConnectTimeout = defaultConnectTimeout
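		// Effective defaults once Cast has run, per the constants above:
		// region us-east-1, connect timeout 5s, retryer with 5 attempts,
		// 30ms base delay and 300s max backoff, at most 10000 upload
		// parts, and the SDK's default upload part size.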