diff --git a/tensorstore/kvstore/BUILD b/tensorstore/kvstore/BUILD index c878dee8c..35b35cc77 100644 --- a/tensorstore/kvstore/BUILD +++ b/tensorstore/kvstore/BUILD @@ -14,6 +14,7 @@ DRIVER_DOCS = [ "neuroglancer_uint64_sharded", "ocdbt", "s3", + # "s3_sdk", "tsgrpc", "zarr3_sharding_indexed", "zip", diff --git a/tensorstore/kvstore/s3/BUILD b/tensorstore/kvstore/s3/BUILD index 5d5473802..e00e29088 100644 --- a/tensorstore/kvstore/s3/BUILD +++ b/tensorstore/kvstore/s3/BUILD @@ -222,6 +222,34 @@ tensorstore_cc_test( ], ) +tensorstore_cc_library( + name = "new_s3_request_builder", + srcs = [ + "new_s3_request_builder.cc", + ], + hdrs = [ + "new_s3_request_builder.h" + ], + deps = [ + "//tensorstore/kvstore/s3_sdk:s3_context", + "//tensorstore/internal/http", + "@com_github_aws_cpp_sdk//:core", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:cord", + ] +) + +tensorstore_cc_test( + name = "new_s3_request_builder_test", + srcs = [ + "new_s3_request_builder_test.cc", + ], + deps = [ + ":new_s3_request_builder", + "@com_google_googletest//:gtest_main", + ] +) + tensorstore_cc_library( name = "validate", srcs = [ diff --git a/tensorstore/kvstore/s3/new_s3_request_builder.cc b/tensorstore/kvstore/s3/new_s3_request_builder.cc new file mode 100644 index 000000000..e69de29bb diff --git a/tensorstore/kvstore/s3/new_s3_request_builder.h b/tensorstore/kvstore/s3/new_s3_request_builder.h new file mode 100644 index 000000000..9e4e28150 --- /dev/null +++ b/tensorstore/kvstore/s3/new_s3_request_builder.h @@ -0,0 +1,143 @@ +#include +#include +#include +#include + + +#include +#include +#include +#include + +#include "absl/strings/cord.h" + +#include "tensorstore/internal/http/http_request.h" +#include "tensorstore/kvstore/s3_sdk/s3_context.h" + +namespace tensorstore { +namespace internal_kvstore_s3 { + +// Make an absl::Cord look like a streambuf +class CordStreambuf : public std::streambuf { +public: + CordStreambuf(const absl::Cord& cord) : cord_(cord), 
current_(cord_.char_begin()) { + setg(nullptr, nullptr, nullptr); + } + +protected: + // Refill the get area of the buffer + int_type underflow() override { + if (current_ == cord_.char_end()) { + return traits_type::eof(); + } + + // Set buffer pointers for the next character + setg(const_cast(&*current_), + const_cast(&*current_), + const_cast(&*std::next(current_))); + + return traits_type::to_int_type(*current_++); + } + +private: + const absl::Cord& cord_; + absl::Cord::CharIterator current_; +}; + +// Make an absl::Cord look like an iostream +class CordIOStream : public std::iostream { +public: + CordIOStream(const absl::Cord& cord) : std::iostream(&buffer_), buffer_(cord) { + rdbuf(&buffer_); + } + +private: + CordStreambuf buffer_; +}; + +class AwsHttpRequestAdapter : public Aws::Http::Standard::StandardHttpRequest { +private: + static Aws::Http::HttpMethod FromStringMethod(std::string_view method) { + if(method == "GET") { + return Aws::Http::HttpMethod::HTTP_GET; + } else if (method == "PUT") { + return Aws::Http::HttpMethod::HTTP_PUT; + } else if (method == "HEAD") { + return Aws::Http::HttpMethod::HTTP_HEAD; + } else if (method == "DELETE") { + return Aws::Http::HttpMethod::HTTP_DELETE; + } else if (method == "POST") { + return Aws::Http::HttpMethod::HTTP_POST; + } else if (method == "PATCH") { + return Aws::Http::HttpMethod::HTTP_PATCH; + } else { + // NOTE: return an error + return Aws::Http::HttpMethod::HTTP_GET; + } + } + +public: + AwsHttpRequestAdapter(std::string_view method, std::string endpoint_url) : + Aws::Http::Standard::StandardHttpRequest(Aws::Http::URI(Aws::String(endpoint_url)), + FromStringMethod(method)) {} +}; + +/// Similar interface to S3RequestBuilder, +/// but builds an AwsHttpRequestAdapter internally +class NewS3RequestBuilder { +public: + NewS3RequestBuilder(std::string_view method, std::string endpoint_url) : + request_(method, endpoint_url) {} + + NewS3RequestBuilder & AddBody(const absl::Cord & body) { + // NOTE: eliminate 
allocation + auto cord_adapter = std::make_shared(body); + request_.AddContentBody(cord_adapter); + return *this; + } + + NewS3RequestBuilder & AddHeader(std::string_view header) { + auto delim_pos = header.find(':'); + assert(delim_pos != std::string_view::npos); + // NOTE: string copies + request_.SetHeaderValue(Aws::String(header.substr(0, delim_pos)), + Aws::String(header.substr(delim_pos + 1))); + return *this; + } + + NewS3RequestBuilder & AddQueryParameter(std::string key, std::string value) { + // Note: string copies + request_.AddQueryStringParameter(key.c_str(), Aws::String(value)); + return *this; + } + + internal_http::HttpRequest BuildRequest(AwsContext ctx) { + auto signer = Aws::Client::AWSAuthV4Signer(ctx.cred_provider_, "s3", "us-east-1"); + assert(!request_.HasAuthorization()); + auto succeeded = signer.SignRequest(request_, true); + assert(succeeded); + assert(request_.HasAuthorization()); + auto method = Aws::Http::HttpMethodMapper::GetNameForHttpMethod(request_.GetMethod()); + auto aws_headers = request_.GetHeaders(); + + std::vector headers; + headers.reserve(aws_headers.size()); + + for(auto & pair: aws_headers) { + headers.emplace_back(absl::StrFormat("%s: %s", pair.first, pair.second)); + } + + return internal_http::HttpRequest{ + std::move(method), + std::string(request_.GetURIString(true)), + "", + headers}; + } + +public: + std::shared_ptr body_; + AwsHttpRequestAdapter request_; +}; + +} // namespace internal_kvstore_s3 +} // namespace tensorstore diff --git a/tensorstore/kvstore/s3/new_s3_request_builder_test.cc b/tensorstore/kvstore/s3/new_s3_request_builder_test.cc new file mode 100644 index 000000000..da5c4c62d --- /dev/null +++ b/tensorstore/kvstore/s3/new_s3_request_builder_test.cc @@ -0,0 +1,24 @@ +#include + +#include "tensorstore/kvstore/s3_sdk/s3_context.h" +#include "tensorstore/kvstore/s3/new_s3_request_builder.h" + + +using ::tensorstore::internal_kvstore_s3::NewS3RequestBuilder; + +namespace { + 
+TEST(NewS3RequestBuilderTest, Basic) { + auto ctx = tensorstore::internal_kvstore_s3::GetAwsContext(); + auto builder = NewS3RequestBuilder("get", "http://bucket") + .AddBody(absl::Cord{"foobar"}) + .AddHeader("foo: bar") + .AddQueryParameter("qux", "baz"); + + auto req = builder.BuildRequest(*ctx); + EXPECT_TRUE(builder.request_.HasAuthorization()); + + ABSL_LOG(INFO) << req; +} + +} // namespace diff --git a/tensorstore/kvstore/s3_sdk/BUILD b/tensorstore/kvstore/s3_sdk/BUILD new file mode 100644 index 000000000..03807f1a8 --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/BUILD @@ -0,0 +1,122 @@ +# Placeholder: load py_binary +load("//bazel:tensorstore.bzl", "tensorstore_cc_library", "tensorstore_cc_test") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +filegroup( + name = "doc_sources", + srcs = glob([ + "*.rst", + "*.yml", + ]), +) + + +tensorstore_cc_library( + name = "cord_streambuf", + srcs = ["cord_streambuf.cc"], + hdrs = ["cord_streambuf.h"], + deps = [ + "@com_google_absl//absl/strings:cord", + ] +) + + +tensorstore_cc_library( + name = "s3_context", + srcs = ["s3_context.cc"], + hdrs = ["s3_context.h"], + deps = [ + ":cord_streambuf", + "//tensorstore/util:executor", + "//tensorstore/internal/http", + "//tensorstore/internal/http:curl_transport", + "//tensorstore/internal/thread:thread_pool", + "@com_google_absl//absl/log:absl_log", + "@com_google_absl//absl/synchronization", + "@com_github_aws_cpp_sdk//:core", + ] +) + +tensorstore_cc_test( + name = "s3_context_test", + size = "small", + srcs = ["s3_context_test.cc"], + deps = [ + ":s3_context", + "@com_github_aws_cpp_sdk//:s3", + "@com_google_googletest//:gtest_main", + ] +) + +tensorstore_cc_test( + name = "cord_streambuf_test", + size = "small", + srcs = ["cord_streambuf_test.cc"], + deps = [ + ":cord_streambuf", + "@com_google_googletest//:gtest_main", + "@com_github_aws_cpp_sdk//:core", + ] +) + +py_binary( + name = "moto_server", + testonly = 1, + srcs = 
["moto_server.py"], + tags = [ + "manual", + "notap", + "skip-cmake", + ], + deps = ["@pypa_moto//:moto"], +) + +tensorstore_cc_test( + name = "localstack_test", + size = "small", + srcs = ["localstack_test.cc"], + args = [ + "--localstack_binary=$(location :moto_server)", + "--binary_mode=moto", + ], + data = [":moto_server"], + flaky = 1, # Spawning the test process can be flaky. + tags = [ + "cpu:2", + "requires-net:loopback", + "skip-cmake", + ], + deps = [ + ":s3_context", + "//tensorstore:context", + "//tensorstore:json_serialization_options_base", + "//tensorstore/internal:env", + "//tensorstore/internal:json_gtest", + "//tensorstore/internal/http", + "//tensorstore/internal/http:curl_transport", + "//tensorstore/internal/http:transport_test_utils", + "//tensorstore/internal/os:subprocess", + "//tensorstore/kvstore", + "//tensorstore/kvstore:batch_util", + "//tensorstore/kvstore:test_util", + "//tensorstore/util:future", + "//tensorstore/util:result", + "//tensorstore/util:status_testutil", + "@com_github_nlohmann_json//:json", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/log:absl_check", + "@com_google_absl//absl/log:absl_log", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:cord", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/time", + "@com_google_googletest//:gtest_main", + "@com_github_aws_cpp_sdk//:core", + "@com_github_aws_cpp_sdk//:s3", + + ], +) diff --git a/tensorstore/kvstore/s3_sdk/cord_streambuf.cc b/tensorstore/kvstore/s3_sdk/cord_streambuf.cc new file mode 100644 index 000000000..d758de7f5 --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/cord_streambuf.cc @@ -0,0 +1,234 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tensorstore/kvstore/s3_sdk/cord_streambuf.h" + +#include +#include +#include + +#include "absl/strings/cord.h" + +using absl::Cord; +using absl::CordBuffer; +using std::streamsize; +using std::streampos; +using std::streamoff; +using std::ios_base; + +namespace tensorstore { +namespace internal_kvstore_s3 { + +CordStreamBuf::CordStreamBuf() : + CordStreamBuf(Cord()) {} + +CordStreamBuf::CordStreamBuf(Cord && cord) : + mode_(ios_base::out | ios_base::in), + cord_(std::move(cord)), + read_chunk_(cord_.Chunks().begin()) { + + assert(eback() == nullptr); + assert(gptr() == nullptr); + assert(egptr() == nullptr); + assert(pbase() == nullptr); + assert(pptr() == nullptr); + assert(epptr() == nullptr); + + // Set up the get area to point at the first chunk, + // if the Cord has data + if(read_chunk_ != cord_.Chunks().end()) { + char_type * data = const_cast(read_chunk_->data()); + setg(data, data, data + read_chunk_->size()); + } +} + +Cord CordStreamBuf::DetachCord() { + Cord result; + std::swap(result, cord_); + read_chunk_ = cord_.Chunks().begin(); + assert(read_chunk_ == cord_.Chunks().end()); + setg(nullptr, nullptr, nullptr); + setp(nullptr, nullptr); + return result; +} + +void CordStreamBuf::AssignCord(Cord && cord) { + setg(nullptr, nullptr, nullptr); + setp(nullptr, nullptr); + + cord_ = std::move(cord); + read_chunk_ = cord_.Chunks().begin(); + + if(read_chunk_ != cord_.Chunks().end()) { + char_type * data = const_cast(read_chunk_->data()); + setg(data, data, data + read_chunk_->size()); + } +} + +// Bulk put operation +streamsize 
CordStreamBuf::xsputn(const char * s, streamsize count) { + if(!(mode_ & ios_base::out)) return 0; + streamsize n = count; + bool first = true; + streamsize p = 0; + + while (n > 0) { + CordBuffer buffer = first ? cord_.GetAppendBuffer(n) + : CordBuffer::CreateWithDefaultLimit(n); + + auto span = buffer.available_up_to(n); + for(int i = 0; i < span.size(); ++i, ++p) span[i] = s[p]; + buffer.IncreaseLengthBy(span.size()); + cord_.Append(std::move(buffer)); + n -= span.size(); + first = false; + } + + MaybeSetGetArea(); + return p; +} + +// Handle buffer overflow. +CordStreamBuf::int_type CordStreamBuf::overflow(int_type ch) { + // Not writing or eof received + if(!(mode_ & ios_base::out)) return traits_type::eof(); + if(traits_type::eq_int_type(ch, traits_type::eof())) return traits_type::eof(); + auto c = traits_type::to_char_type(ch); + cord_.Append(absl::string_view(&c, 1)); + MaybeSetGetArea(); + return ch; +} + +// Bulk get operation +streamsize CordStreamBuf::xsgetn(char * s, streamsize count) { + // Not reading + if(!(mode_ & ios_base::in)) return 0; + streamsize bytes_read = 0; + + while(bytes_read < count && read_chunk_ != cord_.Chunks().end()) { + assert(read_chunk_->size() == egptr() - eback()); // invariant + auto bytes_to_read = std::min(gremaining(), count - bytes_read); + for(streamsize i = 0, consumed = gconsumed(); i < bytes_to_read; ++i) { + s[bytes_read + i] = read_chunk_->operator[](consumed + i); + } + if(gptr() + bytes_to_read < egptr()) { + // Data remains in the get area + setg(eback(), gptr() + bytes_to_read, egptr()); + } else if(++read_chunk_ != cord_.Chunks().end()) { + // Initialise get area for next iteration + char_type * data = const_cast(read_chunk_->data()); + setg(data, data, data + read_chunk_->size()); + } + + bytes_read += bytes_to_read; + }; + + return bytes_read; +} + +// Handle buffer underflow. 
+CordStreamBuf::int_type CordStreamBuf::underflow() { + // Not reading or no more Cord data + if(!(mode_ & ios_base::in)) return traits_type::eof(); + if(read_chunk_ == cord_.Chunks().end()) return traits_type::eof(); + if(gptr() < egptr()) { + return traits_type::to_int_type(*gptr()); + } + if(++read_chunk_ == cord_.Chunks().end()) return traits_type::eof(); + char_type * data = const_cast(read_chunk_->data()); + setg(data, data, data + read_chunk_->size()); + return traits_type::to_int_type(*data); +} + +streampos CordStreamBuf::seekoff(streamoff off, ios_base::seekdir way, ios_base::openmode which) { + if (which == ios_base::in) { + if (way == ios_base::beg) { + if(off > cord_.size()) return traits_type::eof(); + // Seek from the beginning of the cord + auto n = off; + read_chunk_ = cord_.Chunks().begin(); + while(n > read_chunk_->size()) { + n -= read_chunk_->size(); + ++read_chunk_; + } + char_type * data = const_cast(read_chunk_->data()); + setg(data, data + n, data + read_chunk_->size()); + return off; + } else if (way == ios_base::cur) { + if(read_chunk_ == cord_.Chunks().end()) return traits_type::eof(); + auto current = gconsumed(); + for(auto c = cord_.Chunks().begin(); c != read_chunk_; ++c) { + current += c->size(); + } + + auto n = off; + // Advance forward in the current chunk + if(n > 0 && gremaining() > 0) { + auto bytes_to_remove = std::min(n, gremaining()); + n -= bytes_to_remove; + gbump(bytes_to_remove); + } + // Advance forward by Cord chunks, + // consuming any remaining characters + // in the chunk + while(n > 0) { + if(++read_chunk_ == cord_.Chunks().end()) return traits_type::eof(); + auto bytes_to_advance = std::min(n, read_chunk_->size()); + char_type * data = const_cast(read_chunk_->data()); + setg(data, data + bytes_to_advance, data + read_chunk_->size()); + n -= bytes_to_advance; + } + + return current + off; + } else if (way == ios_base::end) { + // Seeks past the stream end are unsupported + if(off > 0) return traits_type::eof(); + 
auto n = cord_.size() + off; + read_chunk_ = cord_.Chunks().begin(); + while(n > read_chunk_->size()) { + n -= read_chunk_->size(); + ++read_chunk_; + } + char_type * data = const_cast(read_chunk_->data()); + setg(data, data + n, data + read_chunk_->size()); + return cord_.size() + off; + } + } else if (which == ios_base::out) { + // This streambuf only supports appends. + // Don't respect the off argument, always return + // the append position + return cord_.size(); + } + return traits_type::eof(); +} + +streamsize CordStreamBuf::gconsumed() const { + return gptr() - eback(); +}; + +streamsize CordStreamBuf::gremaining() const { + return egptr() - gptr(); +} + +void CordStreamBuf::MaybeSetGetArea() { + if(read_chunk_ == cord_.Chunks().end()) { + read_chunk_ = cord_.Chunks().begin(); + if(read_chunk_ == cord_.Chunks().end()) return; + char_type * data = const_cast(read_chunk_->data()); + setg(data, data, data + read_chunk_->size()); + } +} + +} // namespace internal_kvstore_s3 +} // namespace tensorstore diff --git a/tensorstore/kvstore/s3_sdk/cord_streambuf.h b/tensorstore/kvstore/s3_sdk/cord_streambuf.h new file mode 100644 index 000000000..1b5aa6f7b --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/cord_streambuf.h @@ -0,0 +1,102 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TENSORSTORE_KVSTORE_S3_STREAMBUF_H_ +#define TENSORSTORE_KVSTORE_S3_STREAMBUF_H_ + +#include +#include + +#include "absl/strings/cord.h" + +namespace tensorstore { +namespace internal_kvstore_s3 { + +/// Basic implementation of a std::basic_streambuf backed by an abseil Cord. +/// It should be used in two modes +/// (1) Append-only writing mode, where data is appended to the underlying Cord +/// (2) Read mode, where data is read from the Cord. Seeking is supported +/// within the Stream Buffer. +/// +/// The streambuf get area is assigned to a chunk of the underlying Cord, +/// referred to by read_chunk_: this is usually set up by +/// setg(read_chunk_->data(), +/// read_chunk_->data(), +/// read_chunk->data() + read_chunk_->size()); +/// Then, characters are consumed from this area until underflow occurs, +/// at which point, the get area is constructed from the next chunk +class CordStreamBuf : public std::basic_streambuf { +public: + // Creates an empty stream buffer for writing + CordStreamBuf(); + // Creates a stream buffer for reading from the supplied Cord + CordStreamBuf(absl::Cord && cord); + + // Obtain read access to the backing Cord + const absl::Cord & GetCord() const { return cord_; } + + // Returns the underlying Cord, resetting the underlying stream + absl::Cord DetachCord(); + // Takes the supplied Cord as the underlying Cord, + // resetting the underlying stream + void AssignCord(absl::Cord && cord); + +protected: + // Bulk put operation + virtual std::streamsize xsputn(const char * s, std::streamsize count) override; + // Bulk get operation + virtual std::streamsize xsgetn(char * s, std::streamsize count) override; + // Handle buffer overflow. + virtual int_type overflow(int_type ch) override; + // Handle buffer underflow. + virtual int_type underflow() override; + + // Seek within the underlying Cord. 
+ // Seeking in the get area is supported + // Seeking in the put area always returns the length of the Cord + // (only appends are supported). + // do not appear to be called by the AWS C++ SDK + virtual std::streampos seekoff( + std::streamoff off, + std::ios_base::seekdir way, + std::ios_base::openmode which = std::ios_base::in | std::ios_base::out) override; + + // Seek within the underlying Cord (only seeks in the get area are supported) + virtual std::streampos seekpos( + std::streampos sp, + std::ios_base::openmode which = std::ios_base::in | std::ios_base::out) override { + return seekoff(sp - std::streampos(0), std::ios_base::beg, which); + } + + // Number of get characters consumed in the current read chunk + // (gptr() - eback()) + std::streamsize gconsumed() const; + // Number of characters remaining in the current read chunk + // (egptr() - gptr()) + std::streamsize gremaining() const; + +private: + // Configure the get area after put operations. + void MaybeSetGetArea(); + +private: + std::ios_base::openmode mode_; + absl::Cord cord_; + absl::Cord::ChunkIterator read_chunk_; +}; + +} // namespace internal_kvstore_s3 +} // namespace tensorstore + +#endif // TENSORSTORE_KVSTORE_S3_STREAMBUF_H_ \ No newline at end of file diff --git a/tensorstore/kvstore/s3_sdk/cord_streambuf_test.cc b/tensorstore/kvstore/s3_sdk/cord_streambuf_test.cc new file mode 100644 index 000000000..eda73260d --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/cord_streambuf_test.cc @@ -0,0 +1,272 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tensorstore/kvstore/s3_sdk/cord_streambuf.h" + +#include +#include +#include + +#include "absl/strings/cord.h" + +#include +#include + +#include + +using std::ios_base; + +using ::absl::Cord; +using ::absl::CordBuffer; +using ::Aws::Utils::Stream::DefaultUnderlyingStream; +using ::Aws::MakeUnique; +using ::tensorstore::internal_kvstore_s3::CordStreamBuf; + +namespace { + +static constexpr char kAwsTag[] = "AWS"; +static constexpr int kNBuffers = 3; +static constexpr auto kBufferSize = CordBuffer::kDefaultLimit; + +absl::Cord ThreeBufferCord() { + absl::Cord cord; + for(char ch = 0; ch < kNBuffers; ++ch) { + cord.Append(std::string(kBufferSize, '1' + ch)); + } + assert(std::distance(cord.Chunks().begin(), cord.Chunks().end()) == 3); + return cord; +} + +/// Remove this +TEST(CordStreamBufTest, Chunks) { + using ::absl::CordBuffer; + absl::Cord cord; + + auto buffer = cord.GetAppendBuffer(32); + auto span = buffer.available_up_to(32); + EXPECT_EQ(span.size(), 32); + for(int i = 0; i < span.size(); ++i) span[i] = i; + buffer.IncreaseLengthBy(span.size()); + cord.Append(std::move(buffer)); + + EXPECT_EQ(cord.Chunks().begin()->size(), 32); + EXPECT_EQ(cord.size(), 32); + + buffer = cord.GetAppendBuffer(32); + span = buffer.available_up_to(32); + EXPECT_EQ(span.size(), 32); + for(int i = 0; i < span.size(); ++i) span[i] = i; + buffer.IncreaseLengthBy(span.size()); + cord.Append(std::move(buffer)); + + buffer = cord.GetAppendBuffer(97); + span = buffer.available_up_to(97); + EXPECT_EQ(span.size(), 97); + for(int i = 0; i < span.size(); ++i) 
span[i] = i; + buffer.IncreaseLengthBy(span.size()); + cord.Append(std::move(buffer)); + + EXPECT_EQ(std::distance(cord.Chunks().begin(), cord.Chunks().end()), 3); + EXPECT_EQ(cord.size(), 161); +} + + +TEST(CordStreamBufTest, Read) { + auto cord = absl::Cord{"Hello World This is a test"}; + auto is = DefaultUnderlyingStream(MakeUnique(kAwsTag, std::move(cord))); + std::istreambuf_iterator in_it{is}, end; + std::string s{in_it, end}; + EXPECT_EQ(s, "Hello World This is a test"); + EXPECT_TRUE(is.good()); + + // eof triggered. + char ch; + EXPECT_FALSE(is.get(ch)); + EXPECT_FALSE(is.good()); + EXPECT_TRUE(is.eof()); +} + + +TEST(CordStreamBufTest, Write) { + auto os = DefaultUnderlyingStream(MakeUnique(kAwsTag)); + os << "Hello World"; + EXPECT_EQ(os.tellp(), 11); + os << " "; + EXPECT_EQ(os.tellp(), 12); + os << "This is a test"; + EXPECT_EQ(os.tellp(), 26); + EXPECT_TRUE(os.good()); + auto cord = dynamic_cast(os.rdbuf())->GetCord(); + EXPECT_EQ(cord, "Hello World This is a test"); + + // Single Cord chunk + auto it = cord.Chunks().begin(); + EXPECT_EQ(*it, "Hello World This is a test"); + ++it; + EXPECT_EQ(it, cord.Chunks().end()); + EXPECT_EQ(std::distance(cord.Chunks().begin(), cord.Chunks().end()), 1); +} + + +// Test get seeks via the streambuf interface +TEST(CordStreamBufTest, BufferSeek) { + auto buffer = CordStreamBuf(ThreeBufferCord()); + + // Seeks from beginning + EXPECT_EQ(buffer.pubseekoff(0, ios_base::beg, ios_base::in), 0); + EXPECT_EQ(buffer.pubseekoff(10 + 2*kBufferSize, ios_base::beg, ios_base::in), 10 + 2*kBufferSize); + EXPECT_EQ(buffer.pubseekoff(kNBuffers * kBufferSize, ios_base::beg, ios_base::in), kNBuffers * kBufferSize); + EXPECT_EQ(buffer.pubseekoff(10 + 3*kBufferSize, ios_base::beg, ios_base::in), -1); // eof + + // Seeks from current position + EXPECT_EQ(buffer.pubseekoff(0, ios_base::beg, ios_base::in), 0); + EXPECT_EQ(buffer.pubseekoff(10, ios_base::cur, ios_base::in), 10); + EXPECT_EQ(buffer.pubseekoff(kBufferSize, ios_base::cur, 
ios_base::in), 10 + kBufferSize); + EXPECT_EQ(buffer.pubseekoff(kBufferSize, ios_base::cur, ios_base::in), 10 + 2*kBufferSize); + EXPECT_EQ(buffer.pubseekoff(kBufferSize - 10, ios_base::cur, ios_base::in), kNBuffers*kBufferSize); + EXPECT_EQ(buffer.pubseekoff(10, ios_base::cur, ios_base::in), -1); // eof + + // Seek from end + EXPECT_EQ(buffer.pubseekoff(0, ios_base::beg, ios_base::in), 0); + EXPECT_EQ(buffer.pubseekoff(0, ios_base::end, ios_base::in), kBufferSize * kNBuffers); + EXPECT_EQ(buffer.pubseekoff(1, ios_base::end, ios_base::in), -1); // eof + EXPECT_EQ(buffer.pubseekoff(-1, ios_base::end, ios_base::in), kBufferSize * kNBuffers - 1); + EXPECT_EQ(buffer.pubseekoff(-kBufferSize * kNBuffers, ios_base::end, ios_base::in), 0); + EXPECT_EQ(buffer.pubseekoff(1, ios_base::end, ios_base::in), -1); // eof +} + +// Test get seeks via the istream interface +TEST(CordStreamBufTest, StreamSeek) { + auto is = DefaultUnderlyingStream( + MakeUnique(kAwsTag, ThreeBufferCord())); + + // Seek from beginning + is.clear(); + is.seekg(0, is.beg); + EXPECT_EQ(is.tellg(), 0); + is.seekg(kBufferSize * kNBuffers - 1, is.beg); + EXPECT_EQ(is.tellg(), kBufferSize * kNBuffers - 1); + is.seekg(kBufferSize * kNBuffers, is.beg); + EXPECT_EQ(is.tellg(), kBufferSize * kNBuffers); + is.seekg(kBufferSize * kNBuffers + 1, is.beg); + EXPECT_EQ(is.tellg(), -1); // eof + + // Seek from current position + is.clear(); + is.seekg(0, is.beg); + EXPECT_EQ(is.tellg(), 0); + is.seekg(10, is.cur); + EXPECT_EQ(is.tellg(), 10); + is.seekg(kBufferSize, is.cur); + EXPECT_EQ(is.tellg(), 10 + kBufferSize); + is.seekg(kBufferSize, is.cur); + EXPECT_EQ(is.tellg(), 10 + 2*kBufferSize); + is.seekg(kBufferSize - 10, is.cur); + EXPECT_EQ(is.tellg(), kNBuffers*kBufferSize); + is.seekg(10, is.cur); + EXPECT_EQ(is.tellg(), -1); // eof + + // Seek from end + is.clear(); + is.seekg(0, is.end); + EXPECT_EQ(is.tellg(), kBufferSize * kNBuffers); + is.seekg(-kBufferSize * kNBuffers, is.end); + EXPECT_EQ(is.tellg(), 0); + 
is.seekg(-1, is.end); + EXPECT_EQ(is.tellg(), kBufferSize * kNBuffers - 1); + is.seekg(1, is.end); + EXPECT_EQ(is.tellg(), -1); // eof +} + +/// Test that reading the CordStreamBuf reads the Cord +TEST(CordStreamBufTest, GetEntireStreamBuf) { + auto is = DefaultUnderlyingStream( + MakeUnique(kAwsTag, ThreeBufferCord())); + + int count = 0; + char ch; + + while(is.get(ch)) { + EXPECT_EQ(ch, '1' + (count / kBufferSize)); + EXPECT_TRUE(is.good()); + EXPECT_FALSE(is.eof()); + ++count; + EXPECT_EQ(is.tellg(), count); + } + EXPECT_EQ(count, kBufferSize * kNBuffers); + EXPECT_FALSE(is.good()); + EXPECT_TRUE(is.eof()); +} + +/// Test get seeking within the CordStreamBuf +TEST(CordStreamBufTest, GetSeek) { + auto is = DefaultUnderlyingStream( + MakeUnique(kAwsTag, ThreeBufferCord())); + + for(char b = 0; b < kNBuffers; ++b) { + is.seekg(5 + kBufferSize * b); + EXPECT_EQ(is.tellg(), 5 + kBufferSize * b); + char result[6] = {0x00}; + is.read(result, sizeof(result)); + auto expected = std::string(sizeof(result), '1' + b); + EXPECT_EQ(std::string_view(result, sizeof(result)), expected); + EXPECT_TRUE(is.good()); + EXPECT_EQ(is.tellg(), 5 + kBufferSize * b + sizeof(result)); + } + + is.seekg(kBufferSize * kNBuffers); + EXPECT_EQ(is.tellg(), kBufferSize*kNBuffers); + EXPECT_TRUE(is.good()); + EXPECT_FALSE(is.eof()); + + EXPECT_EQ(is.get(), -1); + EXPECT_FALSE(is.good()); + EXPECT_TRUE(is.eof()); +} + +/// Test read seeking within the CordStreamBuf +/// exercises xsgetn +TEST(CordStreamBuftest, ReadSeek) { + auto is = DefaultUnderlyingStream( + MakeUnique(kAwsTag, ThreeBufferCord())); + + is.seekg(5); + + { + char result[kBufferSize] = {0x00}; + EXPECT_TRUE(is.read(result, kBufferSize)); + auto expected = std::string(kBufferSize - 5, '1') + std::string(5, '2'); + EXPECT_EQ(std::string_view(result, kBufferSize), expected); + EXPECT_EQ(is.tellg(), 5 + kBufferSize); + } + + { + char result[kBufferSize] = {0x00}; + EXPECT_TRUE(is.read(result, kBufferSize)); + auto expected = 
std::string(kBufferSize - 5, '2') + std::string(5, '3'); + EXPECT_EQ(std::string_view(result, kBufferSize), expected); + EXPECT_EQ(is.tellg(), 5 + 2 * kBufferSize); + } + + { + char result[kBufferSize] = {0x00}; + EXPECT_FALSE(is.read(result, kBufferSize)); + auto expected = std::string(kBufferSize - 5, '3'); + EXPECT_EQ(std::string_view(result, kBufferSize - 5), expected); + EXPECT_EQ(std::string_view(result + kBufferSize - 5, 5), std::string(5, 0)); + EXPECT_EQ(is.tellg(), -1); + } +} + +} // namespace \ No newline at end of file diff --git a/tensorstore/kvstore/s3_sdk/localstack_test.cc b/tensorstore/kvstore/s3_sdk/localstack_test.cc new file mode 100644 index 000000000..d72274825 --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/localstack_test.cc @@ -0,0 +1,462 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include + +#include +#include +#include "absl/flags/flag.h" +#include "absl/log/absl_check.h" +#include "absl/log/absl_log.h" +#include "absl/synchronization/notification.h" +#include "absl/status/status.h" +#include "absl/strings/match.h" +#include "absl/strings/str_format.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "tensorstore/context.h" +#include "tensorstore/internal/env.h" +#include "tensorstore/internal/http/curl_transport.h" +#include "tensorstore/internal/http/http_response.h" +#include "tensorstore/internal/http/http_transport.h" +#include "tensorstore/internal/http/transport_test_utils.h" +#include "tensorstore/internal/json_gtest.h" +#include "tensorstore/internal/os/subprocess.h" +#include "tensorstore/internal/thread/thread_pool.h" +#include "tensorstore/kvstore/s3_sdk/cord_streambuf.h" +#include "tensorstore/util/executor.h" +#include "tensorstore/util/result.h" + +#include "tensorstore/kvstore/s3_sdk/s3_context.h" + +// When provided with --localstack_binary, localstack_test will start +// localstack in host mode (via package localstack[runtime]). +// +// When provided with --localstack_endpoint, localstack_test will connect +// to a running localstack instance. +ABSL_FLAG(std::string, localstack_endpoint, "", "Localstack endpoint"); +ABSL_FLAG(std::string, localstack_binary, "", "Path to the localstack"); + +// --localstack_timeout is the time the process will wait for localstack. +ABSL_FLAG(absl::Duration, localstack_timeout, absl::Seconds(15), + "Time to wait for localstack process to start serving requests"); + +// --host_header can override the host: header used for signing. 
+// It can be, for example, s3.af-south-1.localstack.localhost.com +ABSL_FLAG(std::string, host_header, "", "Host header to use for signing"); + +// --binary_mode selects whether the `--localstack_binary` is localstack +// binary or whether it is a moto binary. +ABSL_FLAG(std::string, binary_mode, "", + "Selects options for starting --localstack_binary. Valid values are " + "[moto]. Assumes localstack otherwise."); + +// AWS bucket, region, and path. +ABSL_FLAG(std::string, aws_bucket, "testbucket", + "The S3 bucket used for the test."); + +ABSL_FLAG(std::string, aws_region, "af-south-1", + "The S3 region used for the test."); + +ABSL_FLAG(std::string, aws_path, "tensorstore/test/", + "The S3 path used for the test."); + +using ::Aws::MakeUnique; +using ::Aws::MakeShared; +using ::Aws::Utils::Stream::DefaultUnderlyingStream; + +using ::tensorstore::Context; +using ::tensorstore::MatchesJson; +using ::tensorstore::internal::GetEnv; +using ::tensorstore::internal::GetEnvironmentMap; +using ::tensorstore::internal::SetEnv; +using ::tensorstore::internal::SpawnSubprocess; +using ::tensorstore::internal::Subprocess; +using ::tensorstore::internal::SubprocessOptions; +using ::tensorstore::internal_http::GetDefaultHttpTransport; +using ::tensorstore::internal_http::HttpResponse; +using ::tensorstore::internal_http::IssueRequestOptions; +using ::tensorstore::transport_test_utils::TryPickUnusedPort; + +using ::tensorstore::internal_kvstore_s3::AwsContext; +using ::tensorstore::internal_kvstore_s3::CordStreamBuf; +using ::tensorstore::internal_kvstore_s3::CordBackedResponseStreamFactory; + +namespace { + +static constexpr char kAwsTag[] = "AWS"; +static constexpr char kAwsAccessKeyId[] = "LSIAQAAAAAAVNCBMPNSG"; +static constexpr char kAwsSecretKeyId[] = "localstackdontcare"; + +/// sha256 hash of an empty string +static constexpr char kEmptySha256[] = + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + +std::string Bucket() { return 
absl::GetFlag(FLAGS_aws_bucket); } +std::string Region() { return absl::GetFlag(FLAGS_aws_region); } +std::string Path() { return absl::GetFlag(FLAGS_aws_path); } + +SubprocessOptions SetupLocalstackOptions(int http_port) { + // See https://docs.localstack.cloud/references/configuration/ + // for the allowed environment variables for localstack. + SubprocessOptions options{absl::GetFlag(FLAGS_localstack_binary), + {"start", "--host"}}; + options.env.emplace(GetEnvironmentMap()); + auto& env = *options.env; + env["GATEWAY_LISTEN"] = absl::StrFormat("localhost:%d", http_port); + env["LOCALSTACK_HOST"] = + absl::StrFormat("localhost.localstack.cloud:%d", http_port); + env["SERVICES"] = "s3"; + return options; +} + +SubprocessOptions SetupMotoOptions(int http_port) { + // See https://docs.getmoto.org/en/latest/docs/getting_started.html + // and https://docs.getmoto.org/en/latest/docs/server_mode.html + SubprocessOptions options{absl::GetFlag(FLAGS_localstack_binary), + {absl::StrFormat("-p%d", http_port)}}; + options.env.emplace(GetEnvironmentMap()); + auto& env = *options.env; + ABSL_CHECK(!Region().empty()); + env["AWS_DEFAULT_REGION"] = Region(); + return options; +} + +// NOTE: Support minio as well, which needs temporary directories. +// https://min.io/docs/minio/linux/reference/minio-server/minio-server.html +// minio server --address :12123 /tmp/minio + +class LocalStackProcess { + public: + LocalStackProcess() = default; + ~LocalStackProcess() { StopProcess(); } + + void SpawnProcess() { + if (child_) return; + + const auto start_child = [this] { + http_port = TryPickUnusedPort().value_or(0); + ABSL_CHECK(http_port > 0); + + SubprocessOptions options = // + (absl::GetFlag(FLAGS_binary_mode) == "moto") + ? 
SetupMotoOptions(http_port) + : SetupLocalstackOptions(http_port); + + ABSL_LOG(INFO) << "Spawning: " << endpoint_url(); + + absl::SleepFor(absl::Milliseconds(10)); + TENSORSTORE_CHECK_OK_AND_ASSIGN(auto spawn_proc, + SpawnSubprocess(options)); + return spawn_proc; + }; + + Subprocess spawn_proc = start_child(); + + // Give the child process several seconds to start. + auto deadline = absl::Now() + absl::Seconds(10); + while (absl::Now() < deadline) { + absl::SleepFor(absl::Milliseconds(500)); + auto join_result = spawn_proc.Join(/*block=*/false); + + if (join_result.ok()) { + // Process has terminated. Restart. + spawn_proc = start_child(); + continue; + } else if (absl::IsUnavailable(join_result.status())) { + // Child is running. + child_.emplace(std::move(spawn_proc)); + return; + } + // TODO: Also check the http port? + } + + // Deadline has expired & there's nothing to show for it. + ABSL_LOG(FATAL) << "Failed to start process"; + } + + void StopProcess() { + if (child_) { + child_->Kill().IgnoreError(); + auto join_result = child_->Join(); + if (!join_result.ok()) { + ABSL_LOG(ERROR) << "Joining storage_testbench subprocess failed: " + << join_result.status(); + } + } + } + + std::string endpoint_url() { + return absl::StrFormat("http://localhost:%d", http_port); + } + + int http_port = 0; + std::optional child_; +}; + + +class LocalStackFixture : public ::testing::Test { + protected: + static std::shared_ptr context; + static LocalStackProcess process; + static std::shared_ptr client; + + static void SetUpTestSuite() { + if (!GetEnv("AWS_ACCESS_KEY_ID") || !GetEnv("AWS_SECRET_KEY_ID")) { + SetEnv("AWS_ACCESS_KEY_ID", kAwsAccessKeyId); + SetEnv("AWS_SECRET_KEY_ID", kAwsSecretKeyId); + } + + ABSL_CHECK(!Bucket().empty()); + + if (absl::GetFlag(FLAGS_localstack_endpoint).empty()) { + ABSL_CHECK(!absl::GetFlag(FLAGS_localstack_binary).empty()); + process.SpawnProcess(); + } + + if (!absl::StrContains(absl::GetFlag(FLAGS_localstack_endpoint), + "amazonaws.com")) 
{ + // Only try to create the bucket when not connecting to aws. + ABSL_CHECK(!Region().empty()); + MaybeCreateBucket(); + } else { + ABSL_LOG(INFO) << "localstack_test connecting to Amazon using bucket:" + << Bucket(); + } + + CreateClient(); + } + + // Create client for use by test cases + static void CreateClient() { + // Offload AWS Client tasks onto a Tensorstore executor + class TensorStoreExecutor : public Aws::Utils::Threading::Executor { + public: + TensorStoreExecutor(): executor_(::tensorstore::internal::DetachedThreadPool(4)) {} + protected: + bool SubmitToThread(std::function && fn) override { + ::tensorstore::WithExecutor(executor_, std::move(fn))(); + return true; + } + private: + ::tensorstore::Executor executor_; + }; + + auto config = Aws::Client::ClientConfiguration{}; + config.endpointOverride = endpoint_url(); + config.region = Region(); + config.executor = Aws::MakeShared(kAwsTag); + client = std::make_shared( + config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Always, + false); + } + + static void TearDownTestSuite() { + client.reset(); + context.reset(); + process.StopProcess(); + } + + static std::string endpoint_url() { + if (absl::GetFlag(FLAGS_localstack_endpoint).empty()) { + return process.endpoint_url(); + } + return absl::GetFlag(FLAGS_localstack_endpoint); + } + + // Attempts to create the kBucket bucket on the localstack host. 
+ static void MaybeCreateBucket() { + // Create a separate client for creating the bucket + // Without anonymous credentials bucket creation fails with 400 IllegalRegionConstraint + auto cfg = Aws::Client::ClientConfiguration{}; + cfg.endpointOverride = endpoint_url(); + cfg.region = Region(); + auto create_client = std::make_shared( + Aws::Auth::AWSCredentials(), + cfg, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Always, + false); + + auto create_request = Aws::S3::Model::CreateBucketRequest{}; + create_request.SetBucket(Bucket()); + + if (cfg.region != "us-east-1") { + auto bucket_cfg = Aws::S3::Model::CreateBucketConfiguration{}; + bucket_cfg.SetLocationConstraint( + Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName( + cfg.region)); + create_request.SetCreateBucketConfiguration(bucket_cfg); + } + + auto outcome = create_client->CreateBucket(create_request); + if (!outcome.IsSuccess()) { + auto err = outcome.GetError(); + ABSL_LOG(INFO) << "Error: CreateBucket: " << + err.GetExceptionName() << ": " << err.GetMessage(); + } + else { + ABSL_LOG(INFO) << "Created bucket " << Bucket() << + " in AWS Region " << Region(); + } + } +}; + +LocalStackProcess LocalStackFixture::process; +std::shared_ptr LocalStackFixture::client = nullptr; +std::shared_ptr LocalStackFixture::context = tensorstore::internal_kvstore_s3::GetAwsContext(); + +TEST_F(LocalStackFixture, BasicSync) { + std::string payload = "this is a test"; + + // Put an object + auto put_request = Aws::S3::Model::PutObjectRequest{}; + put_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + put_request.SetBucket(Bucket()); + put_request.SetKey("portunus"); + put_request.SetBody(MakeShared(kAwsTag, MakeUnique(kAwsTag, absl::Cord{payload}))); + auto put_outcome = client->PutObject(put_request); + EXPECT_TRUE(put_outcome.IsSuccess()); + + // Put the same object with a different key + put_request = Aws::S3::Model::PutObjectRequest{}; + 
put_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + put_request.SetBucket(Bucket()); + put_request.SetKey("portunus0"); + put_request.SetBody(MakeShared(kAwsTag, MakeUnique(kAwsTag, absl::Cord{payload}))); + put_outcome = client->PutObject(put_request); + EXPECT_TRUE(put_outcome.IsSuccess()); + + // List the objects + auto list_request = Aws::S3::Model::ListObjectsV2Request{}; + list_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + list_request.SetBucket(Bucket()); + list_request.SetMaxKeys(1); + auto continuation_token = Aws::String{}; + Aws::Vector objects; + + do { + if (!continuation_token.empty()) { + list_request.SetContinuationToken(continuation_token); + } + + auto outcome = client->ListObjectsV2(list_request); + EXPECT_TRUE(outcome.IsSuccess()); + + auto page_objects = outcome.GetResult().GetContents(); + objects.insert(objects.end(), page_objects.begin(), page_objects.end()); + continuation_token = outcome.GetResult().GetNextContinuationToken(); + } while (!continuation_token.empty()); + + + EXPECT_EQ(objects.size(), 2); + + for (const auto &object: objects) { + EXPECT_EQ(object.GetSize(), payload.size()); + } + + // Get the contents of the key + auto get_request = Aws::S3::Model::GetObjectRequest{}; + get_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + get_request.SetBucket(Bucket()); + get_request.SetKey("portunus"); + auto get_outcome = client->GetObject(get_request); + EXPECT_TRUE(get_outcome.IsSuccess()); + std::string result; + std::getline(get_outcome.GetResult().GetBody(), result); + EXPECT_EQ(result, payload); +} + +TEST_F(LocalStackFixture, BasicAsync) { + struct TestCallbacks { + // Data relevant to GET and PUT + std::string key; + std::string payload; + + // Results and notifications + bool put_succeeded = false; + std::optional get_result; + absl::Notification done; + + void do_put() { + auto put_request = Aws::S3::Model::PutObjectRequest{}; + put_request.SetBucket(Bucket()); + 
put_request.SetKey(key); + put_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + put_request.SetBody(MakeShared(kAwsTag, MakeUnique(kAwsTag, absl::Cord{payload}))); + client->PutObjectAsync(put_request, [this]( + const auto *, const auto &, const auto & outcome, const auto &) { + this->on_put(outcome); + }); + } + + void on_put(const Aws::S3::Model::PutObjectOutcome & outcome) { + if(outcome.IsSuccess()) { + put_succeeded = true; + do_get(); + } else { + done.Notify(); + } + } + + void do_get() { + auto get_request = Aws::S3::Model::GetObjectRequest{}; + get_request.SetResponseStreamFactory(CordBackedResponseStreamFactory); + get_request.SetBucket(Bucket()); + get_request.SetKey(key); + client->GetObjectAsync(get_request, [this]( + const auto *, const auto &, auto outcome, const auto &) { + this->on_get(std::move(outcome)); + }); + } + + void on_get(Aws::S3::Model::GetObjectOutcome outcome) { + if(outcome.IsSuccess()) { + std::string buffer; + std::getline(outcome.GetResult().GetBody(), buffer); + get_result = buffer; + } + + done.Notify(); + } + }; + + auto callbacks = TestCallbacks{"key", "value"}; + callbacks.do_put(); + EXPECT_TRUE(callbacks.done.WaitForNotificationWithTimeout(absl::Milliseconds(10))); + EXPECT_TRUE(callbacks.put_succeeded); + EXPECT_TRUE(callbacks.get_result.has_value()); + EXPECT_EQ(callbacks.get_result.value(), callbacks.payload); +} + + + +} // namespace diff --git a/tensorstore/kvstore/s3_sdk/moto_server.py b/tensorstore/kvstore/s3_sdk/moto_server.py new file mode 100644 index 000000000..79612a070 --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/moto_server.py @@ -0,0 +1,19 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from moto.server import main + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tensorstore/kvstore/s3_sdk/s3_context.cc b/tensorstore/kvstore/s3_sdk/s3_context.cc new file mode 100644 index 000000000..d52f090ec --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/s3_context.cc @@ -0,0 +1,338 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "tensorstore/kvstore/s3_sdk/s3_context.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/log/absl_log.h" +#include "absl/synchronization/mutex.h" + +#include "tensorstore/internal/http/curl_transport.h" +#include "tensorstore/internal/http/http_request.h" +#include "tensorstore/internal/http/http_response.h" +#include "tensorstore/internal/http/http_transport.h" +#include "tensorstore/kvstore/s3_sdk/cord_streambuf.h" + +using AwsHttpClient = ::Aws::Http::HttpClient; +using AwsHttpRequest = ::Aws::Http::HttpRequest; +using AwsHttpResponse = ::Aws::Http::HttpResponse; +using AwsStandardHttpRequest = ::Aws::Http::Standard::StandardHttpRequest; +using AwsStandardHttpResponse = ::Aws::Http::Standard::StandardHttpResponse; +using AwsRateLimiterInterface = ::Aws::Utils::RateLimits::RateLimiterInterface; +using AwsLogLevel = ::Aws::Utils::Logging::LogLevel; +using AwsLogSystemInterface = ::Aws::Utils::Logging::LogSystemInterface; + +using ::Aws::Http::HttpMethodMapper::GetNameForHttpMethod; +using ::Aws::Auth::DefaultAWSCredentialsProviderChain; + +using ::tensorstore::internal_http::HttpRequest; +using ::tensorstore::internal_http::HttpResponse; +using ::tensorstore::internal_http::IssueRequestOptions; + +namespace tensorstore { +namespace internal_kvstore_s3 { + +namespace { + +static constexpr char kAwsTag[] = "AWS"; +static constexpr char kUserAgentHeader[] = "user-agent"; +static constexpr std::size_t k1MB = 1024 * 1024; + +// Context guarded by mutex +absl::Mutex context_mu_; +std::weak_ptr context_ ABSL_GUARDED_BY(context_mu_); + +/// Provides a custom Aws HttpClient. +/// Overrides Aws::HttpClient::MakeRequest to convert AWS HttpRequests +/// into tensorstore HttpRequests which are issued on the default tensorstore +/// HTTP transport. 
The returned tensorstore HttpResponse is +// converted into an AWS HttpResponse +class CustomHttpClient : public AwsHttpClient { +public: + struct RequestAndPayload { + HttpRequest request; + absl::Cord cord; + }; + + // Converts an Aws StandardHttpRequest to a tensorstore HttpRequest + RequestAndPayload FromAwsRequest(const std::shared_ptr & aws_request) const { + auto aws_headers = aws_request->GetHeaders(); + auto headers = std::vector{}; + for(auto &[name, value]: aws_headers) { + headers.emplace_back(absl::StrCat(name, ": ", value)); + } + std::string user_agent; + if(auto it = aws_headers.find(kUserAgentHeader); it != aws_headers.end()) { + user_agent = it->second; + } + + absl::Cord payload; + + // Get the underlying body as a Cord + if (auto body = aws_request->GetContentBody(); body) { + // Fast path, extract underlying Cord + if (auto cordstreambuf = dynamic_cast(body->rdbuf()); + cordstreambuf) { + payload = cordstreambuf->DetachCord(); + // TODO: remove this + } else { + // Slow path, copy characters off the stream into Cord + std::vector buffer(absl::CordBuffer::kDefaultLimit); + std::streampos original = body->tellg(); + while (body->read(buffer.data(), buffer.size()) || body->gcount() > 0) { + payload.Append(std::string_view(buffer.data(), body->gcount())); + } + + if(payload.size() > k1MB) { + ABSL_LOG(WARNING) << "Copied HttpRequest body of size " << payload.size() << " from iostream"; + } + + // Reset stream + body->clear(); + body->seekg(original); + } + } + + return RequestAndPayload{ + HttpRequest{ + GetNameForHttpMethod(aws_request->GetMethod()), + aws_request->GetURIString(true), + std::move(user_agent), + std::move(headers)}, + std::move(payload) + }; + } + + // Converts a tensorstore response to an Aws StandardHttpResponse + std::shared_ptr ToAwsResponse( + HttpResponse & ts_response, + const std::shared_ptr & aws_request) const { + + auto aws_response = Aws::MakeShared(kAwsTag, aws_request); + 
aws_response->SetResponseCode(static_cast(ts_response.status_code)); + for(auto &[name, value]: aws_response->GetHeaders()) { + aws_response->AddHeader(name, value); + } + + // Move Cord into the body stream + if(!ts_response.payload.empty()) { + auto & body = aws_response->GetResponseBody(); + if(auto cordstreambuf = dynamic_cast(body.rdbuf()); + cordstreambuf) { + // Fast path, directly assign the Cord + cordstreambuf->AssignCord(std::move(ts_response.payload)); + } else { + if(ts_response.payload.size() > k1MB) { + ABSL_LOG(WARNING) << "Copied HttpResponse body of size " << ts_response.payload.size() << " to iostream"; + } + + body << ts_response.payload; + } + } + + return aws_response; + } + + /// Overrides the SDK mechanism for issuing AWS HttpRequests + /// Converts AWS HttpRequests to their tensorstore requivalent, + /// which is issued on the default tensorstore transport. + /// The tensorstore response is converted into an AWS HttpResponse. + std::shared_ptr MakeRequest( + const std::shared_ptr & request, + AwsRateLimiterInterface* readLimiter = nullptr, + AwsRateLimiterInterface* writeLimiter = nullptr) const override { + // Issue the wrapped HttpRequest on a tensorstore executor + auto transport = ::tensorstore::internal_http::GetDefaultHttpTransport(); + auto [ts_request, payload] = FromAwsRequest(request); + ABSL_LOG(INFO) << ts_request << " " << payload; + auto future = transport->IssueRequest(ts_request, IssueRequestOptions(payload)); + // TODO: if possible use a continuation (future.ExecuteWhenReady) here + auto response = future.value(); + ABSL_LOG(INFO) << response; + return ToAwsResponse(response, request); + }; +}; + + +/// Custom factory overriding Aws::Http::DefaultHttpFatory +/// Generates a CustomHttpClient (which defers to tensorflow's curl library) +/// as well as overriding CreateHttpRequest to return +/// Standard Http Requests +class CustomHttpFactory : public Aws::Http::HttpClientFactory { +public: + std::shared_ptr CreateHttpClient( 
+ const Aws::Client::ClientConfiguration & clientConfiguration) const override { + ABSL_LOG(INFO) << "Constructing custom HTTP Client"; + return Aws::MakeShared(kAwsTag); + }; + + std::shared_ptr CreateHttpRequest( + const Aws::String &uri, Aws::Http::HttpMethod method, + const Aws::IOStreamFactory &streamFactory) const override { + ABSL_LOG(INFO) << "Constructing custom HttpRequest"; + return CreateHttpRequest(Aws::Http::URI(uri), method, streamFactory); + } + + std::shared_ptr CreateHttpRequest( + const Aws::Http::URI& uri, Aws::Http::HttpMethod method, + const Aws::IOStreamFactory& streamFactory) const override + { + ABSL_LOG(INFO) << "Constructing custom HttpRequest"; + auto request = Aws::MakeShared(kAwsTag, uri, method); + request->SetResponseStreamFactory(streamFactory); + return request; + } +}; + + +/// Connect the AWS SDK's logging system to Abseil logging +class AWSLogSystem : public AwsLogSystemInterface { +public: + AWSLogSystem(AwsLogLevel log_level) : log_level_(log_level) {}; + AwsLogLevel GetLogLevel(void) const override { + return log_level_; + }; + + // Writes the stream to ProcessFormattedStatement. + void LogStream(AwsLogLevel log_level, const char* tag, + const Aws::OStringStream& messageStream) override { + LogMessage(log_level, messageStream.rdbuf()->str().c_str()); + } + + // Flushes the buffered messages if the logger supports buffering + void Flush() override { return; }; + + // Overridden, but prefer the safer LogStream + void Log(AwsLogLevel log_level, const char* tag, + const char* format, ...) override; + + // Overridden, but prefer the safer LogStream + void vaLog(AwsLogLevel log_level, const char* tag, + const char* format, va_list args) override; + +private: + void LogMessage(AwsLogLevel log_level, std::string_view message); + AwsLogLevel log_level_; +}; + +void AWSLogSystem::Log(AwsLogLevel log_level, const char* tag, + const char* format, ...) 
{ + // https://www.open-std.org/JTC1/SC22/WG14/www/docs/n1570.pdf + // Section 7.16 + va_list args; + va_start(args, format); + vaLog(log_level, tag, format, args); + va_end(args); +} + +void AWSLogSystem::vaLog(AwsLogLevel log_level, const char* tag, + const char* format, va_list args) { + // https://www.open-std.org/JTC1/SC22/WG14/www/docs/n1570.pdf + // Section 7.16 + char buffer[256]; + vsnprintf(buffer, 256, format, args); + LogMessage(log_level, buffer); +} + +void AWSLogSystem::LogMessage(AwsLogLevel log_level, std::string_view message) { + switch(log_level) { + case AwsLogLevel::Info: + ABSL_LOG(INFO) << message; + break; + case AwsLogLevel::Warn: + ABSL_LOG(WARNING) << message; + break; + case AwsLogLevel::Error: + ABSL_LOG(ERROR) << message; + break; + case AwsLogLevel::Fatal: + ABSL_LOG(FATAL) << message; + break; + case AwsLogLevel::Trace: + case AwsLogLevel::Debug: + default: + ABSL_LOG(INFO) << message; + break; + } +} + +} // namespace + +Aws::IOStream * CordBackedResponseStreamFactory() { + return Aws::New( + kAwsTag, Aws::MakeUnique(kAwsTag)); +} + +// Initialise AWS API and Logging +std::shared_ptr GetAwsContext() { + absl::MutexLock lock(&context_mu_); + if(context_.use_count() > 0) { + ABSL_LOG(INFO) << "Returning existing AwsContext"; + return context_.lock(); + } + + auto options = Aws::SDKOptions{}; + // Customise HttpClientFactory + // Disable curl init/cleanup, tensorstore should control this + // Don't install the SIGPIPE handler + options.httpOptions.httpClientFactory_create_fn = []() { + return Aws::MakeShared(kAwsTag); + }; + options.httpOptions.initAndCleanupCurl = false; + options.httpOptions.installSigPipeHandler = false; + + // Install AWS -> Abseil Logging Translator + //auto level = AwsLogLevel::Debug; + auto level = AwsLogLevel::Info; + options.loggingOptions.logLevel = level; + options.loggingOptions.logger_create_fn = [level=level]() { + return Aws::MakeShared(kAwsTag, level); + }; + + ABSL_LOG(INFO) << "Initialising AWS SDK 
API"; + Aws::InitAPI(options); + ABSL_LOG(INFO) << "AWS SDK API Initialised"; + + auto provider = Aws::MakeShared(kAwsTag); + + auto ctx = std::shared_ptr( + new AwsContext{ + std::move(options), + std::move(provider)}, + [](AwsContext * ctx) { + absl::MutexLock lock(&context_mu_); + ABSL_LOG(INFO) << "Shutting down AWS SDK API"; + Aws::ShutdownAPI(ctx->options); + ABSL_LOG(INFO) << "AWS SDK API Shutdown"; + delete ctx; + }); + context_ = ctx; + return ctx; +} + +} // namespace internal_kvstore_s3 +} // namespace tensorstore diff --git a/tensorstore/kvstore/s3_sdk/s3_context.h b/tensorstore/kvstore/s3_sdk/s3_context.h new file mode 100644 index 000000000..968a73ecc --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/s3_context.h @@ -0,0 +1,41 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef TENSORSTORE_KVSTORE_S3_S3_CONTEXT_H_ +#define TENSORSTORE_KVSTORE_S3_S3_CONTEXT_H_ + +#include + +#include +#include + + +namespace tensorstore { +namespace internal_kvstore_s3 { + +struct AwsContext { + Aws::SDKOptions options; + std::shared_ptr cred_provider_; +}; + +// Initialise AWS API and Logging +std::shared_ptr GetAwsContext(); + +// Return an IOStream backed by a Cord +Aws::IOStream * CordBackedResponseStreamFactory(); + +} // namespace internal_kvstore_s3 +} // neamespace tensorstore + +#endif // TENSORSTORE_KVSTORE_S3_S3_CONTEXT_H_ \ No newline at end of file diff --git a/tensorstore/kvstore/s3_sdk/s3_context_test.cc b/tensorstore/kvstore/s3_sdk/s3_context_test.cc new file mode 100644 index 000000000..a776aae63 --- /dev/null +++ b/tensorstore/kvstore/s3_sdk/s3_context_test.cc @@ -0,0 +1,99 @@ +// Copyright 2024 The TensorStore Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +#include "absl/log/absl_log.h" + +#include +#include +#include + +#include +#include +#include + +#include "tensorstore/kvstore/s3_sdk/s3_context.h" +#include "tensorstore/internal/thread/thread_pool.h" +#include "tensorstore/util/executor.h" + +#include +#include + +using ::tensorstore::internal_kvstore_s3::GetAwsContext; +using ::tensorstore::internal_kvstore_s3::AwsContext; + +namespace { + +static constexpr char kAWSTag[] = "AWS"; + +TEST(S3ContextTest, Basic) { + auto ctx = GetAwsContext(); + EXPECT_EQ(ctx.use_count(), 1); + std::weak_ptr wp = ctx; + EXPECT_EQ(wp.use_count(), 1); + + auto ctx2 = GetAwsContext(); + EXPECT_EQ(ctx, ctx2); + EXPECT_EQ(wp.use_count(), 2); + + // sanity check basic credential retrieval + //auto creds = ctx->cred_provider_->GetAWSCredentials(); + + ctx.reset(); + ctx2.reset(); + + EXPECT_EQ(wp.use_count(), 0); + EXPECT_EQ(wp.lock(), nullptr); + + ctx = GetAwsContext(); +} + +TEST(S3ContextTest, Endpoint) { + EXPECT_EQ(Aws::S3::S3Endpoint::ForRegion("us-east-2", false, false), "s3.us-east-2.amazonaws.com"); +} + +TEST(S3ContextTest, Client) { + // Offload AWS Client tasks onto a Tensorstore executor + class TensorStoreExecutor : public Aws::Utils::Threading::Executor { + public: + TensorStoreExecutor(): executor_(::tensorstore::internal::DetachedThreadPool(4)) {} + protected: + bool SubmitToThread(std::function && fn) override { + ::tensorstore::WithExecutor(executor_, std::move(fn))(); + return true; + } + + private: + ::tensorstore::Executor executor_; + }; + + auto ctx = GetAwsContext(); + auto cfg = Aws::Client::ClientConfiguration(); + // Override the default client executor + cfg.executor = Aws::MakeShared(kAWSTag); + cfg.executor->Submit([msg = "Submission seems to work"] { ABSL_LOG(INFO) << msg; }); + auto client = Aws::S3::S3Client(cfg); + auto head_bucket = Aws::S3::Model::HeadBucketRequest().WithBucket("ratt-public-data"); + auto outcome = client.HeadBucket(head_bucket); + if(!outcome.IsSuccess()) { 
+ auto & err = outcome.GetError(); + std::cerr << "Error: " << err.GetExceptionName() << ": " << err.GetMessage() << std::endl; + } else { + std::cout << "Success" << std::endl; + } +} + +} // namespace { \ No newline at end of file diff --git a/third_party/com_github_aws_c_auth/aws_c_auth.BUILD.bazel b/third_party/com_github_aws_c_auth/aws_c_auth.BUILD.bazel new file mode 100644 index 000000000..e1fb0fde2 --- /dev/null +++ b/third_party/com_github_aws_c_auth/aws_c_auth.BUILD.bazel @@ -0,0 +1,21 @@ +# Description: +# AWS C Auth + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_auth", + srcs = glob([ + "include/aws/auth/**/*.h", + "source/*.c", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_http//:aws_c_http", + "@com_github_aws_c_sdkutils//:aws_c_sdkutils", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_auth/workspace.bzl b/third_party/com_github_aws_c_auth/workspace.bzl new file mode 100644 index 000000000..b9e2f0604 --- /dev/null +++ b/third_party/com_github_aws_c_auth/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_auth", + sha256 = "f249a12a6ac319e929c005fb7efd5534c83d3af3a3a53722626ff60a494054bb", + strip_prefix = "aws-c-auth-0.7.22", + urls = [ + "https://github.com/awslabs/aws-c-auth/archive/refs/tags/v0.7.22.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_auth/aws_c_auth.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_auth/system.BUILD.bazel"), + cmake_name = "aws_c_auth", + cmake_target_mapping = { + "@com_github_aws_c_auth//:aws_c_auth": "aws_c_auth::aws_c_auth", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_cal/aws_c_cal.BUILD.bazel b/third_party/com_github_aws_c_cal/aws_c_cal.BUILD.bazel new file mode 100644 index 000000000..e1b5d8f26 --- /dev/null +++ b/third_party/com_github_aws_c_cal/aws_c_cal.BUILD.bazel @@ -0,0 +1,30 @@ +# Description: +# AWS s2n tls + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "aws_c_cal", + srcs = glob([ + "include/aws/cal/*.h", + "include/aws/cal/private/*.h", + "source/*.c" + ]) + select({ + "@platforms//os:windows": glob([ + "source/windows/*.c", + ]), + "@platforms//os:linux": glob([ + "source/unix/*.c", + ]), + "@platforms//os:osx": glob([ + "source/darwin/*.c", + ]) + }), + includes = ["include"], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + "@com_google_boringssl//:crypto" + ] +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_cal/workspace.bzl b/third_party/com_github_aws_c_cal/workspace.bzl new file mode 100644 index 000000000..14353a4ff --- /dev/null +++ b/third_party/com_github_aws_c_cal/workspace.bzl @@ -0,0 +1,35 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_cal", + sha256 = "9c51afbece6aa7a4a3e40b99c242884c1744d7f949a3f720cea41d247ac2d06a", + strip_prefix = "aws-c-cal-0.7.0", + urls = [ + "https://github.com/awslabs/aws-c-cal/archive/refs/tags/v0.7.0.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_cal/aws_c_cal.BUILD.bazel"), + #system_build_file = Label("//third_party:com_github_aws_c_cal/system.BUILD.bazel"), + cmake_name = "aws_c_cal", + cmake_target_mapping = { + "@com_github_aws_c_cal//:aws_c_cal": "aws_c_cal::aws_c_cal", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_common/aws_c_common.BUILD.bazel b/third_party/com_github_aws_c_common/aws_c_common.BUILD.bazel new file mode 100644 index 000000000..f5e509f87 --- /dev/null +++ b/third_party/com_github_aws_c_common/aws_c_common.BUILD.bazel @@ -0,0 +1,96 @@ +# Description: +# AWS C Common + +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +write_file( + name = "write_config_h", + out = "include/aws/common/config.h", + newline = "auto", + + content = [ + "#ifndef AWS_COMMON_CONFIG_H", + "#define AWS_COMMON_CONFIG_H", + "", + "#define AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS", + "#define AWS_HAVE_GCC_INLINE_ASM", + "#define 
AWS_HAVE_POSIX_LARGE_FILE_SUPPORT", + ] + select({ + "@platforms//os:linux": [ + "#define AWS_HAVE_EXECINFO", + "#define AWS_HAVE_LINUX_IF_LINK_H", + ], + "@platforms//os:windows": [ + "#define AWS_HAVE_WINAPI_DESKTOP", + ], + }) + [ + "#undef AWS_HAVE_WINAPI_DESKTOP", + # TODO: improve with logic from AwsSIMD.cmake + # but this strictly requires configure style tests... + "#undef AWS_USE_CPU_EXTENSIONS", + "#undef AWS_HAVE_MSVC_INTRINSICS_X64", + "#undef AWS_HAVE_AVX2_INTRINSICS", + "#undef AWS_HAVE_AVX512_INTRINSICS", + "#undef AWS_HAVE_MM256_EXTRACT_EPI64", + "#undef AWS_HAVE_CLMUL", + "#undef AWS_HAVE_ARM32_CRC", + "#undef AWS_HAVE_ARMv8_1", + "#undef AWS_ARCH_ARM64", + "#undef AWS_ARCH_INTEL", + "#undef AWS_ARCH_INTEL_X64", + "", + "#endif" + ] +) + +cc_library( + name = "aws_c_common", + srcs = glob([ + "include/aws/common/*.h", + "include/aws/common/external/*.h", + "include/aws/common/private/*.h", + "source/*.c", + "source/arch/generic/*.c", + "source/external/**/*.h", + "source/external/**/*.c", + ]) + select({ + "@platforms//os:windows": glob([ + "source/windows/*.c", + ]), + "@platforms//os:linux": glob([ + "source/linux/*.c", + "source/posix/*.c", + ]), + "@platforms//os:osx": glob([ + "source/posix/*.c", + ]) + }), + hdrs = glob([ + "include/aws/common/*.h", + "include/aws/common/private/*.h", + ]) + [ + ":write_config_h" + ], + defines = [ + # TODO: improve this with logic from AwsThreadAffinity.cmake + "AWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE", + # Disable macro tracing API + "INTEL_NO_ITTNOTIFY_API", + ], + includes = [ + "include", + "source/external", + "source/external/libcbor" + ], + textual_hdrs = glob([ + "include/**/*.inl", + ]), + deps = [], +) + diff --git a/third_party/com_github_aws_c_common/workspace.bzl b/third_party/com_github_aws_c_common/workspace.bzl new file mode 100644 index 000000000..6706bda5a --- /dev/null +++ b/third_party/com_github_aws_c_common/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_common", + sha256 = "adf838daf6a60aa31268522105b03262d745f529bc981d3ac665424133d6f91b", + strip_prefix = "aws-c-common-0.9.23", + urls = [ + "https://github.com/awslabs/aws-c-common/archive/v0.9.23.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_common/aws_c_common.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_common/system.BUILD.bazel"), + cmake_name = "aws_c_common", + cmake_target_mapping = { + "@com_github_aws_c_common//:aws_c_common": "aws_c_common::aws_c_common", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_compression/aws_c_compression.BUILD.bazel b/third_party/com_github_aws_c_compression/aws_c_compression.BUILD.bazel new file mode 100644 index 000000000..7095a3a90 --- /dev/null +++ b/third_party/com_github_aws_c_compression/aws_c_compression.BUILD.bazel @@ -0,0 +1,20 @@ +# Description: +# AWS C Compression + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_compression", + srcs = glob([ + "include/aws/compression/**/*.h", + "source/*.c", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + ], +) \ No 
newline at end of file diff --git a/third_party/com_github_aws_c_compression/workspace.bzl b/third_party/com_github_aws_c_compression/workspace.bzl new file mode 100644 index 000000000..a6330ee37 --- /dev/null +++ b/third_party/com_github_aws_c_compression/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_compression", + sha256 = "517c361f3b7fffca08efd5ad251a20489794f056eab0dfffacc6d5b341df8e86", + strip_prefix = "aws-c-compression-0.2.18", + urls = [ + "https://github.com/awslabs/aws-c-compression/archive/v0.2.18.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_compression/aws_c_compression.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_compression/system.BUILD.bazel"), + cmake_name = "aws_c_compression", + cmake_target_mapping = { + "@com_github_aws_c_compression//:aws_c_compression": "aws_c_compression::aws_c_compression", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_event_stream/aws_c_event_stream.BUILD.bazel b/third_party/com_github_aws_c_event_stream/aws_c_event_stream.BUILD.bazel new file mode 100644 index 000000000..dfa376839 --- /dev/null +++ b/third_party/com_github_aws_c_event_stream/aws_c_event_stream.BUILD.bazel 
@@ -0,0 +1,26 @@ +# Description: +# AWS C Event Stream + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_event_stream", + srcs = glob([ + "include/**/*.h", + "source/**/*.c", + ]), + hdrs = [], + defines = [], + includes = [ + "include", + ], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + "@com_github_aws_checksums//:aws_checksums", + "@com_github_aws_c_io//:aws_c_io", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_event_stream/workspace.bzl b/third_party/com_github_aws_c_event_stream/workspace.bzl new file mode 100644 index 000000000..917d432a5 --- /dev/null +++ b/third_party/com_github_aws_c_event_stream/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_event_stream", + sha256 = "c98b8fa05c2ca10aacfce7327b92a84669c2da95ccb8e7d7b3e3285fcec8beee", + strip_prefix = "aws-c-event-stream-0.4.2", + urls = [ + "https://github.com/awslabs/aws-c-event-stream/archive/v0.4.2.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_event_stream/aws_c_event_stream.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_event_stream/system.BUILD.bazel"), + cmake_name = "aws_c_event_stream", + cmake_target_mapping = { + "@com_github_aws_c_event_stream//:aws_c_event_stream": "aws_c_event_stream::aws_c_event_stream", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_http/aws_c_http.BUILD.bazel b/third_party/com_github_aws_c_http/aws_c_http.BUILD.bazel new file mode 100644 index 000000000..81fa1d460 --- /dev/null +++ b/third_party/com_github_aws_c_http/aws_c_http.BUILD.bazel @@ -0,0 +1,24 @@ +# Description: +# AWS C HTTP + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_http", + srcs = glob([ + "include/aws/http/**/*.h", + "source/*.c", + ]), + textual_hdrs = glob([ + "include/aws/http/**/*.def", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_io//:aws_c_io", + "@com_github_aws_c_compression//:aws_c_compression", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_http/workspace.bzl b/third_party/com_github_aws_c_http/workspace.bzl new file mode 100644 index 000000000..bbf05f3b0 --- /dev/null +++ b/third_party/com_github_aws_c_http/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_http", + sha256 = "a76ba75e59e1ac169df3ec00c0d1c453db1a4db85ee8acd3282a85ee63d6b31c", + strip_prefix = "aws-c-http-0.8.2", + urls = [ + "https://github.com/awslabs/aws-c-http/archive/refs/tags/v0.8.2.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_http/aws_c_http.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_http/system.BUILD.bazel"), + cmake_name = "aws_c_http", + cmake_target_mapping = { + "@com_github_aws_c_http//:aws_c_http": "aws_c_http::aws_c_http", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_io/aws_c_io.BUILD.bazel b/third_party/com_github_aws_c_io/aws_c_io.BUILD.bazel new file mode 100644 index 000000000..5f2c1e176 --- /dev/null +++ b/third_party/com_github_aws_c_io/aws_c_io.BUILD.bazel @@ -0,0 +1,45 @@ +# Description: +# AWS s2n tls + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "aws_c_io", + srcs = glob([ + "include/aws/io/**/*.h", + "source/*.h", + "source/*.c", + "source/pkcs11/**/*.h", + "source/pkcs11/**/*.c", + "source/s2n/*.h", + "source/s2n/*.c", + ]) + select({ + "@platforms//os:windows": glob([ + "source/windows/*.c", + ]), + "@platforms//os:linux": glob([ + "source/linux/*.c", + "source/posix/*.c", + ]), + "@platforms//os:osx": glob([ + "source/bsd/*.c", + "source/darwin/*.c", + 
"source/posix/*.c", + ]) + }), + defines = ["USE_S2N"], +# defines = [] + select({ +# "@platforms//os:linux": ["BYO_CRYPTO"], +# "//conditions:default": ["USE_S2N"], +# }), + + includes = ["include"], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + "@com_github_aws_c_cal//:aws_c_cal", + #"@com_github_s2n_tls//:s2n_tls", + "@com_google_boringssl//:crypto", + ] +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_io/workspace.bzl b/third_party/com_github_aws_c_io/workspace.bzl new file mode 100644 index 000000000..e0c59a360 --- /dev/null +++ b/third_party/com_github_aws_c_io/workspace.bzl @@ -0,0 +1,35 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_io", + sha256 = "3a3b7236f70209ac12b5bafa7dd81b75cc68b691a0aa0686d6d3b7e4bbe5fbc9", + strip_prefix = "aws-c-io-0.14.9", + urls = [ + "https://github.com/awslabs/aws-c-io/archive/refs/tags/v0.14.9.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_io/aws_c_io.BUILD.bazel"), + #system_build_file = Label("//third_party:com_github_aws_c_io/system.BUILD.bazel"), + cmake_name = "aws_c_io", + cmake_target_mapping = { + "@com_github_aws_c_io//:aws_c_io": "aws_c_io::aws_c_io", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_iot/aws_c_iot.BUILD.bazel b/third_party/com_github_aws_c_iot/aws_c_iot.BUILD.bazel new file mode 100644 index 000000000..164e37d0e --- /dev/null +++ b/third_party/com_github_aws_c_iot/aws_c_iot.BUILD.bazel @@ -0,0 +1,30 @@ +# Description: +# AWS C IOT + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_iot", + srcs = glob([ + "include/aws/iotdevice/**/*.h", + "source/*.c", + ]) + select({ + "@platforms//os:linux": glob([ + "source/linux/*.c", + ]), + "@platforms//os:windows": glob([ + "source/windows/*.c", + ]), + "@platforms//os:osx": glob([ + "source/apple/*.c", + ]), + }), + includes = ["include"], + deps = [ + "@com_github_aws_c_mqtt//:aws_c_mqtt", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_iot/workspace.bzl b/third_party/com_github_aws_c_iot/workspace.bzl new file mode 100644 index 000000000..6889c80d2 --- /dev/null +++ b/third_party/com_github_aws_c_iot/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_iot", + sha256 = "6b9ae985d9b019304e86e49fc6da738ed5fff3b2778ed3617db551f1e033cadf", + strip_prefix = "aws-c-iot-0.1.21", + urls = [ + "https://github.com/awslabs/aws-c-iot/archive/refs/tags/v0.1.21.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_iot/aws_c_iot.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_iot/system.BUILD.bazel"), + cmake_name = "aws_c_iot", + cmake_target_mapping = { + "@com_github_aws_c_iot//:aws_c_iot": "aws_c_iot::aws_c_iot", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_mqtt/aws_c_mqtt.BUILD.bazel b/third_party/com_github_aws_c_mqtt/aws_c_mqtt.BUILD.bazel new file mode 100644 index 000000000..95e9b22ff --- /dev/null +++ b/third_party/com_github_aws_c_mqtt/aws_c_mqtt.BUILD.bazel @@ -0,0 +1,22 @@ +# Description: +# AWS C MQTT + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_mqtt", + srcs = glob([ + "include/aws/mqtt/**/*.h", + "source/*.c", + "source/v5/*.c", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_http//:aws_c_http", + "@com_github_aws_c_io//:aws_c_io", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_mqtt/workspace.bzl b/third_party/com_github_aws_c_mqtt/workspace.bzl new file mode 100644 index 
000000000..e79fecc30 --- /dev/null +++ b/third_party/com_github_aws_c_mqtt/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_mqtt", + sha256 = "63c402b8b81b107e5c1b9b6ae0065bc025b6ad4347518bf30fbd958f999e037e", + strip_prefix = "aws-c-mqtt-0.10.1", + urls = [ + "https://github.com/awslabs/aws-c-mqtt/archive/refs/tags/v0.10.1.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_mqtt/aws_c_mqtt.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_mqtt/system.BUILD.bazel"), + cmake_name = "aws_c_mqtt", + cmake_target_mapping = { + "@com_github_aws_c_mqtt//:aws_c_mqtt": "aws_c_mqtt::aws_c_mqtt", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_s3/aws_c_s3.BUILD.bazel b/third_party/com_github_aws_c_s3/aws_c_s3.BUILD.bazel new file mode 100644 index 000000000..e6d237dc1 --- /dev/null +++ b/third_party/com_github_aws_c_s3/aws_c_s3.BUILD.bazel @@ -0,0 +1,22 @@ +# Description: +# AWS C S3 + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_s3", + srcs = glob([ + "include/aws/s3/**/*.h", + "source/*.c", + ]), + includes = ["include"], + deps = [ + 
"@com_github_aws_c_auth//:aws_c_auth", + "@com_github_aws_c_http//:aws_c_http", + "@com_github_aws_checksums//:aws_checksums", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_s3/workspace.bzl b/third_party/com_github_aws_c_s3/workspace.bzl new file mode 100644 index 000000000..b21eb4631 --- /dev/null +++ b/third_party/com_github_aws_c_s3/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_s3", + sha256 = "28c03a19e52790cfa66e8d63c610734112edb36cc3c525712f18da4f0990a7b8", + strip_prefix = "aws-c-s3-0.5.10", + urls = [ + "https://github.com/awslabs/aws-c-s3/archive/refs/tags/v0.5.10.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_s3/aws_c_s3.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_s3/system.BUILD.bazel"), + cmake_name = "aws_c_s3", + cmake_target_mapping = { + "@com_github_aws_c_s3//:aws_c_s3": "aws_c_s3::aws_c_s3", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_c_sdkutils/aws_c_sdkutils.BUILD.bazel b/third_party/com_github_aws_c_sdkutils/aws_c_sdkutils.BUILD.bazel new file mode 100644 index 000000000..cbdf06ba2 --- /dev/null +++ b/third_party/com_github_aws_c_sdkutils/aws_c_sdkutils.BUILD.bazel 
@@ -0,0 +1,20 @@ +# Description: +# AWS C SDK Utils + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_c_sdkutils", + srcs = glob([ + "include/aws/sdkutils/**/*.h", + "source/**/*.c", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_c_sdkutils/workspace.bzl b/third_party/com_github_aws_c_sdkutils/workspace.bzl new file mode 100644 index 000000000..962ee1a32 --- /dev/null +++ b/third_party/com_github_aws_c_sdkutils/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_c_sdkutils", + sha256 = "4a818563d7c6636b5b245f5d22d4d7c804fa33fc4ea6976e9c296d272f4966d3", + strip_prefix = "aws-c-sdkutils-0.1.16", + urls = [ + "https://github.com/awslabs/aws-c-sdkutils/archive/refs/tags/v0.1.16.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_c_sdkutils/aws_c_sdkutils.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_c_sdkutils/system.BUILD.bazel"), + cmake_name = "aws_c_sdkutils", + cmake_target_mapping = { + "@com_github_aws_c_sdkutils//:aws_c_sdkutils": "aws_c_sdkutils::aws_c_sdkutils", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_checksums/aws_checksums.BUILD.bazel b/third_party/com_github_aws_checksums/aws_checksums.BUILD.bazel new file mode 100644 index 000000000..08ec386cb --- /dev/null +++ b/third_party/com_github_aws_checksums/aws_checksums.BUILD.bazel @@ -0,0 +1,24 @@ +# Description: +# AWS CheckSums + +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "aws_checksums", + srcs = glob([ + "include/aws/checksums/*.h", + "include/aws/checksums/private/*.h", + "source/*.c", + "source/generic/*.c", + ]), + includes = ["include"], + deps = [ + "@com_github_aws_c_common//:aws_c_common", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_checksums/workspace.bzl b/third_party/com_github_aws_checksums/workspace.bzl new file mode 100644 index 000000000..6d8cef89f --- /dev/null +++ b/third_party/com_github_aws_checksums/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_checksums", + sha256 = "bdba9d0a8b8330a89c6b8cbc00b9aa14f403d3449b37ff2e0d96d62a7301b2ee", + strip_prefix = "aws-checksums-0.1.18", + urls = [ + "https://github.com/awslabs/aws-checksums/archive/v0.1.18.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_checksums/aws_checksums.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_checksums/system.BUILD.bazel"), + cmake_name = "aws_checksums", + cmake_target_mapping = { + "@com_github_aws_checksums//:aws_checksums": "aws_checksums::aws_checksums", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_cpp_crt/aws_cpp_crt.BUILD.bazel b/third_party/com_github_aws_cpp_crt/aws_cpp_crt.BUILD.bazel new file mode 100644 index 000000000..327dc0430 --- /dev/null +++ b/third_party/com_github_aws_cpp_crt/aws_cpp_crt.BUILD.bazel @@ -0,0 +1,44 @@ +# Description: +# AWS CPP CRT + +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +write_file( + name = "generated_config_h", + out = "include/aws/crt/Config.h", + + content = [ + "#pragma once", + "#define AWS_CRT_CPP_VERSION \"0.27.1\"", + "#define AWS_CRT_CPP_VERSION_MAJOR 0", + "#define AWS_CRT_CPP_VERSION_MINOR 27", + "#define AWS_CRT_CPP_VERSION_PATCH 1", + 
"#define AWS_CRT_CPP_GIT_HASH \"635106906bf8dc0b877d962613f12f019f03e10a\"", + ] +) + +cc_library( + name = "aws_cpp_crt", + hdrs = [":generated_config_h"], + srcs = glob([ + "include/**/*.h", + "source/**/*.cpp", + ]), + includes = ["include"], + # https://docs.aws.amazon.com/sdkref/latest/guide/common-runtime.html#crt-dep + deps = [ + "@com_github_aws_c_event_stream//:aws_c_event_stream", + "@com_github_aws_c_auth//:aws_c_auth", + "@com_github_aws_c_cal//:aws_c_cal", + "@com_github_aws_c_iot//:aws_c_iot", + "@com_github_aws_c_mqtt//:aws_c_mqtt", + "@com_github_aws_c_s3//:aws_c_s3", + "@com_github_aws_checksums//:aws_checksums", + ], +) \ No newline at end of file diff --git a/third_party/com_github_aws_cpp_crt/workspace.bzl b/third_party/com_github_aws_cpp_crt/workspace.bzl new file mode 100644 index 000000000..84efc38e7 --- /dev/null +++ b/third_party/com_github_aws_cpp_crt/workspace.bzl @@ -0,0 +1,34 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_cpp_crt", + sha256 = "9689854b67b1a436b1cd31aae75eed8669fbb8d6240fe36684133f93e345f1ac", + strip_prefix = "aws-crt-cpp-0.27.1", + urls = [ + "https://github.com/awslabs/aws-crt-cpp/archive/refs/tags/v0.27.1.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_cpp_crt/aws_cpp_crt.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_cpp_crt/system.BUILD.bazel"), + cmake_name = "aws_cpp_crt", + cmake_target_mapping = { + "@com_github_aws_cpp_crt//:aws_cpp_crt": "aws_cpp_crt::aws_cpp_crt", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_aws_cpp_sdk/aws_cpp_sdk.BUILD.bazel b/third_party/com_github_aws_cpp_sdk/aws_cpp_sdk.BUILD.bazel new file mode 100644 index 000000000..6dd8cb9fe --- /dev/null +++ b/third_party/com_github_aws_cpp_sdk/aws_cpp_sdk.BUILD.bazel @@ -0,0 +1,188 @@ +# Description: +# AWS C++ SDK + +load("@bazel_skylib//rules:write_file.bzl", "write_file") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + +cc_library( + name = "smithy", + srcs = glob([ + "src/aws-cpp-sdk-core/source/smithy/**/*.cpp", + ]), + hdrs = glob([ + "src/aws-cpp-sdk-core/include/smithy/**/*.h", + ]), + includes = [ + "src/aws-cpp-sdk-core/include", + "src/aws-cpp-sdk-core/include/smithy/tracing/impl", + ], +) + + +write_file( + name = "write_SDKConfig_h", + out = "src/aws-cpp-sdk-core/include/aws/core/SDKConfig.h", + newline = "auto", + + content = [ + "#undef USE_AWS_MEMORY_MANAGEMENT", + "#define ENABLE_CURL_CLIENT 0", + ] + select({ + "@platforms//os:windows": [ + "#define WIN32_LEAN_AND_MEAN", + "#include ", + "#define PLATFORM_WINDOWS", + ], + "@platforms//os:macos": [ + "#define PLATFORM_MACOS" + ], + "//conditions:default": [ + "#define PLATFORM_LINUX", 
+        ],
+    }),
+)
+
+cc_library(
+    name = "core",
+    srcs = glob([
+        "src/aws-cpp-sdk-core/source/*.cpp",
+        "src/aws-cpp-sdk-core/source/endpoint/*.cpp",
+        "src/aws-cpp-sdk-core/source/endpoint/internal/*.cpp",
+        "src/aws-cpp-sdk-core/source/http/*.cpp",
+        "src/aws-cpp-sdk-core/source/http/crt/*.cpp",
+        "src/aws-cpp-sdk-core/source/http/curl/*.cpp",
+        "src/aws-cpp-sdk-core/source/http/standard/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/memory/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/memory/stl/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/component-registry/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/crypto/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/crypto/factory/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/crypto/crt/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/logging/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/event/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/threading/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/stream/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/base64/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/json/*.cpp",
+        "src/aws-cpp-sdk-core/source/utils/xml/*.cpp",
+        "src/aws-cpp-sdk-core/source/external/*.cpp",
+        "src/aws-cpp-sdk-core/source/external/tinyxml2/*.cpp",
+        "src/aws-cpp-sdk-core/source/external/cjson/*.cpp",
+        "src/aws-cpp-sdk-core/source/platform/*.cpp",
+        "src/aws-cpp-sdk-core/source/config/*.cpp",
+        "src/aws-cpp-sdk-core/source/config/defaults/*.cpp",
+        "src/aws-cpp-sdk-core/source/internal/*.cpp",
+        "src/aws-cpp-sdk-core/source/client/*.cpp",
+        "src/aws-cpp-sdk-core/source/auth/*.cpp",
+        "src/aws-cpp-sdk-core/source/auth/signer/*.cpp",
+        "src/aws-cpp-sdk-core/source/auth/bearer-token-provider/*.cpp",
+        "src/aws-cpp-sdk-core/source/auth/signer-provider/*.cpp",
+        "src/aws-cpp-sdk-core/source/smithy/*.cpp",
+        "src/aws-cpp-sdk-core/source/smithy/tracing/*.cpp",
+        "src/aws-cpp-sdk-core/source/smithy/tracing/impl/*.cpp",
+        #"src/aws-cpp-sdk-core/source/smithy/tracing/impl/opentelemetry/*.cpp",
+        "src/aws-cpp-sdk-core/source/monitoring/*.cpp",
+    ]) + select({
+        "@platforms//os:windows": glob([
+            "src/aws-cpp-sdk-core/source/http/windows/*.cpp",
+            "src/aws-cpp-sdk-core/source/net/windows/*.cpp",
+            "src/aws-cpp-sdk-core/source/platform/windows/*.cpp",
+        ]),
+        "@platforms//os:linux": glob([
+            "src/aws-cpp-sdk-core/source/net/linux-shared/*.cpp",
+            "src/aws-cpp-sdk-core/source/platform/linux-shared/*.cpp",
+        ]),
+        "//conditions:default": glob([
+            # http/standard/*.cpp already appears in the unconditional glob above.
+            "src/aws-cpp-sdk-core/source/net/*.cpp",
+        ]),
+    }),
+    hdrs = [
+        ":write_SDKConfig_h",
+    ] + glob([
+        "src/aws-cpp-sdk-core/include/aws/core/**/*.h",
+    ]),
+    defines = [],
+    includes = [
+        "src/aws-cpp-sdk-core/include",
+        "src/aws-cpp-sdk-core/include/smithy/tracing/impl",
+    ],
+    linkopts = select({
+        "@platforms//os:windows": [
+            "-DEFAULTLIB:userenv.lib",
+            "-DEFAULTLIB:version.lib",
+        ],
+        "//conditions:default": [],
+    }),
+    deps = [
+        "@com_github_aws_cpp_crt//:aws_cpp_crt",
+        "@se_curl//:curl",
+        ":smithy",
+    ],
+)
+
+cc_library(
+    name = "s3",
+    srcs = glob([
+        "generated/src/aws-cpp-sdk-s3/source/*.cpp",  # AWS_S3_SOURCE
+        "generated/src/aws-cpp-sdk-s3/source/model/*.cpp",  # AWS_S3_MODEL_SOURCE
+    ]),
+    hdrs = glob([
+        "generated/src/aws-cpp-sdk-s3/include/aws/s3/*.h",  # AWS_S3_HEADERS
+        "generated/src/aws-cpp-sdk-s3/include/aws/s3/model/*.h",  # AWS_S3_MODEL_HEADERS
+    ]),
+    includes = [
+        "generated/src/aws-cpp-sdk-s3/include",
+    ],
+    deps = [
+        ":core",
+    ],
+)
+
+cc_library(
+    name = "s3_encryption",
+    srcs = glob([
+        "src/aws-cpp-sdk-s3-encryption/source/*.cpp",
+        "src/aws-cpp-sdk-s3-encryption/source/handlers/*.cpp",
+        "src/aws-cpp-sdk-s3-encryption/source/materials/*.cpp",
+        "src/aws-cpp-sdk-s3-encryption/source/modules/*.cpp",
+    ]),
+    hdrs = glob([
+        "src/aws-cpp-sdk-s3-encryption/include/aws/s3-encryption/*.h",
+        "src/aws-cpp-sdk-s3-encryption/include/aws/s3-encryption/handlers/*.h",
+        "src/aws-cpp-sdk-s3-encryption/include/aws/s3-encryption/materials/*.h",
+        "src/aws-cpp-sdk-s3-encryption/include/aws/s3-encryption/modules/*.h",
+    ]),
+    includes = [
+        "src/aws-cpp-sdk-s3-encryption/include",
+    ],
+    deps = [
+        ":core",
+    ],
+)
+
+
+cc_library(
+    name = "transfer",
+    srcs = glob([
+        "src/aws-cpp-sdk-transfer/source/transfer/*.cpp",  # TRANSFER_SOURCE
+    ]),
+    hdrs = glob([
+        "src/aws-cpp-sdk-transfer/include/aws/transfer/*.h",  # TRANSFER_HEADERS
+    ]),
+    includes = [
+        "src/aws-cpp-sdk-transfer/include",
+    ],
+    deps = [
+        ":core",
+        ":s3",
+    ],
+)
+
+
diff --git a/third_party/com_github_aws_cpp_sdk/patches/update_sdk.diff b/third_party/com_github_aws_cpp_sdk/patches/update_sdk.diff
new file mode 100644
index 000000000..cf493191e
--- /dev/null
+++ b/third_party/com_github_aws_cpp_sdk/patches/update_sdk.diff
@@ -0,0 +1,35 @@
+diff --git a/aws-cpp-sdk-core/include/aws/core/external/tinyxml2/tinyxml2.h b/aws-cpp-sdk-core/include/aws/core/external/tinyxml2/tinyxml2.h
+index 3721ed1..73ac4a3 100644
+--- a/aws-cpp-sdk-core/include/aws/core/external/tinyxml2/tinyxml2.h
++++ b/aws-cpp-sdk-core/include/aws/core/external/tinyxml2/tinyxml2.h
+@@ -80,7 +80,7 @@ This file has been modified from its original version by Amazon:
+ #endif // AWS_CORE_EXPORTS
+ #endif // USE_IMPORT_EXPORT
+ #elif __GNUC__ >= 4
+-    #define TINYXML2_LIB __attribute__((visibility("default")))
++    #define TINYXML2_LIB
+ #endif // _WIN32
+ 
+ #ifndef TINYXML2_LIB
+diff --git a/aws-cpp-sdk-core/source/client/AWSClient.cpp b/aws-cpp-sdk-core/source/client/AWSClient.cpp
+index 4b2a38b..5198448 100644
+--- a/aws-cpp-sdk-core/source/client/AWSClient.cpp
++++ b/aws-cpp-sdk-core/source/client/AWSClient.cpp
+@@ -232,7 +232,7 @@ HttpResponseOutcome AWSClient::AttemptExhaustively(const Aws::Http::URI& uri,
+     const char* signerRegion = signerRegionOverride;
+     Aws::String regionFromResponse;
+ 
+-    Aws::String invocationId = UUID::RandomUUID();
++    Aws::String invocationId = Aws::Utils::UUID::RandomUUID();
+     RequestInfo requestInfo;
requestInfo.attempt = 1; + requestInfo.maxAttempts = 0; +@@ -358,7 +358,7 @@ HttpResponseOutcome AWSClient::AttemptExhaustively(const Aws::Http::URI& uri, + const char* signerRegion = signerRegionOverride; + Aws::String regionFromResponse; + +- Aws::String invocationId = UUID::RandomUUID(); ++ Aws::String invocationId = Aws::Utils::UUID::RandomUUID(); + RequestInfo requestInfo; + requestInfo.attempt = 1; + requestInfo.maxAttempts = 0; diff --git a/third_party/com_github_aws_cpp_sdk/workspace.bzl b/third_party/com_github_aws_cpp_sdk/workspace.bzl new file mode 100644 index 000000000..8f28d6777 --- /dev/null +++ b/third_party/com_github_aws_cpp_sdk/workspace.bzl @@ -0,0 +1,38 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_aws_cpp_sdk", + # patches = [ + # Label("//third_party:com_github_aws_cpp_sdk/patches/update_sdk.diff"), + # ], + # patch_args = ["-p1"], + sha256 = "c2a6977eb2a10066922b72e3876bccccea2902f87f9e86f978bcd3fb50a0adcc", + strip_prefix = "aws-sdk-cpp-1.11.361", + urls = [ + "https://github.com/aws/aws-sdk-cpp/archive/refs/tags/1.11.361.tar.gz", + ], + build_file = Label("//third_party:com_github_aws_cpp_sdk/aws_cpp_sdk.BUILD.bazel"), + system_build_file = Label("//third_party:com_github_aws_cpp_sdk/system.BUILD.bazel"), + cmake_name = "aws_cpp_sdk", + cmake_target_mapping = { + "@com_github_aws_cpp_sdk//:aws_cpp_sdk": "aws_cpp_sdk::aws_cpp_sdk", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/com_github_s2n_tls/s2n_tls.BUILD.bazel b/third_party/com_github_s2n_tls/s2n_tls.BUILD.bazel new file mode 100644 index 000000000..4233876d9 --- /dev/null +++ b/third_party/com_github_s2n_tls/s2n_tls.BUILD.bazel @@ -0,0 +1,25 @@ +# Description: +# AWS s2n tls + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +cc_library( + name = "s2n_tls", + srcs = glob([ + "crypto/*.h", + "crypto/*.c", + "error/*.h", + "error/*.c", + "stuffer/*.h", + "stuffer/*.c", + "tls/**/*.h", + "tls/**/*.c", + "utils/*.h", + "utils/*.c" + ]), + hdrs = glob(["api/**/*.h"]), + includes = ["api"], + deps = ["@com_google_boringssl//:crypto"] +) \ No newline at end of file diff --git a/third_party/com_github_s2n_tls/workspace.bzl b/third_party/com_github_s2n_tls/workspace.bzl new file mode 100644 index 000000000..ed874c9fa --- /dev/null +++ b/third_party/com_github_s2n_tls/workspace.bzl @@ -0,0 +1,35 @@ +# Copyright 2024 The TensorStore Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("//third_party:repo.bzl", "third_party_http_archive") +load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") + + +def repo(): + maybe( + third_party_http_archive, + name = "com_github_s2n_tls", + sha256 = "84fdbaa894c722bf13cac87b8579f494c1c2d66de642e5e6104638fddea76ad9", + strip_prefix = "s2n-tls-1.4.16", + urls = [ + "https://github.com/aws/s2n-tls/archive/refs/tags/v1.4.16.tar.gz", + ], + build_file = Label("//third_party:com_github_s2n_tls/s2n_tls.BUILD.bazel"), + #system_build_file = Label("//third_party:com_github_s2n_tls/system.BUILD.bazel"), + cmake_name = "s2n_tls", + cmake_target_mapping = { + "@com_github_s2n_tls//:s2n_tls": "s2n_tls::s2n_tls", + }, + bazel_to_cmake = {}, + ) diff --git a/third_party/third_party.bzl b/third_party/third_party.bzl index 49ccf805a..8a0a27f47 100644 --- a/third_party/third_party.bzl +++ b/third_party/third_party.bzl @@ -1,10 +1,25 @@ load("//third_party:bazel_skylib/workspace.bzl", repo_bazel_skylib = "repo") load("//third_party:blake3/workspace.bzl", repo_blake3 = "repo") +load("//third_party:com_github_aws_c_auth/workspace.bzl", repo_com_github_aws_c_auth = "repo") +load("//third_party:com_github_aws_c_cal/workspace.bzl", repo_com_github_aws_c_cal = "repo") +load("//third_party:com_github_aws_c_common/workspace.bzl", repo_com_github_aws_c_common = "repo") +load("//third_party:com_github_aws_c_compression/workspace.bzl", repo_com_github_aws_c_compression = "repo") +load("//third_party:com_github_aws_c_event_stream/workspace.bzl", repo_com_github_aws_c_event_stream = "repo") 
+load("//third_party:com_github_aws_c_http/workspace.bzl", repo_com_github_aws_c_http = "repo") +load("//third_party:com_github_aws_c_io/workspace.bzl", repo_com_github_aws_c_io = "repo") +load("//third_party:com_github_aws_c_iot/workspace.bzl", repo_com_github_aws_c_iot = "repo") +load("//third_party:com_github_aws_c_mqtt/workspace.bzl", repo_com_github_aws_c_mqtt = "repo") +load("//third_party:com_github_aws_c_s3/workspace.bzl", repo_com_github_aws_c_s3 = "repo") +load("//third_party:com_github_aws_c_sdkutils/workspace.bzl", repo_com_github_aws_c_sdkutils = "repo") +load("//third_party:com_github_aws_checksums/workspace.bzl", repo_com_github_aws_checksums = "repo") +load("//third_party:com_github_aws_cpp_crt/workspace.bzl", repo_com_github_aws_cpp_crt = "repo") +load("//third_party:com_github_aws_cpp_sdk/workspace.bzl", repo_com_github_aws_cpp_sdk = "repo") load("//third_party:com_github_cares_cares/workspace.bzl", repo_com_github_cares_cares = "repo") load("//third_party:com_github_cncf_udpa/workspace.bzl", repo_com_github_cncf_udpa = "repo") load("//third_party:com_github_grpc_grpc/workspace.bzl", repo_com_github_grpc_grpc = "repo") load("//third_party:com_github_nlohmann_json/workspace.bzl", repo_com_github_nlohmann_json = "repo") load("//third_party:com_github_pybind_pybind11/workspace.bzl", repo_com_github_pybind_pybind11 = "repo") +load("//third_party:com_github_s2n_tls/workspace.bzl", repo_com_github_s2n_tls = "repo") load("//third_party:com_google_absl/workspace.bzl", repo_com_google_absl = "repo") load("//third_party:com_google_benchmark/workspace.bzl", repo_com_google_benchmark = "repo") load("//third_party:com_google_boringssl/workspace.bzl", repo_com_google_boringssl = "repo") @@ -46,11 +61,26 @@ load("//third_party:tinyxml2/workspace.bzl", repo_tinyxml2 = "repo") def third_party_dependencies(): repo_bazel_skylib() repo_blake3() + repo_com_github_aws_c_auth() + repo_com_github_aws_c_cal() + repo_com_github_aws_c_common() + 
repo_com_github_aws_c_compression() + repo_com_github_aws_c_event_stream() + repo_com_github_aws_c_http() + repo_com_github_aws_c_io() + repo_com_github_aws_c_iot() + repo_com_github_aws_c_mqtt() + repo_com_github_aws_c_s3() + repo_com_github_aws_c_sdkutils() + repo_com_github_aws_checksums() + repo_com_github_aws_cpp_crt() + repo_com_github_aws_cpp_sdk() repo_com_github_cares_cares() repo_com_github_cncf_udpa() repo_com_github_grpc_grpc() repo_com_github_nlohmann_json() repo_com_github_pybind_pybind11() + repo_com_github_s2n_tls() repo_com_google_absl() repo_com_google_benchmark() repo_com_google_boringssl()