@@ -64,10 +64,31 @@ const struct aws_byte_cursor g_user_agent_header_unknown = AWS_BYTE_CUR_INIT_FRO
 
 const uint32_t g_s3_max_num_upload_parts = 10000;
 const size_t g_s3_min_upload_part_size = MB_TO_BYTES(5);
-const size_t g_streaming_buffer_size = MB_TO_BYTES(8);
+
 const double g_default_throughput_target_gbps = 10.0;
-/* TODO: disable this threshold until we have a better option for threshold */
-const uint64_t g_streaming_object_size_threshold = UINT64_MAX;
+
+/**
+ * Streaming buffer size selection, based on experimental results on EBS:
+ *
+ * - Buffers that are too small (e.g., 16 KiB) hurt disk read performance,
+ *   achieving only 6.73 Gbps of throughput from EBS.
+ * - Buffers that are too large let network connections starve more easily
+ *   when disk reads cannot supply data fast enough.
+ * - A 1 MiB buffer strikes the best balance: it sustains disk read throughput
+ *   while keeping the retry rate caused by connection starvation reasonable.
+ */
+const size_t g_streaming_buffer_size = MB_TO_BYTES(1);
+
+/**
+ * The streaming approach reduces memory consumption without introducing unexpected errors
+ * or performance degradation.
+ *
+ * We start streaming for objects larger than 1 TiB, and plan to lower this threshold in future iterations.
+ *
+ * The 1 TiB threshold was chosen to minimize the blast radius of this behavioral change
+ * while still providing meaningful memory savings for large objects.
+ */
+const uint64_t g_streaming_object_size_threshold = TB_TO_BYTES(1);
 
 void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest) {
     AWS_PRECONDITION(src);
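
The hunk explains why a 1 MiB buffer was chosen but does not show the read loop that consumes it. The sketch below illustrates the bounded-memory pattern the comment describes: read at most one buffer's worth from disk per iteration, then hand the chunk off before reading more, so memory stays at roughly one buffer per in-flight read regardless of object size. This is a minimal illustration under stated assumptions, not aws-c-s3's actual read path; stream_file, send_chunk, count_bytes, and the local MB_TO_BYTES macro are hypothetical stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-in for the library's MB_TO_BYTES macro. */
#define MB_TO_BYTES(mb) ((size_t)(mb) * 1024 * 1024)

static const size_t streaming_buffer_size = MB_TO_BYTES(1);

typedef bool (*chunk_fn)(const char *data, size_t len, void *user_data);

/* Hypothetical streaming loop: each iteration reads at most one
 * buffer's worth from disk, then hands the chunk to a callback that
 * stands in for the real upload machinery. */
static bool stream_file(FILE *fp, chunk_fn send_chunk, void *user_data) {
    char *buffer = malloc(streaming_buffer_size);
    if (!buffer) {
        return false;
    }
    bool ok = true;
    for (;;) {
        /* Per the comment in the hunk: a much smaller buffer (e.g. 16 KiB)
         * costs disk read throughput, while a much larger one lets
         * connections starve when the disk cannot refill it fast enough. */
        size_t n = fread(buffer, 1, streaming_buffer_size, fp);
        if (n == 0) {
            ok = !ferror(fp);
            break;
        }
        if (!send_chunk(buffer, n, user_data)) {
            ok = false;
            break;
        }
    }
    free(buffer);
    return ok;
}

/* Example callback: count bytes instead of sending them anywhere. */
static bool count_bytes(const char *data, size_t len, void *user_data) {
    (void)data;
    *(uint64_t *)user_data += len;
    return true;
}

int main(void) {
    FILE *fp = fopen("example.bin", "rb"); /* hypothetical input file */
    if (!fp) {
        return 1;
    }
    uint64_t total = 0;
    bool ok = stream_file(fp, count_bytes, &total);
    fclose(fp);
    printf("streamed %llu bytes (%s)\n", (unsigned long long)total, ok ? "ok" : "error");
    return ok ? 0 : 1;
}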
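Similarly, the threshold constant only matters at the point where the client decides which path an object takes. Below is a minimal sketch of that gate, assuming a hypothetical should_stream_object helper and a locally re-derived TB_TO_BYTES (the real macro lives in the library's headers); the strict greater-than comparison matches the "larger than 1 TiB" wording of the comment, but the actual dispatch logic is not shown in this hunk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the library's TB_TO_BYTES macro; 1 TiB = 2^40 bytes. */
#define TB_TO_BYTES(tb) ((uint64_t)(tb) * 1024 * 1024 * 1024 * 1024)

static const uint64_t streaming_object_size_threshold = TB_TO_BYTES(1);

/* Hypothetical gate: strictly greater-than, so a 1 TiB object itself
 * keeps the existing buffered path and only larger objects take the new
 * streaming path, keeping the blast radius of the change small. */
static bool should_stream_object(uint64_t object_size) {
    return object_size > streaming_object_size_threshold;
}

int main(void) {
    printf("1 TiB       -> %s\n", should_stream_object(TB_TO_BYTES(1)) ? "stream" : "buffer");
    printf("1 TiB + 1 B -> %s\n", should_stream_object(TB_TO_BYTES(1) + 1) ? "stream" : "buffer");
    return 0;
}

Note that the removed UINT64_MAX value effectively disabled streaming, since no object size can exceed it; replacing it with TB_TO_BYTES(1) turns the feature on for only the very largest objects.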