24 changes: 16 additions & 8 deletions plugins/out_s3/s3.c
@@ -1336,11 +1336,11 @@ static int put_all_chunks(struct flb_s3 *ctx)
                 continue;
             }
 
-            if (chunk->failures >= MAX_UPLOAD_ERRORS) {
+            if (chunk->failures >= ctx->ins->retry_limit) {
                 flb_plg_warn(ctx->ins,
                              "Chunk for tag %s failed to send %i times, "
                              "will not retry",
-                             (char *) fsf->meta_buf, MAX_UPLOAD_ERRORS);
+                             (char *) fsf->meta_buf, ctx->ins->retry_limit);
                 flb_fstore_file_inactive(ctx->fs, fsf);
                 continue;
             }
@@ -1625,7 +1625,7 @@ static struct multipart_upload *get_upload(struct flb_s3 *ctx,
         if (tmp_upload->upload_state == MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS) {
             continue;
         }
-        if (tmp_upload->upload_errors >= MAX_UPLOAD_ERRORS) {
+        if (tmp_upload->upload_errors >= ctx->ins->retry_limit) {
             tmp_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
             flb_plg_error(ctx->ins, "Upload for %s has reached max upload errors",
                           tmp_upload->s3_key);
@@ -1871,7 +1871,7 @@ static void s3_upload_queue(struct flb_config *config, void *out_context)
 
         /* If retry limit was reached, discard file and remove file from queue */
         upload_contents->retry_counter++;
-        if (upload_contents->retry_counter >= MAX_UPLOAD_ERRORS) {
+        if (upload_contents->retry_counter >= ctx->ins->retry_limit) {
             flb_plg_warn(ctx->ins, "Chunk file failed to send %d times, will not "
                          "retry", upload_contents->retry_counter);
             s3_store_file_inactive(ctx, upload_contents->upload_file);
@@ -3272,6 +3272,14 @@ static void cb_s3_upload(struct flb_config *config, void *data)
         if (ret != FLB_OK) {
             flb_plg_error(ctx->ins, "Could not send chunk with tag %s",
                           (char *) fsf->meta_buf);
+            if (chunk->failures >= ctx->ins->retry_limit) {
+                flb_plg_warn(ctx->ins,
+                             "Chunk for tag %s failed to send %i times, "
+                             "will not retry",
+                             (char *) fsf->meta_buf, ctx->ins->retry_limit);
+                flb_fstore_file_inactive(ctx->fs, fsf);
+                continue;
+            }
         }
     }
 
@@ -3280,7 +3288,7 @@ static void cb_s3_upload(struct flb_config *config, void *data)
         m_upload = mk_list_entry(head, struct multipart_upload, _head);
         complete = FLB_FALSE;
 
-        if (m_upload->complete_errors >= MAX_UPLOAD_ERRORS) {
+        if (m_upload->complete_errors >= ctx->ins->retry_limit) {
             flb_plg_error(ctx->ins,
                           "Upload for %s has reached max completion errors, "
                           "plugin will give up", m_upload->s3_key);
@@ -3789,10 +3797,10 @@ static void cb_s3_flush(struct flb_event_chunk *event_chunk,
                           m_upload_file, file_first_log_time);
     }
 
-    /* Discard upload_file if it has failed to upload MAX_UPLOAD_ERRORS times */
-    if (upload_file != NULL && upload_file->failures >= MAX_UPLOAD_ERRORS) {
+    /* Discard upload_file if it has failed to upload retry_limit times */
+    if (upload_file != NULL && upload_file->failures >= ctx->ins->retry_limit) {
         flb_plg_warn(ctx->ins, "File with tag %s failed to send %d times, will not "
-                     "retry", event_chunk->tag, MAX_UPLOAD_ERRORS);
+                     "retry", event_chunk->tag, ctx->ins->retry_limit);
         s3_store_file_inactive(ctx, upload_file);
         upload_file = NULL;
     }
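The s3.c hunks above compare the plugin's failure counters against ctx->ins->retry_limit, the per-output retry limit that Fluent Bit populates from the generic Retry_Limit option of an [OUTPUT] section. Below is a minimal classic-mode configuration sketch; the bucket and region values are placeholders, and Retry_Limit 5 is chosen only to mirror the previously hard-coded MAX_UPLOAD_ERRORS value.

    [OUTPUT]
        Name         s3
        Match        *
        bucket       my-example-bucket
        region       us-east-1
        # With this patch, chunks and uploads are abandoned once they have
        # failed Retry_Limit times instead of a fixed five times.
        Retry_Limit  5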
5 changes: 3 additions & 2 deletions plugins/out_s3/s3.h
@@ -56,8 +56,9 @@
  *
  * The same is done for chunks, just to be safe, even though realistically
  * I can't think of a reason why a chunk could become unsendable.
+ *
+ * The retry limit is now configurable via the retry_limit parameter.
  */
-#define MAX_UPLOAD_ERRORS 5
 
 struct upload_queue {
     struct s3_file *upload_file;
@@ -96,7 +97,7 @@ struct multipart_upload {
 
     struct mk_list _head;
 
-    /* see note for MAX_UPLOAD_ERRORS */
+    /* see note for retry_limit configuration */
     int upload_errors;
     int complete_errors;
 };
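For readers skimming the diff, the sketch below condenses the pattern the patch applies at each call site: count failed send attempts and stop retrying once the counter reaches the configured limit. It is a standalone illustration only; the struct and function names (pending_chunk, record_send_failure) are hypothetical and are not symbols from the plugin.

#include <stdio.h>

struct pending_chunk {
    const char *tag;   /* routing tag of the buffered data */
    int failures;      /* incremented on each failed send attempt */
};

/* Returns 1 if the chunk should be discarded, 0 if it may be retried later. */
static int record_send_failure(struct pending_chunk *chunk, int retry_limit)
{
    chunk->failures++;
    if (chunk->failures >= retry_limit) {
        fprintf(stderr, "chunk for tag %s failed %d times, will not retry\n",
                chunk->tag, chunk->failures);
        return 1;  /* caller marks the chunk inactive, as the plugin does */
    }
    return 0;
}

int main(void)
{
    struct pending_chunk chunk = { "app.logs", 0 };
    int retry_limit = 3;   /* would come from the output instance configuration */
    int discarded = 0;

    while (!discarded) {
        /* pretend every send attempt fails */
        discarded = record_send_failure(&chunk, retry_limit);
    }
    return 0;
}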