
Commit 5c7807c

add chain_future to send method

1 parent f0a57a6

File tree

2 files changed: +15 −4 lines


kafka/producer/kafka.py

Lines changed: 4 additions & 2 deletions

@@ -538,7 +538,7 @@ def _estimate_size_in_bytes(self, key, value, headers=[]):
         return LegacyRecordBatchBuilder.estimate_size_in_bytes(
             magic, self.config['compression_type'], key, value)
 
-    def send(self, topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None):
+    def send(self, topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None, chain_future=None):
         """Publish a message to a topic.
 
         Arguments:
@@ -563,6 +563,7 @@ def send(self, topic, value=None, key=None, headers=None, partition=None, timest
                 are tuples of str key and bytes value.
             timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970 UTC)
                 to use as the message timestamp. Defaults to current time.
+            chain_future (Future, optional): chained success and failure method
 
         Returns:
             FutureRecordMetadata: resolves to RecordMetadata
@@ -603,7 +604,8 @@ def send(self, topic, value=None, key=None, headers=None, partition=None, timest
         result = self._accumulator.append(tp, timestamp_ms,
                                           key_bytes, value_bytes, headers,
                                           self.config['max_block_ms'],
-                                          estimated_size=message_size)
+                                          estimated_size=message_size,
+                                          chain_future=chain_future)
         future, batch_is_full, new_batch_created = result
         if batch_is_full or new_batch_created:
             log.debug("Waking up the sender since %s is either full or"

kafka/producer/record_accumulator.py

Lines changed: 11 additions & 2 deletions

@@ -7,6 +7,7 @@
 import time
 
 import kafka.errors as Errors
+from kafka.future import Future
 from kafka.producer.buffer import SimpleBufferPool
 from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
 from kafka.record.memory_records import MemoryRecordsBuilder
@@ -198,7 +199,7 @@ def __init__(self, **configs):
         self._drain_index = 0
 
     def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms,
-               estimated_size=0):
+               estimated_size=0, chain_future=None):
         """Add a record to the accumulator, return the append result.
 
         The append result will contain the future metadata, and flag for
@@ -213,12 +214,14 @@ def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms,
             headers (List[Tuple[str, bytes]]): The header fields for the record
             max_time_to_block_ms (int): The maximum time in milliseconds to
                 block for buffer memory to be available
-
+            chain_future (Future): chain future
         Returns:
             tuple: (future, batch_is_full, new_batch_created)
         """
         assert isinstance(tp, TopicPartition), 'not TopicPartition'
         assert not self._closed, 'RecordAccumulator is closed'
+        if chain_future is not None:
+            assert isinstance(chain_future, Future), 'not Future'
         # We keep track of the number of appending thread to make sure we do
         # not miss batches in abortIncompleteBatches().
         self._appends_in_progress.increment()
@@ -235,6 +238,8 @@ def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms,
                     last = dq[-1]
                     future = last.try_append(timestamp_ms, key, value, headers)
                     if future is not None:
+                        if chain_future:
+                            future.chain(chain_future)
                         batch_is_full = len(dq) > 1 or last.records.is_full()
                         return future, batch_is_full, False
 
@@ -253,6 +258,8 @@ def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms,
                         # Somebody else found us a batch, return the one we
                         # waited for! Hopefully this doesn't happen often...
                         self._free.deallocate(buf)
+                        if chain_future:
+                            future.chain(chain_future)
                         batch_is_full = len(dq) > 1 or last.records.is_full()
                         return future, batch_is_full, False
 
@@ -269,6 +276,8 @@ def append(self, tp, timestamp_ms, key, value, headers, max_time_to_block_ms,
 
                 dq.append(batch)
                 self._incomplete.add(batch)
+                if chain_future:
+                    future.chain(chain_future)
                 batch_is_full = len(dq) > 1 or batch.records.is_full()
                 return future, batch_is_full, True
         finally:
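
The three call sites above cover every path on which append() hands back a FutureRecordMetadata: appending to the current batch, adopting a batch another thread created while this one waited for buffer memory, and creating a fresh batch. Future.chain, which the new code calls and which already exists on kafka.future.Future, propagates the source future's outcome to the target. A minimal sketch of that behavior in isolation; the string value is a placeholder, not real RecordMetadata:

from kafka.future import Future

parent = Future()    # stands in for the record's FutureRecordMetadata
child = Future()     # stands in for the caller-supplied chain_future
parent.chain(child)  # child will be resolved with parent's outcome

parent.success('record-metadata')  # placeholder payload
assert child.succeeded()
assert child.value == 'record-metadata'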
