@@ -380,7 +380,6 @@ class KafkaProducer(object):
     }
 
     def __init__(self, **configs):
-        log.debug("Starting the Kafka producer")  # trace
         self.config = copy.copy(self.DEFAULT_CONFIG)
         user_provided_configs = set(configs.keys())
         for key in self.config:
@@ -409,8 +408,10 @@ def __init__(self, **configs):
                 self.config['api_version'] = None
             else:
                 self.config['api_version'] = tuple(map(int, deprecated.split('.')))
-                log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
-                            str(self.config['api_version']), deprecated)
+                log.warning('%s: use api_version=%s [tuple] -- "%s" as str is deprecated',
+                            self, str(self.config['api_version']), deprecated)
+
+        log.debug("%s: Starting Kafka producer", self)
 
         # Configure metrics
         if self.config['metrics_enabled']:
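Note: the hunk above also covers the deprecated string form of api_version. A minimal sketch of the two spellings (broker address hypothetical):

    from kafka import KafkaProducer

    # Deprecated: a dotted version string triggers the warning above,
    # now prefixed with the producer identity.
    producer = KafkaProducer(bootstrap_servers='localhost:9092', api_version='0.10.2')

    # Preferred: pass the broker version as a tuple of ints.
    producer = KafkaProducer(bootstrap_servers='localhost:9092', api_version=(0, 10, 2))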
@@ -466,26 +467,26 @@ def __init__(self, **configs):
                 metadata=self._metadata,
             )
             if self._transaction_manager.is_transactional():
-                log.info("Instantiated a transactional producer.")
+                log.info("%s: Instantiated a transactional producer.", self)
             else:
-                log.info("Instantiated an idempotent producer.")
+                log.info("%s: Instantiated an idempotent producer.", self)
 
             if 'retries' not in user_provided_configs:
-                log.info("Overriding the default 'retries' config to 3 since the idempotent producer is enabled.")
+                log.info("%s: Overriding the default 'retries' config to 3 since the idempotent producer is enabled.", self)
                 self.config['retries'] = 3
             elif self.config['retries'] == 0:
                 raise Errors.KafkaConfigurationError("Must set 'retries' to non-zero when using the idempotent producer.")
 
             if 'max_in_flight_requests_per_connection' not in user_provided_configs:
-                log.info("Overriding the default 'max_in_flight_requests_per_connection' to 1 since idempontence is enabled.")
+                log.info("%s: Overriding the default 'max_in_flight_requests_per_connection' to 1 since idempotence is enabled.", self)
                 self.config['max_in_flight_requests_per_connection'] = 1
             elif self.config['max_in_flight_requests_per_connection'] != 1:
                 raise Errors.KafkaConfigurationError("Must set 'max_in_flight_requests_per_connection' to 1 in order"
                                                     " to use the idempotent producer."
                                                     " Otherwise we cannot guarantee idempotence.")
 
             if 'acks' not in user_provided_configs:
-                log.info("Overriding the default 'acks' config to 'all' since idempotence is enabled")
+                log.info("%s: Overriding the default 'acks' config to 'all' since idempotence is enabled", self)
                 self.config['acks'] = -1
             elif self.config['acks'] != -1:
                 raise Errors.KafkaConfigurationError("Must set 'acks' config to 'all' in order to use the idempotent"
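Note: the checks above mean an idempotent producer requires retries > 0, max_in_flight_requests_per_connection == 1, and acks=-1 ('all'); omitting those keys lets the producer apply the overrides itself. A minimal sketch, assuming the standard enable_idempotence config and a hypothetical broker address:

    from kafka import KafkaProducer

    # Unset keys are overridden automatically: retries=3, max_in_flight=1, acks='all'.
    producer = KafkaProducer(bootstrap_servers='localhost:9092', enable_idempotence=True)

    # Conflicting explicit values raise KafkaConfigurationError instead, e.g.:
    # KafkaProducer(bootstrap_servers='localhost:9092', enable_idempotence=True, acks=1)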
@@ -509,7 +510,7 @@ def __init__(self, **configs):
 
         self._cleanup = self._cleanup_factory()
         atexit.register(self._cleanup)
-        log.debug("Kafka producer started")
+        log.debug("%s: Kafka producer started", self)
 
     def bootstrap_connected(self):
         """Return True if the bootstrap is connected."""
@@ -564,7 +565,7 @@ def __getattr__(self, name):
         self._unregister_cleanup()
 
         if not hasattr(self, '_closed') or self._closed:
-            log.info('Kafka producer closed')
+            log.info('%s: Kafka producer closed', self)
             return
         if timeout is None:
             # threading.TIMEOUT_MAX is available in Python3.3+
@@ -574,26 +575,26 @@ def __getattr__(self, name):
         else:
             assert timeout >= 0
 
-        log.info("Closing the Kafka producer with %s secs timeout.", timeout)
+        log.info("%s: Closing the Kafka producer with %s secs timeout.", self, timeout)
         self.flush(timeout)
         invoked_from_callback = bool(threading.current_thread() is self._sender)
         if timeout > 0:
             if invoked_from_callback:
-                log.warning("Overriding close timeout %s secs to 0 in order to"
+                log.warning("%s: Overriding close timeout %s secs to 0 in order to"
                             " prevent useless blocking due to self-join. This"
                             " means you have incorrectly invoked close with a"
                             " non-zero timeout from the producer call-back.",
-                            timeout)
+                            self, timeout)
             else:
                 # Try to close gracefully.
                 if self._sender is not None:
                     self._sender.initiate_close()
                     self._sender.join(timeout)
 
         if self._sender is not None and self._sender.is_alive():
-            log.info("Proceeding to force close the producer since pending"
+            log.info("%s: Proceeding to force close the producer since pending"
                      " requests could not be completed within timeout %s.",
-                     timeout)
+                     self, timeout)
             self._sender.force_close()
 
         if self._metrics:
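Note: close() flushes first, then joins the sender thread for up to the given timeout before force-closing; when invoked from a producer callback the timeout is overridden to 0 to avoid a self-join. A usage sketch:

    producer.close(timeout=5)  # wait up to 5 secs for pending requests, then force close
    producer.close()           # no timeout: block until the sender shuts down cleanly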
@@ -607,7 +608,7 @@ def __getattr__(self, name):
         except AttributeError:
             pass
         self._closed = True
-        log.debug("The Kafka producer has closed.")
+        log.debug("%s: The Kafka producer has closed.", self)
 
     def partitions_for(self, topic):
         """Returns set of all known partitions for the topic."""
@@ -816,7 +817,7 @@ def send(self, topic, value=None, key=None, headers=None, partition=None, timest
             self._ensure_valid_record_size(message_size)
 
             tp = TopicPartition(topic, partition)
-            log.debug("Sending (key=%r value=%r headers=%r) to %s", key, value, headers, tp)
+            log.debug("%s: Sending (key=%r value=%r headers=%r) to %s", self, key, value, headers, tp)
 
             if self._transaction_manager and self._transaction_manager.is_transactional():
                 self._transaction_manager.maybe_add_partition_to_transaction(tp)
@@ -825,16 +826,16 @@ def send(self, topic, value=None, key=None, headers=None, partition=None, timest
                                               key_bytes, value_bytes, headers)
             future, batch_is_full, new_batch_created = result
             if batch_is_full or new_batch_created:
-                log.debug("Waking up the sender since %s is either full or"
-                          " getting a new batch", tp)
+                log.debug("%s: Waking up the sender since %s is either full or"
+                          " getting a new batch", self, tp)
                 self._sender.wakeup()
 
             return future
         # handling exceptions and record the errors;
         # for API exceptions return them in the future,
         # for other exceptions raise directly
         except Errors.BrokerResponseError as e:
-            log.error("Exception occurred during message send: %s", e)
+            log.error("%s: Exception occurred during message send: %s", self, e)
             return FutureRecordMetadata(
                 FutureProduceResult(TopicPartition(topic, partition)),
                 -1, None, None,
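Note: send() is asynchronous and returns a FutureRecordMetadata; the sender wakeup above is what pushes full batches onto the wire. A usage sketch (topic and payload hypothetical):

    future = producer.send('my-topic', key=b'k', value=b'v')
    record_metadata = future.get(timeout=10)  # block for this one record
    producer.flush()                          # block until all buffered records are sent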
@@ -865,7 +866,7 @@ def flush(self, timeout=None):
             KafkaTimeoutError: failure to flush buffered records within the
                 provided timeout
         """
-        log.debug("Flushing accumulated records in producer.")  # trace
+        log.debug("%s: Flushing accumulated records in producer.", self)
        self._accumulator.begin_flush()
         self._sender.wakeup()
         self._accumulator.await_flush_completion(timeout=timeout)
@@ -911,7 +912,7 @@ def _wait_on_metadata(self, topic, max_wait):
             if not metadata_event:
                 metadata_event = threading.Event()
 
-            log.debug("Requesting metadata update for topic %s", topic)
+            log.debug("%s: Requesting metadata update for topic %s", self, topic)
 
             metadata_event.clear()
             future = self._metadata.request_update()
@@ -925,7 +926,7 @@ def _wait_on_metadata(self, topic, max_wait):
                 raise Errors.TopicAuthorizationFailedError(set([topic]))
             else:
                 elapsed = time.time() - begin
-                log.debug("_wait_on_metadata woke after %s secs.", elapsed)
+                log.debug("%s: _wait_on_metadata woke after %s secs.", self, elapsed)
 
     def _serialize(self, f, topic, data):
         if not f:
@@ -972,3 +973,6 @@ def metrics(self, raw=False):
                 metrics[k.group][k.name] = {}
             metrics[k.group][k.name] = v.value()
         return metrics
+
+    def __str__(self):
+        return "<KafkaProducer client_id=%s transactional_id=%s>" % (self.config['client_id'], self.config['transactional_id'])