diff --git a/newrelic/hooks/mlmodel_gemini.py b/newrelic/hooks/mlmodel_gemini.py
index 8aeb1355d..6f61c1112 100644
--- a/newrelic/hooks/mlmodel_gemini.py
+++ b/newrelic/hooks/mlmodel_gemini.py
@@ -175,20 +175,24 @@ def _record_embedding_success(transaction, embedding_id, linking_metadata, kwarg
         embedding_content = str(embedding_content)
         request_model = kwargs.get("model")
 
+        embedding_token_count = (
+            settings.ai_monitoring.llm_token_count_callback(request_model, embedding_content)
+            if settings.ai_monitoring.llm_token_count_callback
+            else None
+        )
+
         full_embedding_response_dict = {
             "id": embedding_id,
             "span_id": span_id,
             "trace_id": trace_id,
-            "token_count": (
-                settings.ai_monitoring.llm_token_count_callback(request_model, embedding_content)
-                if settings.ai_monitoring.llm_token_count_callback
-                else None
-            ),
             "request.model": request_model,
             "duration": ft.duration * 1000,
             "vendor": "gemini",
             "ingest_source": "Python",
         }
 
+        if embedding_token_count:
+            full_embedding_response_dict["response.usage.total_tokens"] = embedding_token_count
+
         if settings.ai_monitoring.record_content.enabled:
             full_embedding_response_dict["input"] = embedding_content
@@ -300,15 +304,13 @@ def _record_generation_error(transaction, linking_metadata, completion_id, kwarg
                 "Unable to parse input message to Gemini LLM. Message content and role will be omitted from "
                 "corresponding LlmChatCompletionMessage event. "
             )
+        # Extract the input message content and role from the input message if it exists
+        input_message_content, input_role = _parse_input_message(input_message) if input_message else (None, None)
 
-        generation_config = kwargs.get("config")
-        if generation_config:
-            request_temperature = getattr(generation_config, "temperature", None)
-            request_max_tokens = getattr(generation_config, "max_output_tokens", None)
-        else:
-            request_temperature = None
-            request_max_tokens = None
+        # Extract data from generation config object
+        request_temperature, request_max_tokens = _extract_generation_config(kwargs)
 
+        # Prepare error attributes
         notice_error_attributes = {
             "http.statusCode": getattr(exc, "code", None),
             "error.message": getattr(exc, "message", None),
@@ -348,15 +350,17 @@ def _record_generation_error(transaction, linking_metadata, completion_id, kwarg
         create_chat_completion_message_event(
             transaction,
-            input_message,
+            input_message_content,
+            input_role,
             completion_id,
             span_id,
             trace_id,
             # Passing the request model as the response model here since we do not have access to a response model
             request_model,
-            request_model,
             llm_metadata,
             output_message_list,
+            # We do not record token counts in error cases, so set all_token_counts to True so the pipeline tokenizer does not run
+            all_token_counts=True,
         )
     except Exception:
         _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True)
 
@@ -377,6 +381,7 @@ def _handle_generation_success(transaction, linking_metadata, completion_id, kwa
 
 
 def _record_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, response):
+    settings = transaction.settings or global_settings()
     span_id = linking_metadata.get("span.id")
     trace_id = linking_metadata.get("trace.id")
     try:
@@ -385,12 +390,14 @@ def _record_generation_success(transaction, linking_metadata, completion_id, kwa
             # finish_reason is an enum, so grab just the stringified value from it to report
             finish_reason = response.get("candidates")[0].get("finish_reason").value
             output_message_list = [response.get("candidates")[0].get("content")]
+            token_usage = response.get("usage_metadata") or {}
         else:
             # Set all values to NoneTypes since we cannot access them through kwargs or another method that doesn't
            # require the response object
             response_model = None
             output_message_list = []
             finish_reason = None
+            token_usage = {}
 
         request_model = kwargs.get("model")
 
@@ -412,13 +419,44 @@ def _record_generation_success(transaction, linking_metadata, completion_id, kwa
                 "corresponding LlmChatCompletionMessage event. "
             )
 
+        input_message_content, input_role = _parse_input_message(input_message) if input_message else (None, None)
+
+        # Parse output message content
+        # This list should have a length of 1 to represent the output message
+        # Parse the message text out to pass to any registered token counting callback
+        output_message_content = output_message_list[0].get("parts")[0].get("text") if output_message_list else None
+
+        # Extract token counts from response object
+        if token_usage:
+            response_prompt_tokens = token_usage.get("prompt_token_count")
+            response_completion_tokens = token_usage.get("candidates_token_count")
+            response_total_tokens = token_usage.get("total_token_count")
+        else:
-            request_temperature = None
-            request_max_tokens = None
+            response_prompt_tokens = None
+            response_completion_tokens = None
+            response_total_tokens = None
+
+        # Calculate token counts by checking if a callback is registered and if we have the necessary content to pass
+        # to it. If not, then we use the token counts provided in the response object
+        prompt_tokens = (
+            settings.ai_monitoring.llm_token_count_callback(request_model, input_message_content)
+            if settings.ai_monitoring.llm_token_count_callback and input_message_content
+            else response_prompt_tokens
+        )
+        completion_tokens = (
+            settings.ai_monitoring.llm_token_count_callback(response_model, output_message_content)
+            if settings.ai_monitoring.llm_token_count_callback and output_message_content
+            else response_completion_tokens
+        )
+        total_tokens = (
+            prompt_tokens + completion_tokens if all([prompt_tokens, completion_tokens]) else response_total_tokens
+        )
+
+        all_token_counts = bool(prompt_tokens and completion_tokens and total_tokens)
+
+        # Extract generation config
+        request_temperature, request_max_tokens = _extract_generation_config(kwargs)
 
         full_chat_completion_summary_dict = {
             "id": completion_id,
@@ -438,66 +476,78 @@ def _record_generation_success(transaction, linking_metadata, completion_id, kwa
             "response.number_of_messages": 1 + len(output_message_list),
         }
 
+        if all_token_counts:
+            full_chat_completion_summary_dict["response.usage.prompt_tokens"] = prompt_tokens
+            full_chat_completion_summary_dict["response.usage.completion_tokens"] = completion_tokens
+            full_chat_completion_summary_dict["response.usage.total_tokens"] = total_tokens
+
         llm_metadata = _get_llm_attributes(transaction)
         full_chat_completion_summary_dict.update(llm_metadata)
         transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict)
 
         create_chat_completion_message_event(
             transaction,
-            input_message,
+            input_message_content,
+            input_role,
             completion_id,
             span_id,
             trace_id,
             response_model,
-            request_model,
             llm_metadata,
             output_message_list,
+            all_token_counts,
         )
     except Exception:
         _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True)
 
 
+def _parse_input_message(input_message):
+    # The input_message will be a string if generate_content was called directly. In this case, we don't have
+    # access to the role, so we default to user since this was an input message
+    if isinstance(input_message, str):
+        return input_message, "user"
+    # The input_message will be a Google Content type if send_message was called, so we parse out the message
+    # text and role (which should be "user")
+    elif isinstance(input_message, google.genai.types.Content):
+        return input_message.parts[0].text, input_message.role
+    else:
+        return None, None
+
+
+def _extract_generation_config(kwargs):
+    generation_config = kwargs.get("config")
+    if generation_config:
+        request_temperature = getattr(generation_config, "temperature", None)
+        request_max_tokens = getattr(generation_config, "max_output_tokens", None)
+    else:
+        request_temperature = None
+        request_max_tokens = None
+
+    return request_temperature, request_max_tokens
+
+
 def create_chat_completion_message_event(
     transaction,
-    input_message,
+    input_message_content,
+    input_role,
     chat_completion_id,
     span_id,
     trace_id,
     response_model,
-    request_model,
     llm_metadata,
     output_message_list,
+    all_token_counts,
 ):
     try:
         settings = transaction.settings or global_settings()
 
-        if input_message:
-            # The input_message will be a string if generate_content was called directly. In this case, we don't have
-            # access to the role, so we default to user since this was an input message
-            if isinstance(input_message, str):
-                input_message_content = input_message
-                input_role = "user"
-            # The input_message will be a Google Content type if send_message was called, so we parse out the message
-            # text and role (which should be "user")
-            elif isinstance(input_message, google.genai.types.Content):
-                input_message_content = input_message.parts[0].text
-                input_role = input_message.role
-            # Set input data to NoneTypes to ensure token_count callback is not called
-            else:
-                input_message_content = None
-                input_role = None
-
+        if input_message_content:
             message_id = str(uuid.uuid4())
 
             chat_completion_input_message_dict = {
                 "id": message_id,
                 "span_id": span_id,
                 "trace_id": trace_id,
-                "token_count": (
-                    settings.ai_monitoring.llm_token_count_callback(request_model, input_message_content)
-                    if settings.ai_monitoring.llm_token_count_callback and input_message_content
-                    else None
-                ),
                 "role": input_role,
                 "completion_id": chat_completion_id,
                 # The input message will always be the first message in our request/ response sequence so this will
@@ -507,6 +557,8 @@ def create_chat_completion_message_event(
                 "vendor": "gemini",
                 "ingest_source": "Python",
             }
+            if all_token_counts:
+                chat_completion_input_message_dict["token_count"] = 0
 
             if settings.ai_monitoring.record_content.enabled:
                 chat_completion_input_message_dict["content"] = input_message_content
@@ -523,7 +575,7 @@ def create_chat_completion_message_event(
 
             # Add one to the index to account for the single input message so our sequence value is accurate for
             # the output message
-            if input_message:
+            if input_message_content:
                 index += 1
 
             message_id = str(uuid.uuid4())
@@ -532,11 +584,6 @@ def create_chat_completion_message_event(
                 "id": message_id,
                 "span_id": span_id,
                 "trace_id": trace_id,
-                "token_count": (
-                    settings.ai_monitoring.llm_token_count_callback(response_model, message_content)
-                    if settings.ai_monitoring.llm_token_count_callback
-                    else None
-                ),
                 "role": message.get("role"),
                 "completion_id": chat_completion_id,
                 "sequence": index,
@@ -546,6 +593,9 @@ def create_chat_completion_message_event(
                 "is_response": True,
             }
 
+            if all_token_counts:
+                chat_completion_output_message_dict["token_count"] = 0
+
             if settings.ai_monitoring.record_content.enabled:
                 chat_completion_output_message_dict["content"] = message_content
 
diff --git a/tests/mlmodel_gemini/test_embeddings.py b/tests/mlmodel_gemini/test_embeddings.py
index 0fc92897b..5b4e30f86 100644
--- a/tests/mlmodel_gemini/test_embeddings.py
+++ b/tests/mlmodel_gemini/test_embeddings.py
@@ -15,7 +15,7 @@
 import google.genai
 from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes
 from testing_support.ml_testing_utils import (
-    add_token_count_to_events,
+    add_token_count_to_embedding_events,
     disabled_ai_monitoring_record_content_settings,
     disabled_ai_monitoring_settings,
     events_sans_content,
@@ -93,7 +93,7 @@ def test_gemini_embedding_sync_no_content(gemini_dev_client, set_trace_info):
 
 @reset_core_stats_engine()
 @override_llm_token_callback_settings(llm_token_count_callback)
-@validate_custom_events(add_token_count_to_events(embedding_recorded_events))
+@validate_custom_events(add_token_count_to_embedding_events(embedding_recorded_events))
 @validate_custom_event_count(count=1)
 @validate_transaction_metrics(
     name="test_embeddings:test_gemini_embedding_sync_with_token_count",
@@ -177,7 +177,7 @@ def test_gemini_embedding_async_no_content(gemini_dev_client, loop, set_trace_in
 
 @reset_core_stats_engine()
 @override_llm_token_callback_settings(llm_token_count_callback)
-@validate_custom_events(add_token_count_to_events(embedding_recorded_events))
+@validate_custom_events(add_token_count_to_embedding_events(embedding_recorded_events))
 @validate_custom_event_count(count=1)
 @validate_transaction_metrics(
     name="test_embeddings:test_gemini_embedding_async_with_token_count",
diff --git a/tests/mlmodel_gemini/test_embeddings_error.py b/tests/mlmodel_gemini/test_embeddings_error.py
index a65a6c2c6..f0e7aac58 100644
--- a/tests/mlmodel_gemini/test_embeddings_error.py
+++ b/tests/mlmodel_gemini/test_embeddings_error.py
@@ -16,12 +16,10 @@
 
 import google.genai
 import pytest
-from testing_support.fixtures import dt_enabled, override_llm_token_callback_settings, reset_core_stats_engine
+from testing_support.fixtures import dt_enabled, reset_core_stats_engine
 from testing_support.ml_testing_utils import (
-    add_token_count_to_events,
     disabled_ai_monitoring_record_content_settings,
     events_sans_content,
-    llm_token_count_callback,
     set_trace_info,
 )
 from testing_support.validators.validate_custom_event import validate_custom_event_count
@@ -159,34 +157,6 @@ def test_embeddings_invalid_request_error_invalid_model(gemini_dev_client, set_t
         gemini_dev_client.models.embed_content(contents="Embedded: Model does not exist.", model="does-not-exist")
 
 
-@dt_enabled
-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-@validate_error_trace_attributes(
-    callable_name(google.genai.errors.ClientError),
-    exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
-)
-@validate_span_events(
-    exact_agents={
-        "error.message": "models/does-not-exist is not found for API version v1beta, or is not supported for embedContent. Call ListModels to see the list of available models and their supported methods."
-    }
-)
-@validate_transaction_metrics(
-    name="test_embeddings_error:test_embeddings_invalid_request_error_invalid_model_with_token_count",
-    scoped_metrics=[("Llm/embedding/Gemini/embed_content", 1)],
-    rollup_metrics=[("Llm/embedding/Gemini/embed_content", 1)],
-    custom_metrics=[(f"Supportability/Python/ML/Gemini/{google.genai.__version__}", 1)],
-    background_task=True,
-)
-@validate_custom_events(add_token_count_to_events(invalid_model_events))
-@validate_custom_event_count(count=1)
-@background_task()
-def test_embeddings_invalid_request_error_invalid_model_with_token_count(gemini_dev_client, set_trace_info):
-    with pytest.raises(google.genai.errors.ClientError):
-        set_trace_info()
-        gemini_dev_client.models.embed_content(contents="Embedded: Model does not exist.", model="does-not-exist")
-
-
 embedding_invalid_key_error_events = [
     (
         {"type": "LlmEmbedding"},
@@ -326,36 +296,6 @@ def test_embeddings_async_invalid_request_error_invalid_model(gemini_dev_client,
     )
 
 
-@dt_enabled
-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-@validate_error_trace_attributes(
-    callable_name(google.genai.errors.ClientError),
-    exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
-)
-@validate_span_events(
-    exact_agents={
-        "error.message": "models/does-not-exist is not found for API version v1beta, or is not supported for embedContent. Call ListModels to see the list of available models and their supported methods."
-    }
-)
-@validate_transaction_metrics(
-    name="test_embeddings_error:test_embeddings_async_invalid_request_error_invalid_model_with_token_count",
-    scoped_metrics=[("Llm/embedding/Gemini/embed_content", 1)],
-    rollup_metrics=[("Llm/embedding/Gemini/embed_content", 1)],
-    custom_metrics=[(f"Supportability/Python/ML/Gemini/{google.genai.__version__}", 1)],
-    background_task=True,
-)
-@validate_custom_events(add_token_count_to_events(invalid_model_events))
-@validate_custom_event_count(count=1)
-@background_task()
-def test_embeddings_async_invalid_request_error_invalid_model_with_token_count(gemini_dev_client, loop, set_trace_info):
-    with pytest.raises(google.genai.errors.ClientError):
-        set_trace_info()
-        loop.run_until_complete(
-            gemini_dev_client.models.embed_content(contents="Embedded: Model does not exist.", model="does-not-exist")
-        )
-
-
 # Wrong api_key provided
 @dt_enabled
 @reset_core_stats_engine()
diff --git a/tests/mlmodel_gemini/test_text_generation.py b/tests/mlmodel_gemini/test_text_generation.py
index faec66aa7..3da978e77 100644
--- a/tests/mlmodel_gemini/test_text_generation.py
+++ b/tests/mlmodel_gemini/test_text_generation.py
@@ -15,7 +15,7 @@
 import google.genai
 from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes
 from testing_support.ml_testing_utils import (
-    add_token_count_to_events,
+    add_token_counts_to_chat_events,
     disabled_ai_monitoring_record_content_settings,
     disabled_ai_monitoring_settings,
     events_sans_content,
@@ -50,6 +50,9 @@
             "vendor": "gemini",
             "ingest_source": "Python",
             "response.number_of_messages": 2,
+            "response.usage.prompt_tokens": 9,
+            "response.usage.completion_tokens": 13,
+            "response.usage.total_tokens": 22,
         },
     ),
     (
@@ -60,6 +63,7 @@
             "llm.foo": "bar",
             "span_id": None,
             "trace_id": "trace-id",
+            "token_count": 0,
             "content": "How many letters are in the word Python?",
             "role": "user",
             "completion_id": None,
@@ -77,6 +81,7 @@
             "llm.foo": "bar",
             "span_id": None,
             "trace_id": "trace-id",
+            "token_count": 0,
             "content": 'There are **6** letters in the word "Python".\n',
             "role": "model",
             "completion_id": None,
@@ -183,7 +188,8 @@ def test_gemini_text_generation_sync_no_content(gemini_dev_client, set_trace_inf
 
 @reset_core_stats_engine()
 @override_llm_token_callback_settings(llm_token_count_callback)
-@validate_custom_events(add_token_count_to_events(text_generation_recorded_events))
+# Ensure LLM callback is invoked and response token counts are overridden
+@validate_custom_events(add_token_counts_to_chat_events(text_generation_recorded_events))
 @validate_custom_event_count(count=3)
 @validate_transaction_metrics(
     name="test_text_generation:test_gemini_text_generation_sync_with_token_count",
@@ -324,7 +330,7 @@ def test_gemini_text_generation_async_no_content(gemini_dev_client, loop, set_tr
 
 @reset_core_stats_engine()
 @override_llm_token_callback_settings(llm_token_count_callback)
-@validate_custom_events(add_token_count_to_events(text_generation_recorded_events))
+@validate_custom_events(add_token_counts_to_chat_events(text_generation_recorded_events))
 @validate_custom_event_count(count=3)
 @validate_transaction_metrics(
     name="test_text_generation:test_gemini_text_generation_async_with_token_count",
diff --git a/tests/mlmodel_gemini/test_text_generation_error.py b/tests/mlmodel_gemini/test_text_generation_error.py
index 5e6f1c04d..c92e1a2d4 100644
--- a/tests/mlmodel_gemini/test_text_generation_error.py
+++ b/tests/mlmodel_gemini/test_text_generation_error.py
@@ -17,13 +17,11 @@
 
 import google.genai
 import pytest
-from testing_support.fixtures import dt_enabled, override_llm_token_callback_settings, reset_core_stats_engine
+from testing_support.fixtures import dt_enabled, reset_core_stats_engine
 from testing_support.ml_testing_utils import (
-    add_token_count_to_events,
     disabled_ai_monitoring_record_content_settings,
     events_sans_content,
     events_with_context_attrs,
-    llm_token_count_callback,
     set_trace_info,
 )
 from testing_support.validators.validate_custom_event import validate_custom_event_count
@@ -63,6 +61,7 @@
             "trace_id": "trace-id",
             "content": "How many letters are in the word Python?",
             "role": "user",
+            "token_count": 0,
             "completion_id": None,
             "sequence": 0,
             "vendor": "gemini",
@@ -167,6 +166,7 @@ def _test():
             "trace_id": "trace-id",
             "content": "Model does not exist.",
             "role": "user",
+            "token_count": 0,
             "completion_id": None,
             "response.model": "does-not-exist",
             "sequence": 0,
@@ -179,39 +179,6 @@ def _test():
 @dt_enabled
 @reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-@validate_error_trace_attributes(
-    callable_name(google.genai.errors.ClientError),
-    exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
-)
-@validate_span_events(
-    exact_agents={
-        "error.message": "models/does-not-exist is not found for API version v1beta, or is not supported for generateContent. Call ListModels to see the list of available models and their supported methods."
-    }
-)
-@validate_transaction_metrics(
-    "test_text_generation_error:test_text_generation_invalid_request_error_invalid_model_with_token_count",
-    scoped_metrics=[("Llm/completion/Gemini/generate_content", 1)],
-    rollup_metrics=[("Llm/completion/Gemini/generate_content", 1)],
-    background_task=True,
-)
-@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error))
-@validate_custom_event_count(count=2)
-@background_task()
-def test_text_generation_invalid_request_error_invalid_model_with_token_count(gemini_dev_client, set_trace_info):
-    with pytest.raises(google.genai.errors.ClientError):
-        set_trace_info()
-        add_custom_attribute("llm.conversation_id", "my-awesome-id")
-        gemini_dev_client.models.generate_content(
-            model="does-not-exist",
-            contents=["Model does not exist."],
-            config=google.genai.types.GenerateContentConfig(max_output_tokens=100, temperature=0.7),
-        )
-
-
-@dt_enabled
-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
 @validate_error_trace_attributes(
     callable_name(google.genai.errors.ClientError),
     exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
 )
@@ -227,7 +194,7 @@ def test_text_generation_invalid_request_error_invalid_model_with_token_count(ge
     rollup_metrics=[("Llm/completion/Gemini/generate_content", 1)],
     background_task=True,
 )
-@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error))
+@validate_custom_events(expected_events_on_invalid_model_error)
 @validate_custom_event_count(count=2)
 @background_task()
 def test_text_generation_invalid_request_error_invalid_model_chat(gemini_dev_client, set_trace_info):
@@ -266,6 +233,7 @@ def test_text_generation_invalid_request_error_invalid_model_chat(gemini_dev_cli
             "trace_id": "trace-id",
             "content": "Invalid API key.",
             "role": "user",
+            "token_count": 0,
             "response.model": "gemini-flash-2.0",
             "completion_id": None,
             "sequence": 0,
@@ -377,43 +345,6 @@ def _test():
 @dt_enabled
 @reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-@validate_error_trace_attributes(
-    callable_name(google.genai.errors.ClientError),
-    exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
-)
-@validate_span_events(
-    exact_agents={
-        "error.message": "models/does-not-exist is not found for API version v1beta, or is not supported for generateContent. Call ListModels to see the list of available models and their supported methods."
-    }
-)
-@validate_transaction_metrics(
-    "test_text_generation_error:test_text_generation_async_invalid_request_error_invalid_model_with_token_count",
-    scoped_metrics=[("Llm/completion/Gemini/generate_content", 1)],
-    rollup_metrics=[("Llm/completion/Gemini/generate_content", 1)],
-    background_task=True,
-)
-@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error))
-@validate_custom_event_count(count=2)
-@background_task()
-def test_text_generation_async_invalid_request_error_invalid_model_with_token_count(
-    gemini_dev_client, loop, set_trace_info
-):
-    with pytest.raises(google.genai.errors.ClientError):
-        set_trace_info()
-        add_custom_attribute("llm.conversation_id", "my-awesome-id")
-        loop.run_until_complete(
-            gemini_dev_client.models.generate_content(
-                model="does-not-exist",
-                contents=["Model does not exist."],
-                config=google.genai.types.GenerateContentConfig(max_output_tokens=100, temperature=0.7),
-            )
-        )
-
-
-@dt_enabled
-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
 @validate_error_trace_attributes(
     callable_name(google.genai.errors.ClientError),
     exact_attrs={"agent": {}, "intrinsic": {}, "user": {"error.code": "NOT_FOUND", "http.statusCode": 404}},
 )
@@ -429,7 +360,7 @@ def test_text_generation_async_invalid_request_error_invalid_model_with_token_co
     rollup_metrics=[("Llm/completion/Gemini/generate_content", 1)],
     background_task=True,
 )
-@validate_custom_events(add_token_count_to_events(expected_events_on_invalid_model_error))
+@validate_custom_events(expected_events_on_invalid_model_error)
 @validate_custom_event_count(count=2)
 @background_task()
 def test_text_generation_async_invalid_request_error_invalid_model_chat(gemini_dev_client, loop, set_trace_info):
diff --git a/tests/testing_support/ml_testing_utils.py b/tests/testing_support/ml_testing_utils.py
index 0e7307bfb..ae95e01bd 100644
--- a/tests/testing_support/ml_testing_utils.py
+++ b/tests/testing_support/ml_testing_utils.py
@@ -29,6 +29,7 @@ def llm_token_count_callback(model, content):
     return 105
 
 
+# This will be removed once all LLM instrumentations have been converted to use new token count design
 def add_token_count_to_events(expected_events):
     events = copy.deepcopy(expected_events)
     for event in events:
@@ -37,6 +38,24 @@ def add_token_count_to_events(expected_events):
     return events
 
 
+def add_token_count_to_embedding_events(expected_events):
+    events = copy.deepcopy(expected_events)
+    for event in events:
+        if event[0]["type"] == "LlmEmbedding":
+            event[1]["response.usage.total_tokens"] = 105
+    return events
+
+
+def add_token_counts_to_chat_events(expected_events):
+    events = copy.deepcopy(expected_events)
+    for event in events:
+        if event[0]["type"] == "LlmChatCompletionSummary":
+            event[1]["response.usage.prompt_tokens"] = 105
+            event[1]["response.usage.completion_tokens"] = 105
+            event[1]["response.usage.total_tokens"] = 210
+    return events
+
+
 def events_sans_content(event):
     new_event = copy.deepcopy(event)
     for _event in new_event: