diff --git a/ext/datadog_profiling_native_extension/clock_id.h b/ext/datadog_profiling_native_extension/clock_id.h index b302380eab7..f02bcd5bd1b 100644 --- a/ext/datadog_profiling_native_extension/clock_id.h +++ b/ext/datadog_profiling_native_extension/clock_id.h @@ -5,13 +5,13 @@ #include // Contains the operating-system specific identifier needed to fetch CPU-time, and a flag to indicate if we failed to fetch it -typedef struct thread_cpu_time_id { +typedef struct { bool valid; clockid_t clock_id; } thread_cpu_time_id; // Contains the current cpu time, and a flag to indicate if we failed to fetch it -typedef struct thread_cpu_time { +typedef struct { bool valid; long result_ns; } thread_cpu_time; diff --git a/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c b/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c index 723e2d93b33..e40c2a1a14e 100644 --- a/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c +++ b/ext/datadog_profiling_native_extension/collectors_cpu_and_wall_time_worker.c @@ -92,7 +92,7 @@ unsigned int MAX_ALLOC_WEIGHT = 10000; #endif // Contains state for a single CpuAndWallTimeWorker instance -struct cpu_and_wall_time_worker_state { +typedef struct { // These are immutable after initialization bool gc_profiling_enabled; @@ -187,7 +187,7 @@ struct cpu_and_wall_time_worker_state { uint64_t gvl_sampling_time_ns_max; uint64_t gvl_sampling_time_ns_total; } stats; -}; +} cpu_and_wall_time_worker_state; static VALUE _native_new(VALUE klass); static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self); @@ -195,7 +195,7 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr); static VALUE _native_sampling_loop(VALUE self, VALUE instance); static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread); static VALUE stop(VALUE self_instance, VALUE optional_exception); -static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception); +static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception); static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext); static void *run_sampling_trigger_loop(void *state_ptr); static void interrupt_sampling_trigger_loop(void *state_ptr); @@ -221,14 +221,14 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance); static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance); void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused); static void grab_gvl_and_sample(void); -static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state); +static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state); static void sleep_for(uint64_t time_ns); static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self); static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2); -static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state); +static void disable_tracepoints(cpu_and_wall_time_worker_state *state); static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self); static VALUE rescued_sample_allocation(VALUE tracepoint_data); -static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error); +static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error); static VALUE 
_native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg); static VALUE _native_hold_signals(DDTRACE_UNUSED VALUE self); static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self); @@ -262,7 +262,7 @@ static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE // This global state is needed because a bunch of functions on this file need to access it from situations // (e.g. signal handler) where it's impossible or just awkward to pass it as an argument. static VALUE active_sampler_instance = Qnil; -static struct cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL; +static cpu_and_wall_time_worker_state *active_sampler_instance_state = NULL; // See handle_sampling_signal for details on what this does #ifdef NO_POSTPONED_TRIGGER @@ -334,7 +334,7 @@ void collectors_cpu_and_wall_time_worker_init(VALUE profiling_module) { rb_define_singleton_method(testing_module, "_native_gvl_profiling_hook_active", _native_gvl_profiling_hook_active, 1); } -// This structure is used to define a Ruby object that stores a pointer to a struct cpu_and_wall_time_worker_state +// This structure is used to define a Ruby object that stores a pointer to a cpu_and_wall_time_worker_state // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works static const rb_data_type_t cpu_and_wall_time_worker_typed_data = { .wrap_struct_name = "Datadog::Profiling::Collectors::CpuAndWallTimeWorker", @@ -350,7 +350,7 @@ static const rb_data_type_t cpu_and_wall_time_worker_typed_data = { static VALUE _native_new(VALUE klass) { long now = monotonic_wall_time_now_ns(RAISE_ON_FAILURE); - struct cpu_and_wall_time_worker_state *state = ruby_xcalloc(1, sizeof(struct cpu_and_wall_time_worker_state)); + cpu_and_wall_time_worker_state *state = ruby_xcalloc(1, sizeof(cpu_and_wall_time_worker_state)); // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory // being leaked. 
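// --- Illustrative sketch: the core rename applied throughout this patch. The tagged
// `struct cpu_and_wall_time_worker_state { ... };` becomes an anonymous
// `typedef struct { ... } cpu_and_wall_time_worker_state;`, so the typedef name is the only
// remaining way to spell the type, which is why every `sizeof(struct ...)`, cast, and declaration
// in the hunks above and below is updated in the same change. The names here (tagged_state,
// anon_state) are hypothetical, standalone stand-ins, not the patch's code.
#include <stdbool.h>
#include <stdlib.h>

// Before: tagged struct; both `struct tagged_state` and `tagged_state` name the type.
typedef struct tagged_state {
  bool enabled;
} tagged_state;

// After: anonymous struct; only `anon_state` names the type, `struct anon_state` no longer exists.
typedef struct {
  bool enabled;
} anon_state;

int main(void) {
  tagged_state *a = calloc(1, sizeof(struct tagged_state)); // still legal for the tagged form
  anon_state   *b = calloc(1, sizeof(anon_state));          // the only legal spelling for the anonymous form
  a->enabled = true;
  b->enabled = true;
  free(a);
  free(b);
  return 0;
}
// --- end of sketch ---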
@@ -414,8 +414,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel ENFORCE_BOOLEAN(gvl_profiling_enabled); ENFORCE_BOOLEAN(skip_idle_samples_for_testing) - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); state->gc_profiling_enabled = (gc_profiling_enabled == Qtrue); state->no_signals_workaround_enabled = (no_signals_workaround_enabled == Qtrue); @@ -445,7 +445,7 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel // Since our state contains references to Ruby objects, we need to tell the Ruby GC about them static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) { - struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr; + cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr; rb_gc_mark(state->thread_context_collector_instance); rb_gc_mark(state->idle_sampling_helper_instance); @@ -457,8 +457,8 @@ static void cpu_and_wall_time_worker_typed_data_mark(void *state_ptr) { // Called in a background thread created in CpuAndWallTimeWorker#start static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); // If we already got a delayed exception registered even before starting, raise before starting if (state->failure_exception != Qnil) { @@ -466,7 +466,7 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) { rb_exc_raise(state->failure_exception); } - struct cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state; + cpu_and_wall_time_worker_state *old_state = active_sampler_instance_state; if (old_state != NULL) { if (is_thread_alive(old_state->owner_thread)) { rb_raise( @@ -546,15 +546,15 @@ static VALUE _native_sampling_loop(DDTRACE_UNUSED VALUE _self, VALUE instance) { } static VALUE _native_stop(DDTRACE_UNUSED VALUE _self, VALUE self_instance, VALUE worker_thread) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); state->stop_thread = worker_thread; return stop(self_instance, /* optional_exception: */ Qnil); } -static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optional_exception) { +static void stop_state(cpu_and_wall_time_worker_state *state, VALUE optional_exception) { atomic_store(&state->should_run, false); state->failure_exception = optional_exception; @@ -563,8 +563,8 @@ static void stop_state(struct cpu_and_wall_time_worker_state *state, VALUE optio } static VALUE stop(VALUE self_instance, VALUE optional_exception) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); 
+ cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); stop_state(state, optional_exception); @@ -575,7 +575,7 @@ static VALUE stop(VALUE self_instance, VALUE optional_exception) { // We need to be careful not to change any state that may be observed OR to restore it if we do. For instance, if anything // we do here can set `errno`, then we must be careful to restore the old `errno` after the fact. static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED siginfo_t *_info, DDTRACE_UNUSED void *_ucontext) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This can potentially happen if the CpuAndWallTimeWorker was stopped while the signal delivery was happening; nothing to do if (state == NULL) return; @@ -650,7 +650,7 @@ static void handle_sampling_signal(DDTRACE_UNUSED int _signal, DDTRACE_UNUSED si // The actual sampling trigger loop always runs **without** the global vm lock. static void *run_sampling_trigger_loop(void *state_ptr) { - struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr; + cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr; uint64_t minimum_time_between_signals = MILLIS_AS_NS(10); @@ -709,13 +709,13 @@ static void *run_sampling_trigger_loop(void *state_ptr) { // This is called by the Ruby VM when it wants to shut down the background thread static void interrupt_sampling_trigger_loop(void *state_ptr) { - struct cpu_and_wall_time_worker_state *state = (struct cpu_and_wall_time_worker_state *) state_ptr; + cpu_and_wall_time_worker_state *state = (cpu_and_wall_time_worker_state *) state_ptr; atomic_store(&state->should_run, false); } static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do if (state == NULL) return; @@ -735,8 +735,8 @@ static void sample_from_postponed_job(DDTRACE_UNUSED void *_unused) { } static VALUE rescued_sample_from_postponed_job(VALUE self_instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE); @@ -791,8 +791,8 @@ static VALUE _native_current_sigprof_signal_handler(DDTRACE_UNUSED VALUE self) { } static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + 
TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); // Final preparations: Setup signal handler and enable tracepoints. We run these here and not in `_native_sampling_loop` // because they may raise exceptions. @@ -842,7 +842,7 @@ static VALUE release_gvl_and_run_sampling_trigger_loop(VALUE instance) { // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_is_running(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above return (state != NULL && is_thread_alive(state->owner_thread) && state->self_instance == instance) ? Qtrue : Qfalse; } @@ -875,8 +875,8 @@ static VALUE _native_trigger_sample(DDTRACE_UNUSED VALUE self) { // This method exists only to enable testing Datadog::Profiling::Collectors::CpuAndWallTimeWorker behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_gc_tracepoint(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); return state->gc_tracepoint; } @@ -902,7 +902,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) { int event = rb_tracearg_event_flag(rb_tracearg_from_tracepoint(tracepoint_data)); if (event != RUBY_INTERNAL_EVENT_GC_ENTER && event != RUBY_INTERNAL_EVENT_GC_EXIT) return; // Unknown event - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set // and disabled before it is cleared, but just in case... @@ -926,7 +926,7 @@ static void on_gc_event(VALUE tracepoint_data, DDTRACE_UNUSED void *unused) { } static void after_gc_from_postponed_job(DDTRACE_UNUSED void *_unused) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do if (state == NULL) return; @@ -981,8 +981,8 @@ static VALUE _native_simulate_sample_from_postponed_job(DDTRACE_UNUSED VALUE sel // In the future, if we add more other components with tracepoints, we will need to coordinate stopping all such // tracepoints before doing the other cleaning steps. 
static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); // Disable all tracepoints, so that there are no more attempts to mutate the profile disable_tracepoints(state); @@ -1000,8 +1000,8 @@ static VALUE _native_is_sigprof_blocked_in_current_thread(DDTRACE_UNUSED VALUE s } static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); unsigned long total_cpu_samples_attempted = state->stats.cpu_sampled + state->stats.cpu_skipped; VALUE effective_cpu_sample_rate = @@ -1059,14 +1059,14 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE instance) { } static VALUE _native_stats_reset_not_thread_safe(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); reset_stats_not_thread_safe(state); return Qnil; } void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This can potentially happen if the CpuAndWallTimeWorker was stopped while the IdleSamplingHelper was trying to execute this action if (state == NULL) return NULL; @@ -1082,7 +1082,7 @@ void *simulate_sampling_signal_delivery(DDTRACE_UNUSED void *_unused) { static void grab_gvl_and_sample(void) { rb_thread_call_with_gvl(simulate_sampling_signal_delivery, NULL); } -static void reset_stats_not_thread_safe(struct cpu_and_wall_time_worker_state *state) { +static void reset_stats_not_thread_safe(cpu_and_wall_time_worker_state *state) { // NOTE: This is not really thread safe so ongoing sampling operations that are concurrent with a reset can have their stats: // * Lost (writes after stats retrieval but before reset). // * Included in the previous stats window (writes before stats retrieval and reset). @@ -1116,7 +1116,7 @@ static void sleep_for(uint64_t time_ns) { } static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; bool are_allocations_being_tracked = state != NULL && state->allocation_profiling_enabled && state->allocation_counting_enabled; @@ -1149,7 +1149,7 @@ static VALUE _native_allocation_count(DDTRACE_UNUSED VALUE self) { // call `rb_tracearg_from_tracepoint(anything)` anywhere during this function or its callees to get the data, so that's // why it's not being passed as an argument. 
static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *unused2) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This should not happen in a normal situation because the tracepoint is always enabled after the instance is set // and disabled before it is cleared, but just in case... @@ -1235,7 +1235,7 @@ static void on_newobj_event(DDTRACE_UNUSED VALUE unused1, DDTRACE_UNUSED void *u state->during_sample = false; } -static void disable_tracepoints(struct cpu_and_wall_time_worker_state *state) { +static void disable_tracepoints(cpu_and_wall_time_worker_state *state) { if (state->gc_tracepoint != Qnil) { rb_tracepoint_disable(state->gc_tracepoint); } @@ -1264,7 +1264,7 @@ static VALUE _native_with_blocked_sigprof(DDTRACE_UNUSED VALUE self) { } static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This should not happen in a normal situation because on_newobj_event already checked for this, but just in case... if (state == NULL) return Qnil; @@ -1293,7 +1293,7 @@ static VALUE rescued_sample_allocation(DDTRACE_UNUSED VALUE unused) { return Qnil; } -static void delayed_error(struct cpu_and_wall_time_worker_state *state, const char *error) { +static void delayed_error(cpu_and_wall_time_worker_state *state, const char *error) { // If we can't raise an immediate exception at the calling site, use the asynchronous flow through the main worker loop. stop_state(state, rb_exc_new_cstr(rb_eRuntimeError, error)); } @@ -1301,8 +1301,8 @@ static void delayed_error(struct cpu_and_wall_time_worker_state *state, const ch static VALUE _native_delayed_error(DDTRACE_UNUSED VALUE self, VALUE instance, VALUE error_msg) { ENFORCE_TYPE(error_msg, T_STRING); - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); delayed_error(state, rb_string_value_cstr(&error_msg)); @@ -1355,7 +1355,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) { rb_postponed_job_register_one(0, after_gvl_running_from_postponed_job, NULL); #endif } else if (result == ON_GVL_RUNNING_DONT_SAMPLE) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above if (state == NULL) return; // This should not happen, but just in case... 
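// --- Illustrative sketch: the pattern used by handle_sampling_signal, on_newobj_event, and the
// postponed-job callbacks above. Code running in these asynchronous contexts cannot receive the
// worker state as an argument, so it reads a process-global pointer, bails out if the worker was
// already stopped, and leaves `errno` exactly as it found it. All names below (sampler_state,
// g_active_state, handle_profiling_signal) are hypothetical stand-ins, not the patch's code.
#include <errno.h>
#include <signal.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
  atomic_int samples_taken;
} sampler_state;

static sampler_state *g_active_state = NULL; // set while a worker instance is running

static void handle_profiling_signal(int signum) {
  (void) signum;
  int saved_errno = errno; // the interrupted code must not observe any errno change we cause

  sampler_state *state = g_active_state;
  if (state == NULL) { errno = saved_errno; return; } // worker stopped while the signal was in flight

  atomic_fetch_add(&state->samples_taken, 1); // only async-signal-safe bookkeeping in the handler

  errno = saved_errno;
}

int main(void) {
  static sampler_state state = { .samples_taken = 0 };
  g_active_state = &state;

  struct sigaction sa = { 0 };
  sa.sa_handler = handle_profiling_signal;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGPROF, &sa, NULL);

  raise(SIGPROF); // simulate one delivery
  printf("samples taken: %d\n", atomic_load(&state.samples_taken));

  g_active_state = NULL; // "stop": any later delivery becomes a no-op
  return 0;
}
// --- end of sketch ---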
@@ -1368,7 +1368,7 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) { } static void after_gvl_running_from_postponed_job(DDTRACE_UNUSED void *_unused) { - struct cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above + cpu_and_wall_time_worker_state *state = active_sampler_instance_state; // Read from global variable, see "sampler global state safety" note above // This can potentially happen if the CpuAndWallTimeWorker was stopped while the postponed job was waiting to be executed; nothing to do if (state == NULL) return; @@ -1382,8 +1382,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) { } static VALUE rescued_after_gvl_running_from_postponed_job(VALUE self_instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(self_instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(self_instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); long wall_time_ns_before_sample = monotonic_wall_time_now_ns(RAISE_ON_FAILURE); thread_context_collector_sample_after_gvl_running(state->thread_context_collector_instance, rb_thread_current(), wall_time_ns_before_sample); @@ -1404,8 +1404,8 @@ static VALUE _native_resume_signals(DDTRACE_UNUSED VALUE self) { } static VALUE _native_gvl_profiling_hook_active(DDTRACE_UNUSED VALUE self, VALUE instance) { - struct cpu_and_wall_time_worker_state *state; - TypedData_Get_Struct(instance, struct cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); + cpu_and_wall_time_worker_state *state; + TypedData_Get_Struct(instance, cpu_and_wall_time_worker_state, &cpu_and_wall_time_worker_typed_data, state); return state->gvl_profiling_hook != NULL ? Qtrue : Qfalse; } diff --git a/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c b/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c index 25fcb0b3dbe..d310d3ca993 100644 --- a/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c +++ b/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.c @@ -333,7 +333,7 @@ static VALUE _native_should_sample(VALUE self, VALUE now); static VALUE _native_after_sample(VALUE self, VALUE now); static VALUE _native_state_snapshot(VALUE self); -typedef struct sampler_state { +typedef struct { discrete_dynamic_sampler sampler; } sampler_state; diff --git a/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h b/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h index 7e1edfb5c42..1c0b0aedb2c 100644 --- a/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h +++ b/ext/datadog_profiling_native_extension/collectors_discrete_dynamic_sampler.h @@ -16,7 +16,7 @@ // every event and is thus, in theory, susceptible to some pattern // biases. In practice, the dynamic readjustment of sampling interval // and randomized starting point should help with avoiding heavy biases. -typedef struct discrete_dynamic_sampler { +typedef struct { // --- Config --- // Name of this sampler for debug logs. 
const char *debug_name; diff --git a/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c b/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c index a34a81feaf2..fcbc42eaa94 100644 --- a/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c +++ b/ext/datadog_profiling_native_extension/collectors_idle_sampling_helper.c @@ -21,15 +21,15 @@ typedef enum { ACTION_WAIT, ACTION_RUN, ACTION_STOP } action; // Contains state for a single CpuAndWallTimeWorker instance -struct idle_sampling_loop_state { +typedef struct { pthread_mutex_t wakeup_mutex; pthread_cond_t wakeup; action requested_action; void (*run_action_function)(void); -}; +} idle_sampling_loop_state; static VALUE _native_new(VALUE klass); -static void reset_state(struct idle_sampling_loop_state *state); +static void reset_state(idle_sampling_loop_state *state); static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_instance); static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance); static void *run_idle_sampling_loop(void *state_ptr); @@ -62,7 +62,7 @@ void collectors_idle_sampling_helper_init(VALUE profiling_module) { rb_define_singleton_method(testing_module, "_native_idle_sampling_helper_request_action", _native_idle_sampling_helper_request_action, 1); } -// This structure is used to define a Ruby object that stores a pointer to a struct idle_sampling_loop_state +// This structure is used to define a Ruby object that stores a pointer to a idle_sampling_loop_state // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works static const rb_data_type_t idle_sampling_helper_typed_data = { .wrap_struct_name = "Datadog::Profiling::Collectors::IdleSamplingHelper", @@ -76,7 +76,7 @@ static const rb_data_type_t idle_sampling_helper_typed_data = { }; static VALUE _native_new(VALUE klass) { - struct idle_sampling_loop_state *state = ruby_xcalloc(1, sizeof(struct idle_sampling_loop_state)); + idle_sampling_loop_state *state = ruby_xcalloc(1, sizeof(idle_sampling_loop_state)); // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory // being leaked. 
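// --- Illustrative sketch: a reduced version of the wait/notify protocol that the idle sampling
// helper in the hunks below implements. A worker thread sleeps on a condition variable until
// another thread publishes ACTION_RUN plus a function to call, or ACTION_STOP. The locking, the
// "consume the request" step, and the shutdown path mirror the shape of run_idle_sampling_loop;
// the names and the simplified error handling are illustrative stand-ins, not the patch's code.
#include <pthread.h>
#include <stdio.h>

typedef enum { ACTION_WAIT, ACTION_RUN, ACTION_STOP } action;

typedef struct {
  pthread_mutex_t wakeup_mutex;
  pthread_cond_t wakeup;
  action requested_action;
  void (*run_action_function)(void);
} idle_loop_state;

static void *run_idle_loop(void *state_ptr) {
  idle_loop_state *state = (idle_loop_state *) state_ptr;
  for (;;) {
    pthread_mutex_lock(&state->wakeup_mutex);
    while (state->requested_action == ACTION_WAIT) {
      pthread_cond_wait(&state->wakeup, &state->wakeup_mutex); // releases the mutex while sleeping
    }
    action current = state->requested_action;
    void (*fn)(void) = state->run_action_function;
    if (current == ACTION_RUN) state->requested_action = ACTION_WAIT; // consume the request
    pthread_mutex_unlock(&state->wakeup_mutex);

    if (current == ACTION_STOP) return NULL;
    if (fn != NULL) fn(); // run the requested work outside the lock
  }
}

static void request_action(idle_loop_state *state, action requested, void (*fn)(void)) {
  pthread_mutex_lock(&state->wakeup_mutex);
  state->requested_action = requested;
  state->run_action_function = fn;
  pthread_cond_broadcast(&state->wakeup);
  pthread_mutex_unlock(&state->wakeup_mutex);
}

static int run_request_consumed(idle_loop_state *state) {
  pthread_mutex_lock(&state->wakeup_mutex);
  int consumed = (state->requested_action == ACTION_WAIT);
  pthread_mutex_unlock(&state->wakeup_mutex);
  return consumed;
}

static void say_hello(void) { printf("requested action ran\n"); }

int main(void) {
  static idle_loop_state state = {
    .wakeup_mutex = PTHREAD_MUTEX_INITIALIZER,
    .wakeup = PTHREAD_COND_INITIALIZER,
    .requested_action = ACTION_WAIT,
    .run_action_function = NULL,
  };

  pthread_t worker;
  pthread_create(&worker, NULL, run_idle_loop, &state);

  request_action(&state, ACTION_RUN, say_hello);
  while (!run_request_consumed(&state)) { /* spin briefly until the worker picks up the request */ }

  request_action(&state, ACTION_STOP, NULL);
  pthread_join(worker, NULL);
  return 0;
}
// --- end of sketch ---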
@@ -90,7 +90,7 @@ static VALUE _native_new(VALUE klass) { return TypedData_Wrap_Struct(klass, &idle_sampling_helper_typed_data, state); } -static void reset_state(struct idle_sampling_loop_state *state) { +static void reset_state(idle_sampling_loop_state *state) { state->wakeup_mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER; state->wakeup = (pthread_cond_t) PTHREAD_COND_INITIALIZER; state->requested_action = ACTION_WAIT; @@ -101,8 +101,8 @@ static void reset_state(struct idle_sampling_loop_state *state) { // a pristine state before recreating the worker thread (this includes resetting the mutex in case it was left // locked halfway through the VM forking) static VALUE _native_reset(DDTRACE_UNUSED VALUE self, VALUE self_instance) { - struct idle_sampling_loop_state *state; - TypedData_Get_Struct(self_instance, struct idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); + idle_sampling_loop_state *state; + TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); reset_state(state); @@ -110,8 +110,8 @@ static VALUE _native_reset(DDTRACE_UNUSED VALUE self, VALUE self_instance) { } static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_instance) { - struct idle_sampling_loop_state *state; - TypedData_Get_Struct(self_instance, struct idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); + idle_sampling_loop_state *state; + TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); // Release GVL and run the loop waiting for requests rb_thread_call_without_gvl(run_idle_sampling_loop, state, interrupt_idle_sampling_loop, state); @@ -120,7 +120,7 @@ static VALUE _native_idle_sampling_loop(DDTRACE_UNUSED VALUE self, VALUE self_in } static void *run_idle_sampling_loop(void *state_ptr) { - struct idle_sampling_loop_state *state = (struct idle_sampling_loop_state *) state_ptr; + idle_sampling_loop_state *state = (idle_sampling_loop_state *) state_ptr; int error = 0; while (true) { @@ -164,7 +164,7 @@ static void *run_idle_sampling_loop(void *state_ptr) { } static void interrupt_idle_sampling_loop(void *state_ptr) { - struct idle_sampling_loop_state *state = (struct idle_sampling_loop_state *) state_ptr; + idle_sampling_loop_state *state = (idle_sampling_loop_state *) state_ptr; int error = 0; // Note about the error handling in this situation: Something bad happening at this stage is really really awkward to @@ -189,8 +189,8 @@ static void interrupt_idle_sampling_loop(void *state_ptr) { } static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance) { - struct idle_sampling_loop_state *state; - TypedData_Get_Struct(self_instance, struct idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); + idle_sampling_loop_state *state; + TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); ENFORCE_SUCCESS_GVL(pthread_mutex_lock(&state->wakeup_mutex)); state->requested_action = ACTION_STOP; @@ -204,12 +204,12 @@ static VALUE _native_stop(DDTRACE_UNUSED VALUE self, VALUE self_instance) { // Assumption: Function gets called without the global VM lock void idle_sampling_helper_request_action(VALUE self_instance, void (*run_action_function)(void)) { - struct idle_sampling_loop_state *state; + idle_sampling_loop_state *state; if (!rb_typeddata_is_kind_of(self_instance, &idle_sampling_helper_typed_data)) { grab_gvl_and_raise(rb_eTypeError, "Wrong argument for idle_sampling_helper_request_action"); } 
// This should never fail the the above check passes - TypedData_Get_Struct(self_instance, struct idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); + TypedData_Get_Struct(self_instance, idle_sampling_loop_state, &idle_sampling_helper_typed_data, state); ENFORCE_SUCCESS_NO_GVL(pthread_mutex_lock(&state->wakeup_mutex)); if (state->requested_action == ACTION_WAIT) { diff --git a/ext/datadog_profiling_native_extension/collectors_stack.c b/ext/datadog_profiling_native_extension/collectors_stack.c index c7445dfc9ca..201a548f388 100644 --- a/ext/datadog_profiling_native_extension/collectors_stack.c +++ b/ext/datadog_profiling_native_extension/collectors_stack.c @@ -14,11 +14,11 @@ static VALUE missing_string = Qnil; // Used as scratch space during sampling -struct sampling_buffer { +struct sampling_buffer { // Note: typedef'd in the header to sampling_buffer uint16_t max_frames; ddog_prof_Location *locations; frame_info *stack_buffer; -}; // Note: typedef'd in the header to sampling_buffer +}; static VALUE _native_sample(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self); static VALUE native_sample_do(VALUE args); @@ -44,7 +44,7 @@ void collectors_stack_init(VALUE profiling_module) { rb_global_variable(&missing_string); } -struct native_sample_args { +typedef struct { VALUE in_gc; VALUE recorder_instance; sample_values values; @@ -52,7 +52,7 @@ struct native_sample_args { VALUE thread; ddog_prof_Location *locations; sampling_buffer *buffer; -}; +} native_sample_args; // This method exists only to enable testing Datadog::Profiling::Collectors::Stack behavior using RSpec. // It SHOULD NOT be used for other purposes. @@ -123,7 +123,7 @@ static VALUE _native_sample(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self) { ddog_prof_Slice_Label slice_labels = {.ptr = labels, .len = labels_count}; - struct native_sample_args args_struct = { + native_sample_args args_struct = { .in_gc = in_gc, .recorder_instance = recorder_instance, .values = values, @@ -137,7 +137,7 @@ static VALUE _native_sample(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self) { } static VALUE native_sample_do(VALUE args) { - struct native_sample_args *args_struct = (struct native_sample_args *) args; + native_sample_args *args_struct = (native_sample_args *) args; if (args_struct->in_gc == Qtrue) { record_placeholder_stack( @@ -160,7 +160,7 @@ static VALUE native_sample_do(VALUE args) { } static VALUE native_sample_ensure(VALUE args) { - struct native_sample_args *args_struct = (struct native_sample_args *) args; + native_sample_args *args_struct = (native_sample_args *) args; ruby_xfree(args_struct->locations); sampling_buffer_free(args_struct->buffer); diff --git a/ext/datadog_profiling_native_extension/collectors_thread_context.c b/ext/datadog_profiling_native_extension/collectors_thread_context.c index 4afb23c5a9e..ba351b0c1fa 100644 --- a/ext/datadog_profiling_native_extension/collectors_thread_context.c +++ b/ext/datadog_profiling_native_extension/collectors_thread_context.c @@ -113,14 +113,14 @@ static uint32_t global_waiting_for_gvl_threshold_ns = MILLIS_AS_NS(10); typedef enum { OTEL_CONTEXT_ENABLED_FALSE, OTEL_CONTEXT_ENABLED_ONLY, OTEL_CONTEXT_ENABLED_BOTH } otel_context_enabled; // Contains state for a single ThreadContext instance -struct thread_context_collector_state { +typedef struct { // Note: Places in this file that usually need to be changed when this struct is changed are tagged with // "Update this when modifying state struct" // Required by Datadog::Profiling::Collectors::Stack as a scratch 
buffer during sampling ddog_prof_Location *locations; uint16_t max_frames; - // Hashmap + // Hashmap st_table *hash_map_per_thread_context; // Datadog::Profiling::StackRecorder instance VALUE recorder_instance; @@ -163,10 +163,10 @@ struct thread_context_collector_state { long wall_time_at_previous_gc_ns; // Will be INVALID_TIME unless there's accumulated time above long wall_time_at_last_flushed_gc_event_ns; // Starts at 0 and then will always be valid } gc_tracking; -}; +} thread_context_collector_state; // Tracks per-thread state -struct per_thread_context { +typedef struct { sampling_buffer *sampling_buffer; char thread_id[THREAD_ID_LIMIT_CHARS]; ddog_CharSlice thread_id_char_slice; @@ -182,21 +182,21 @@ struct per_thread_context { long cpu_time_at_start_ns; long wall_time_at_start_ns; } gc_tracking; -}; +} per_thread_context; // Used to correlate profiles with traces -struct trace_identifiers { +typedef struct { bool valid; uint64_t local_root_span_id; uint64_t span_id; VALUE trace_endpoint; -}; +} trace_identifiers; -struct otel_span { +typedef struct { VALUE span; VALUE span_id; VALUE trace_id; -}; +} otel_span; static void thread_context_collector_typed_data_mark(void *state_ptr); static void thread_context_collector_typed_data_free(void *state_ptr); @@ -209,19 +209,19 @@ static VALUE _native_on_gc_start(VALUE self, VALUE collector_instance); static VALUE _native_on_gc_finish(VALUE self, VALUE collector_instance); static VALUE _native_sample_after_gc(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE reset_monotonic_to_system_state, VALUE allow_exception); static void update_metrics_and_sample( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread_being_sampled, VALUE stack_from_thread, - struct per_thread_context *thread_context, + per_thread_context *thread_context, sampling_buffer* sampling_buffer, long current_cpu_time_ns, long current_monotonic_wall_time_ns ); static void trigger_sample_for_thread( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, VALUE stack_from_thread, - struct per_thread_context *thread_context, + per_thread_context *thread_context, sampling_buffer* sampling_buffer, sample_values values, long current_monotonic_wall_time_ns, @@ -231,37 +231,37 @@ static void trigger_sample_for_thread( bool is_safe_to_allocate_objects ); static VALUE _native_thread_list(VALUE self); -static struct per_thread_context *get_or_create_context_for(VALUE thread, struct thread_context_collector_state *state); -static struct per_thread_context *get_context_for(VALUE thread, struct thread_context_collector_state *state); -static void initialize_context(VALUE thread, struct per_thread_context *thread_context, struct thread_context_collector_state *state); -static void free_context(struct per_thread_context* thread_context); +static per_thread_context *get_or_create_context_for(VALUE thread, thread_context_collector_state *state); +static per_thread_context *get_context_for(VALUE thread, thread_context_collector_state *state); +static void initialize_context(VALUE thread, per_thread_context *thread_context, thread_context_collector_state *state); +static void free_context(per_thread_context* thread_context); static VALUE _native_inspect(VALUE self, VALUE collector_instance); -static VALUE per_thread_context_st_table_as_ruby_hash(struct thread_context_collector_state *state); +static VALUE per_thread_context_st_table_as_ruby_hash(thread_context_collector_state *state); static int 
per_thread_context_as_ruby_hash(st_data_t key_thread, st_data_t value_context, st_data_t result_hash); -static VALUE stats_as_ruby_hash(struct thread_context_collector_state *state); -static VALUE gc_tracking_as_ruby_hash(struct thread_context_collector_state *state); -static void remove_context_for_dead_threads(struct thread_context_collector_state *state); +static VALUE stats_as_ruby_hash(thread_context_collector_state *state); +static VALUE gc_tracking_as_ruby_hash(thread_context_collector_state *state); +static void remove_context_for_dead_threads(thread_context_collector_state *state); static int remove_if_dead_thread(st_data_t key_thread, st_data_t value_context, st_data_t _argument); static VALUE _native_per_thread_context(VALUE self, VALUE collector_instance); static long update_time_since_previous_sample(long *time_at_previous_sample_ns, long current_time_ns, long gc_start_time_ns, bool is_wall_time); -static long cpu_time_now_ns(struct per_thread_context *thread_context); +static long cpu_time_now_ns(per_thread_context *thread_context); static long thread_id_for(VALUE thread); static VALUE _native_stats(VALUE self, VALUE collector_instance); static VALUE _native_gc_tracking(VALUE self, VALUE collector_instance); static void trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, - struct trace_identifiers *trace_identifiers_result, + trace_identifiers *trace_identifiers_result, bool is_safe_to_allocate_objects ); static bool should_collect_resource(VALUE root_span); static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE collector_instance); -static VALUE thread_list(struct thread_context_collector_state *state); +static VALUE thread_list(thread_context_collector_state *state); static VALUE _native_sample_allocation(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE sample_weight, VALUE new_object); static VALUE _native_new_empty_thread(VALUE self); static ddog_CharSlice ruby_value_type_to_class_name(enum ruby_value_type type); static void ddtrace_otel_trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE *active_trace, VALUE *root_span, VALUE *numeric_span_id, @@ -271,10 +271,10 @@ static void ddtrace_otel_trace_identifiers_for( ); static VALUE _native_sample_skipped_allocation_samples(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE skipped_samples); static bool handle_gvl_waiting( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread_being_sampled, VALUE stack_from_thread, - struct per_thread_context *thread_context, + per_thread_context *thread_context, sampling_buffer* sampling_buffer, long current_cpu_time_ns ); @@ -284,12 +284,12 @@ static VALUE _native_on_gvl_running(DDTRACE_UNUSED VALUE self, VALUE thread); static VALUE _native_sample_after_gvl_running(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread); static VALUE _native_apply_delta_to_cpu_time_at_previous_sample_ns(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread, VALUE delta_ns); static void otel_without_ddtrace_trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, - struct trace_identifiers *trace_identifiers_result, + trace_identifiers *trace_identifiers_result, bool is_safe_to_allocate_objects ); -static struct otel_span otel_span_from(VALUE otel_context, VALUE otel_current_span_key); +static 
otel_span otel_span_from(VALUE otel_context, VALUE otel_current_span_key); static uint64_t otel_span_id_to_uint(VALUE otel_span_id); static VALUE safely_lookup_hash_without_going_into_ruby_code(VALUE hash, VALUE key); @@ -357,7 +357,7 @@ void collectors_thread_context_init(VALUE profiling_module) { gc_profiling_init(); } -// This structure is used to define a Ruby object that stores a pointer to a struct thread_context_collector_state +// This structure is used to define a Ruby object that stores a pointer to a thread_context_collector_state // See also https://github.com/ruby/ruby/blob/master/doc/extension.rdoc for how this works static const rb_data_type_t thread_context_collector_typed_data = { .wrap_struct_name = "Datadog::Profiling::Collectors::ThreadContext", @@ -373,7 +373,7 @@ static const rb_data_type_t thread_context_collector_typed_data = { // This function is called by the Ruby GC to give us a chance to mark any Ruby objects that we're holding on to, // so that they don't get garbage collected static void thread_context_collector_typed_data_mark(void *state_ptr) { - struct thread_context_collector_state *state = (struct thread_context_collector_state *) state_ptr; + thread_context_collector_state *state = (thread_context_collector_state *) state_ptr; // Update this when modifying state struct rb_gc_mark(state->recorder_instance); @@ -384,7 +384,7 @@ static void thread_context_collector_typed_data_mark(void *state_ptr) { } static void thread_context_collector_typed_data_free(void *state_ptr) { - struct thread_context_collector_state *state = (struct thread_context_collector_state *) state_ptr; + thread_context_collector_state *state = (thread_context_collector_state *) state_ptr; // Update this when modifying state struct @@ -409,13 +409,13 @@ static int hash_map_per_thread_context_mark(st_data_t key_thread, DDTRACE_UNUSED // Used to clear each of the per_thread_contexts inside the hash_map_per_thread_context static int hash_map_per_thread_context_free_values(DDTRACE_UNUSED st_data_t _thread, st_data_t value_per_thread_context, DDTRACE_UNUSED st_data_t _argument) { - struct per_thread_context *thread_context = (struct per_thread_context*) value_per_thread_context; + per_thread_context *thread_context = (per_thread_context*) value_per_thread_context; free_context(thread_context); return ST_CONTINUE; } static VALUE _native_new(VALUE klass) { - struct thread_context_collector_state *state = ruby_xcalloc(1, sizeof(struct thread_context_collector_state)); + thread_context_collector_state *state = ruby_xcalloc(1, sizeof(thread_context_collector_state)); // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory // being leaked. 
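// --- Illustrative sketch: a condensed version of the TypedData lifecycle used by the collectors in
// this patch. Native state is heap-allocated, wrapped in a Ruby object, and the dmark callback
// reports every VALUE held inside the struct so the GC neither collects nor loses track of it;
// dfree releases the native memory. The class, fields, and function names below are hypothetical
// stand-ins, not the patch's code, and most of the real setup is omitted.
#include <ruby.h>

typedef struct {
  VALUE recorder_instance; // a Ruby object owned through native memory: it must be marked
  long samples;
} example_state;

static void example_typed_data_mark(void *state_ptr) {
  example_state *state = (example_state *) state_ptr;
  rb_gc_mark(state->recorder_instance); // without this, the GC could free recorder_instance under us
}

static void example_typed_data_free(void *state_ptr) {
  ruby_xfree(state_ptr);
}

static const rb_data_type_t example_typed_data = {
  .wrap_struct_name = "ExampleCollector",
  .function = {
    .dmark = example_typed_data_mark,
    .dfree = example_typed_data_free,
    .dsize = NULL,
  },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE example_alloc(VALUE klass) {
  example_state *state = ruby_xcalloc(1, sizeof(example_state)); // sizeof(example_state): no `struct` keyword needed
  state->recorder_instance = Qnil;
  return TypedData_Wrap_Struct(klass, &example_typed_data, state);
}

static VALUE example_samples(VALUE self) {
  example_state *state;
  TypedData_Get_Struct(self, example_state, &example_typed_data, state);
  return LONG2NUM(state->samples);
}

void Init_example_collector(void) {
  VALUE klass = rb_define_class("ExampleCollector", rb_cObject);
  rb_define_alloc_func(klass, example_alloc);
  rb_define_method(klass, "samples", example_samples, 0);
}
// --- end of sketch ---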
@@ -471,8 +471,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel ENFORCE_BOOLEAN(timeline_enabled); ENFORCE_TYPE(waiting_for_gvl_threshold_ns, T_FIXNUM); - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); // Update this when modifying state struct state->max_frames = sampling_buffer_check_max_frames(NUM2INT(max_frames)); @@ -550,8 +550,8 @@ static VALUE _native_sample_after_gc(DDTRACE_UNUSED VALUE self, VALUE collector_ ENFORCE_BOOLEAN(reset_monotonic_to_system_state); ENFORCE_BOOLEAN(allow_exception); - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); if (reset_monotonic_to_system_state == Qtrue) { state->time_converter_state = (monotonic_to_system_epoch_state) MONOTONIC_TO_SYSTEM_EPOCH_INITIALIZER; @@ -577,11 +577,11 @@ static VALUE _native_sample_after_gc(DDTRACE_UNUSED VALUE self, VALUE collector_ // The `profiler_overhead_stack_thread` is used to attribute the profiler overhead to a stack borrowed from a different thread // (belonging to ddtrace), so that the overhead is visible in the profile rather than blamed on user code. void thread_context_collector_sample(VALUE self_instance, long current_monotonic_wall_time_ns, VALUE profiler_overhead_stack_thread) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); VALUE current_thread = rb_thread_current(); - struct per_thread_context *current_thread_context = get_or_create_context_for(current_thread, state); + per_thread_context *current_thread_context = get_or_create_context_for(current_thread, state); long cpu_time_at_sample_start_for_current_thread = cpu_time_now_ns(current_thread_context); VALUE threads = thread_list(state); @@ -589,7 +589,7 @@ void thread_context_collector_sample(VALUE self_instance, long current_monotonic const long thread_count = RARRAY_LEN(threads); for (long i = 0; i < thread_count; i++) { VALUE thread = RARRAY_AREF(threads, i); - struct per_thread_context *thread_context = get_or_create_context_for(thread, state); + per_thread_context *thread_context = get_or_create_context_for(thread, state); // We account for cpu-time for the current thread in a different way -- we use the cpu-time at sampling start, to avoid // blaming the time the profiler took on whatever's running on the thread right now @@ -625,10 +625,10 @@ void thread_context_collector_sample(VALUE self_instance, long current_monotonic } static void update_metrics_and_sample( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread_being_sampled, VALUE stack_from_thread, // This can be different when attributing profiler overhead using a different stack - struct per_thread_context *thread_context, + per_thread_context *thread_context, 
sampling_buffer* sampling_buffer, long current_cpu_time_ns, long current_monotonic_wall_time_ns @@ -696,12 +696,12 @@ static void update_metrics_and_sample( // Assumption 1: This function is called in a thread that is holding the Global VM Lock. Caller is responsible for enforcing this. // Assumption 2: This function is called from the main Ractor (if Ruby has support for Ractors). void thread_context_collector_on_gc_start(VALUE self_instance) { - struct thread_context_collector_state *state; + thread_context_collector_state *state; if (!rb_typeddata_is_kind_of(self_instance, &thread_context_collector_typed_data)) return; // This should never fail the the above check passes - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); - struct per_thread_context *thread_context = get_context_for(rb_thread_current(), state); + per_thread_context *thread_context = get_context_for(rb_thread_current(), state); // If there was no previously-existing context for this thread, we won't allocate one (see safety). For now we just drop // the GC sample, under the assumption that "a thread that is so new that we never sampled it even once before it triggers @@ -729,12 +729,12 @@ void thread_context_collector_on_gc_start(VALUE self_instance) { // Assumption 2: This function is called from the main Ractor (if Ruby has support for Ractors). __attribute__((warn_unused_result)) bool thread_context_collector_on_gc_finish(VALUE self_instance) { - struct thread_context_collector_state *state; + thread_context_collector_state *state; if (!rb_typeddata_is_kind_of(self_instance, &thread_context_collector_typed_data)) return false; // This should never fail the the above check passes - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); - struct per_thread_context *thread_context = get_context_for(rb_thread_current(), state); + per_thread_context *thread_context = get_context_for(rb_thread_current(), state); // If there was no previously-existing context for this thread, we won't allocate one (see safety). We keep a metric for // how often this happens -- see on_gc_start. @@ -807,8 +807,8 @@ bool thread_context_collector_on_gc_finish(VALUE self_instance) { // Assumption 3: Unlike `on_gc_start` and `on_gc_finish`, this method is allowed to allocate memory as needed. // Assumption 4: This function is called from the main Ractor (if Ruby has support for Ractors). 
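// --- Illustrative sketch: the timing bookkeeping that on_gc_start/on_gc_finish above perform per
// thread, reduced to plain C. A monotonic timestamp is recorded when GC enters; on exit the delta
// is accumulated only if a matching start was actually seen, with a sentinel value standing in for
// "no start recorded" (for example, because the thread had no context yet). The names, the
// single-threaded layout, and the sentinel value are illustrative stand-ins, not the patch's code.
#include <stdio.h>
#include <time.h>

#define INVALID_TIME -1L

typedef struct {
  long wall_time_at_gc_start_ns; // INVALID_TIME when no GC is in progress for this thread
  long accumulated_gc_time_ns;
} gc_tracking_example;

static long monotonic_now_ns(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return ts.tv_sec * 1000000000L + ts.tv_nsec; // assumes 64-bit long, as the patch's ns counters do
}

static void on_gc_start_example(gc_tracking_example *tracking) {
  tracking->wall_time_at_gc_start_ns = monotonic_now_ns();
}

static void on_gc_finish_example(gc_tracking_example *tracking) {
  if (tracking->wall_time_at_gc_start_ns == INVALID_TIME) return; // never saw the matching start; drop it
  tracking->accumulated_gc_time_ns += monotonic_now_ns() - tracking->wall_time_at_gc_start_ns;
  tracking->wall_time_at_gc_start_ns = INVALID_TIME; // reset so a stray finish is ignored
}

int main(void) {
  gc_tracking_example tracking = { .wall_time_at_gc_start_ns = INVALID_TIME, .accumulated_gc_time_ns = 0 };
  on_gc_start_example(&tracking);
  on_gc_finish_example(&tracking);
  printf("accumulated GC wall time: %ld ns\n", tracking.accumulated_gc_time_ns);
  return 0;
}
// --- end of sketch ---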
VALUE thread_context_collector_sample_after_gc(VALUE self_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); if (state->gc_tracking.wall_time_at_previous_gc_ns == INVALID_TIME) { rb_raise(rb_eRuntimeError, "BUG: Unexpected call to sample_after_gc without valid GC information available"); @@ -857,10 +857,10 @@ VALUE thread_context_collector_sample_after_gc(VALUE self_instance) { } static void trigger_sample_for_thread( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, VALUE stack_from_thread, // This can be different when attributing profiler overhead using a different stack - struct per_thread_context *thread_context, + per_thread_context *thread_context, sampling_buffer* sampling_buffer, sample_values values, long current_monotonic_wall_time_ns, @@ -908,7 +908,7 @@ static void trigger_sample_for_thread( }; } - struct trace_identifiers trace_identifiers_result = {.valid = false, .trace_endpoint = Qnil}; + trace_identifiers trace_identifiers_result = {.valid = false, .trace_endpoint = Qnil}; trace_identifiers_for(state, thread, &trace_identifiers_result, is_safe_to_allocate_objects); if (!trace_identifiers_result.valid && state->otel_context_enabled != OTEL_CONTEXT_ENABLED_FALSE) { @@ -1017,14 +1017,14 @@ static VALUE _native_thread_list(DDTRACE_UNUSED VALUE _self) { return result; } -static struct per_thread_context *get_or_create_context_for(VALUE thread, struct thread_context_collector_state *state) { - struct per_thread_context* thread_context = NULL; +static per_thread_context *get_or_create_context_for(VALUE thread, thread_context_collector_state *state) { + per_thread_context* thread_context = NULL; st_data_t value_context = 0; if (st_lookup(state->hash_map_per_thread_context, (st_data_t) thread, &value_context)) { - thread_context = (struct per_thread_context*) value_context; + thread_context = (per_thread_context*) value_context; } else { - thread_context = ruby_xcalloc(1, sizeof(struct per_thread_context)); + thread_context = ruby_xcalloc(1, sizeof(per_thread_context)); initialize_context(thread, thread_context, state); st_insert(state->hash_map_per_thread_context, (st_data_t) thread, (st_data_t) thread_context); } @@ -1032,12 +1032,12 @@ static struct per_thread_context *get_or_create_context_for(VALUE thread, struct return thread_context; } -static struct per_thread_context *get_context_for(VALUE thread, struct thread_context_collector_state *state) { - struct per_thread_context* thread_context = NULL; +static per_thread_context *get_context_for(VALUE thread, thread_context_collector_state *state) { + per_thread_context* thread_context = NULL; st_data_t value_context = 0; if (st_lookup(state->hash_map_per_thread_context, (st_data_t) thread, &value_context)) { - thread_context = (struct per_thread_context*) value_context; + thread_context = (per_thread_context*) value_context; } return thread_context; @@ -1064,7 +1064,7 @@ static bool is_logging_gem_monkey_patch(VALUE invoke_file_location) { return strncmp(invoke_file + invoke_file_len - logging_gem_path_len, LOGGING_GEM_PATH, logging_gem_path_len) == 0; } -static void initialize_context(VALUE thread, struct per_thread_context *thread_context, struct thread_context_collector_state *state) { 
+static void initialize_context(VALUE thread, per_thread_context *thread_context, thread_context_collector_state *state) { thread_context->sampling_buffer = sampling_buffer_new(state->max_frames, state->locations); snprintf(thread_context->thread_id, THREAD_ID_LIMIT_CHARS, "%"PRIu64" (%lu)", native_thread_id_for(thread), (unsigned long) thread_id_for(thread)); @@ -1121,14 +1121,14 @@ static void initialize_context(VALUE thread, struct per_thread_context *thread_c #endif } -static void free_context(struct per_thread_context* thread_context) { +static void free_context(per_thread_context* thread_context) { sampling_buffer_free(thread_context->sampling_buffer); ruby_xfree(thread_context); } static VALUE _native_inspect(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); VALUE result = rb_str_new2(" (native state)"); @@ -1156,7 +1156,7 @@ static VALUE _native_inspect(DDTRACE_UNUSED VALUE _self, VALUE collector_instanc return result; } -static VALUE per_thread_context_st_table_as_ruby_hash(struct thread_context_collector_state *state) { +static VALUE per_thread_context_st_table_as_ruby_hash(thread_context_collector_state *state) { VALUE result = rb_hash_new(); st_foreach(state->hash_map_per_thread_context, per_thread_context_as_ruby_hash, result); return result; @@ -1164,7 +1164,7 @@ static VALUE per_thread_context_st_table_as_ruby_hash(struct thread_context_coll static int per_thread_context_as_ruby_hash(st_data_t key_thread, st_data_t value_context, st_data_t result_hash) { VALUE thread = (VALUE) key_thread; - struct per_thread_context *thread_context = (struct per_thread_context*) value_context; + per_thread_context *thread_context = (per_thread_context*) value_context; VALUE result = (VALUE) result_hash; VALUE context_as_hash = rb_hash_new(); rb_hash_aset(result, thread, context_as_hash); @@ -1189,7 +1189,7 @@ static int per_thread_context_as_ruby_hash(st_data_t key_thread, st_data_t value return ST_CONTINUE; } -static VALUE stats_as_ruby_hash(struct thread_context_collector_state *state) { +static VALUE stats_as_ruby_hash(thread_context_collector_state *state) { // Update this when modifying state struct (stats inner struct) VALUE stats_as_hash = rb_hash_new(); VALUE arguments[] = { @@ -1200,7 +1200,7 @@ static VALUE stats_as_ruby_hash(struct thread_context_collector_state *state) { return stats_as_hash; } -static VALUE gc_tracking_as_ruby_hash(struct thread_context_collector_state *state) { +static VALUE gc_tracking_as_ruby_hash(thread_context_collector_state *state) { // Update this when modifying state struct (gc_tracking inner struct) VALUE result = rb_hash_new(); VALUE arguments[] = { @@ -1213,13 +1213,13 @@ static VALUE gc_tracking_as_ruby_hash(struct thread_context_collector_state *sta return result; } -static void remove_context_for_dead_threads(struct thread_context_collector_state *state) { +static void remove_context_for_dead_threads(thread_context_collector_state *state) { st_foreach(state->hash_map_per_thread_context, remove_if_dead_thread, 0 /* unused */); } static int remove_if_dead_thread(st_data_t key_thread, st_data_t value_context, DDTRACE_UNUSED st_data_t _argument) { VALUE thread = (VALUE) key_thread; - struct per_thread_context* 
thread_context = (struct per_thread_context*) value_context; + per_thread_context* thread_context = (per_thread_context*) value_context; if (is_thread_alive(thread)) return ST_CONTINUE; @@ -1232,8 +1232,8 @@ static int remove_if_dead_thread(st_data_t key_thread, st_data_t value_context, // // Returns the whole contents of the per_thread_context structs being tracked. static VALUE _native_per_thread_context(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); return per_thread_context_st_table_as_ruby_hash(state); } @@ -1278,7 +1278,7 @@ static long update_time_since_previous_sample(long *time_at_previous_sample_ns, } // Safety: This function is assumed never to raise exceptions by callers -static long cpu_time_now_ns(struct per_thread_context *thread_context) { +static long cpu_time_now_ns(per_thread_context *thread_context) { thread_cpu_time cpu_time = thread_cpu_time_for(thread_context->thread_cpu_time_id); if (!cpu_time.valid) { @@ -1316,8 +1316,8 @@ VALUE enforce_thread_context_collector_instance(VALUE object) { // This method exists only to enable testing Datadog::Profiling::Collectors::ThreadContext behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_stats(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); return stats_as_ruby_hash(state); } @@ -1325,17 +1325,17 @@ static VALUE _native_stats(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) // This method exists only to enable testing Datadog::Profiling::Collectors::ThreadContext behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_gc_tracking(DDTRACE_UNUSED VALUE _self, VALUE collector_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); return gc_tracking_as_ruby_hash(state); } // Assumption 1: This function is called in a thread that is holding the Global VM Lock. Caller is responsible for enforcing this. static void trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, - struct trace_identifiers *trace_identifiers_result, + trace_identifiers *trace_identifiers_result, bool is_safe_to_allocate_objects ) { if (state->otel_context_enabled == OTEL_CONTEXT_ENABLED_ONLY) return; @@ -1415,8 +1415,8 @@ static bool should_collect_resource(VALUE root_span) { // Assumption: This method gets called BEFORE restarting profiling -- e.g. there are no components attempting to // trigger samples at the same time. 
static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE collector_instance) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); // Release all context memory before clearing the existing context st_foreach(state->hash_map_per_thread_context, hash_map_per_thread_context_free_values, 0 /* unused */); @@ -1430,7 +1430,7 @@ static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE collector return Qtrue; } -static VALUE thread_list(struct thread_context_collector_state *state) { +static VALUE thread_list(thread_context_collector_state *state) { VALUE result = state->thread_list_buffer; rb_ary_clear(result); ddtrace_thread_list(result); @@ -1438,8 +1438,8 @@ static VALUE thread_list(struct thread_context_collector_state *state) { } void thread_context_collector_sample_allocation(VALUE self_instance, unsigned int sample_weight, VALUE new_object) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); VALUE current_thread = rb_thread_current(); @@ -1512,7 +1512,7 @@ void thread_context_collector_sample_allocation(VALUE self_instance, unsigned in track_object(state->recorder_instance, new_object, sample_weight, optional_class_name); - struct per_thread_context *thread_context = get_or_create_context_for(current_thread, state); + per_thread_context *thread_context = get_or_create_context_for(current_thread, state); trigger_sample_for_thread( state, @@ -1585,7 +1585,7 @@ static VALUE read_otel_current_span_key_const(DDTRACE_UNUSED VALUE _unused) { return rb_const_get(trace_module, rb_intern("CURRENT_SPAN_KEY")); } -static VALUE get_otel_current_span_key(struct thread_context_collector_state *state, bool is_safe_to_allocate_objects) { +static VALUE get_otel_current_span_key(thread_context_collector_state *state, bool is_safe_to_allocate_objects) { if (state->otel_current_span_key == Qtrue) { // Qtrue means we haven't tried to extract it yet if (!is_safe_to_allocate_objects) { // Calling read_otel_current_span_key_const below can trigger exceptions and arbitrary Ruby code running (e.g. @@ -1608,7 +1608,7 @@ static VALUE get_otel_current_span_key(struct thread_context_collector_state *st // This method gets used when ddtrace is being used indirectly via the opentelemetry APIs. Information gets stored slightly // differently, and this codepath handles it. 
static void ddtrace_otel_trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE *active_trace, VALUE *root_span, VALUE *numeric_span_id, @@ -1652,8 +1652,8 @@ static void ddtrace_otel_trace_identifiers_for( } void thread_context_collector_sample_skipped_allocation_samples(VALUE self_instance, unsigned int skipped_samples) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); ddog_prof_Label labels[] = { // Providing .num = 0 should not be needed but the tracer-2.7 docker image ships a buggy gcc that complains about this @@ -1707,9 +1707,9 @@ static VALUE _native_sample_skipped_allocation_samples(DDTRACE_UNUSED VALUE self // root span id. // This matches the semantics of how ddtrace tracing creates a TraceOperation and assigns a local root span to it. static void otel_without_ddtrace_trace_identifiers_for( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread, - struct trace_identifiers *trace_identifiers_result, + trace_identifiers *trace_identifiers_result, bool is_safe_to_allocate_objects ) { VALUE context_storage = rb_thread_local_aref(thread, otel_context_storage_id /* __opentelemetry_context_storage__ */); @@ -1723,14 +1723,14 @@ static void otel_without_ddtrace_trace_identifiers_for( int active_context_index = RARRAY_LEN(context_storage) - 1; if (active_context_index < 0) return; - struct otel_span active_span = otel_span_from(rb_ary_entry(context_storage, active_context_index), otel_current_span_key); + otel_span active_span = otel_span_from(rb_ary_entry(context_storage, active_context_index), otel_current_span_key); if (active_span.span == Qnil) return; - struct otel_span local_root_span = active_span; + otel_span local_root_span = active_span; // Now find the oldest span starting from the active span that still has the same trace id as the active span for (int i = active_context_index - 1; i >= 0; i--) { - struct otel_span checking_span = otel_span_from(rb_ary_entry(context_storage, i), otel_current_span_key); + otel_span checking_span = otel_span_from(rb_ary_entry(context_storage, i), otel_current_span_key); if (checking_span.span == Qnil) return; if (rb_str_equal(active_span.trace_id, checking_span.trace_id) == Qfalse) break; @@ -1758,8 +1758,8 @@ static void otel_without_ddtrace_trace_identifiers_for( trace_identifiers_result->trace_endpoint = trace_resource; } -static struct otel_span otel_span_from(VALUE otel_context, VALUE otel_current_span_key) { - struct otel_span failed = {.span = Qnil, .span_id = Qnil, .trace_id = Qnil}; +static otel_span otel_span_from(VALUE otel_context, VALUE otel_current_span_key) { + otel_span failed = {.span = Qnil, .span_id = Qnil, .trace_id = Qnil}; if (otel_context == Qnil) return failed; @@ -1778,7 +1778,7 @@ static struct otel_span otel_span_from(VALUE otel_context, VALUE otel_current_sp VALUE trace_id = rb_ivar_get(span_context, at_trace_id_id /* @trace_id */); if (span_id == Qnil || trace_id == Qnil || !RB_TYPE_P(span_id, T_STRING) || !RB_TYPE_P(trace_id, T_STRING)) return failed; - return (struct otel_span) {.span = span, .span_id = span_id, .trace_id = trace_id}; + return (otel_span) {.span = span, .span_id = span_id, .trace_id = trace_id}; } // Otel span ids 
are represented as a big-endian 8-byte string @@ -1880,8 +1880,8 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) { // NOTE: In normal use, current_thread is expected to be == rb_thread_current(); the `current_thread` parameter only // exists to enable testing. VALUE thread_context_collector_sample_after_gvl_running(VALUE self_instance, VALUE current_thread, long current_monotonic_wall_time_ns) { - struct thread_context_collector_state *state; - TypedData_Get_Struct(self_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(self_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); if (!state->timeline_enabled) rb_raise(rb_eRuntimeError, "GVL profiling requires timeline to be enabled"); @@ -1895,7 +1895,7 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) { return Qfalse; } - struct per_thread_context *thread_context = get_or_create_context_for(current_thread, state); + per_thread_context *thread_context = get_or_create_context_for(current_thread, state); // We don't actually account for cpu-time during Waiting for GVL. BUT, we may chose to push an // extra sample to represent the period prior to Waiting for GVL. To support that, we retrieve the current @@ -1921,10 +1921,10 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) { // need to take when sampling cpu/wall-time for a thread that's in the "Waiting for GVL" state. __attribute__((warn_unused_result)) static bool handle_gvl_waiting( - struct thread_context_collector_state *state, + thread_context_collector_state *state, VALUE thread_being_sampled, VALUE stack_from_thread, - struct per_thread_context *thread_context, + per_thread_context *thread_context, sampling_buffer* sampling_buffer, long current_cpu_time_ns ) { @@ -2072,10 +2072,10 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) { static VALUE _native_apply_delta_to_cpu_time_at_previous_sample_ns(DDTRACE_UNUSED VALUE self, VALUE collector_instance, VALUE thread, VALUE delta_ns) { ENFORCE_THREAD(thread); - struct thread_context_collector_state *state; - TypedData_Get_Struct(collector_instance, struct thread_context_collector_state, &thread_context_collector_typed_data, state); + thread_context_collector_state *state; + TypedData_Get_Struct(collector_instance, thread_context_collector_state, &thread_context_collector_typed_data, state); - struct per_thread_context *thread_context = get_context_for(thread, state); + per_thread_context *thread_context = get_context_for(thread, state); if (thread_context == NULL) rb_raise(rb_eArgError, "Unexpected: This method cannot be used unless the per-thread context for the thread already exists"); thread_context->cpu_time_at_previous_sample_ns += NUM2LONG(delta_ns); @@ -2085,10 +2085,10 @@ static uint64_t otel_span_id_to_uint(VALUE otel_span_id) { #else static bool handle_gvl_waiting( - DDTRACE_UNUSED struct thread_context_collector_state *state, + DDTRACE_UNUSED thread_context_collector_state *state, DDTRACE_UNUSED VALUE thread_being_sampled, DDTRACE_UNUSED VALUE stack_from_thread, - DDTRACE_UNUSED struct per_thread_context *thread_context, + DDTRACE_UNUSED per_thread_context *thread_context, DDTRACE_UNUSED sampling_buffer* sampling_buffer, DDTRACE_UNUSED long current_cpu_time_ns ) { return false; } diff --git a/ext/datadog_profiling_native_extension/heap_recorder.h b/ext/datadog_profiling_native_extension/heap_recorder.h index bd298464e35..e59bf7d0187 100644 --- 
a/ext/datadog_profiling_native_extension/heap_recorder.h +++ b/ext/datadog_profiling_native_extension/heap_recorder.h @@ -17,7 +17,7 @@ typedef struct heap_recorder heap_recorder; // Extra data associated with each live object being tracked. -typedef struct live_object_data { +typedef struct { // The weight of this object from a sampling perspective. // // A notion of weight is preserved for each tracked object to allow for an approximate diff --git a/ext/datadog_profiling_native_extension/http_transport.c b/ext/datadog_profiling_native_extension/http_transport.c index d41ab7a6b4c..0852e36d7b6 100644 --- a/ext/datadog_profiling_native_extension/http_transport.c +++ b/ext/datadog_profiling_native_extension/http_transport.c @@ -13,13 +13,13 @@ static VALUE error_symbol = Qnil; // :error in Ruby static VALUE library_version_string = Qnil; -struct call_exporter_without_gvl_arguments { +typedef struct { ddog_prof_Exporter *exporter; ddog_prof_Exporter_Request_BuildResult *build_result; ddog_CancellationToken *cancel_token; ddog_prof_Exporter_SendResult result; bool send_ran; -}; +} call_exporter_without_gvl_arguments; static inline ddog_ByteSlice byte_slice_from_ruby_string(VALUE string); static VALUE _native_validate_exporter(VALUE self, VALUE exporter_configuration); @@ -165,7 +165,7 @@ static VALUE perform_export( // We'll release the Global VM Lock while we're calling send, so that the Ruby VM can continue to work while this // is pending - struct call_exporter_without_gvl_arguments args = + call_exporter_without_gvl_arguments args = {.exporter = exporter, .build_result = &build_result, .cancel_token = cancel_token, .send_ran = false}; // We use rb_thread_call_without_gvl2 instead of rb_thread_call_without_gvl as the gvl2 variant never raises any @@ -300,7 +300,7 @@ static VALUE _native_do_export( } static void *call_exporter_without_gvl(void *call_args) { - struct call_exporter_without_gvl_arguments *args = (struct call_exporter_without_gvl_arguments*) call_args; + call_exporter_without_gvl_arguments *args = (call_exporter_without_gvl_arguments*) call_args; args->result = ddog_prof_Exporter_send(args->exporter, &args->build_result->ok, args->cancel_token); args->send_ran = true; diff --git a/ext/datadog_profiling_native_extension/private_vm_api_access.h b/ext/datadog_profiling_native_extension/private_vm_api_access.h index 3e412f51ea5..030ff1b5757 100644 --- a/ext/datadog_profiling_native_extension/private_vm_api_access.h +++ b/ext/datadog_profiling_native_extension/private_vm_api_access.h @@ -18,7 +18,7 @@ typedef struct { rb_nativethread_id_t owner; } current_gvl_owner; -typedef struct frame_info { +typedef struct { union { struct { VALUE iseq; diff --git a/ext/datadog_profiling_native_extension/profiling.c b/ext/datadog_profiling_native_extension/profiling.c index a7bfe0d466b..e26bbf897a3 100644 --- a/ext/datadog_profiling_native_extension/profiling.c +++ b/ext/datadog_profiling_native_extension/profiling.c @@ -85,16 +85,16 @@ static VALUE native_working_p(DDTRACE_UNUSED VALUE _self) { return Qtrue; } -struct trigger_grab_gvl_and_raise_arguments { +typedef struct { VALUE exception_class; char *test_message; int test_message_arg; -}; +} trigger_grab_gvl_and_raise_arguments; static VALUE _native_grab_gvl_and_raise(DDTRACE_UNUSED VALUE _self, VALUE exception_class, VALUE test_message, VALUE test_message_arg, VALUE release_gvl) { ENFORCE_TYPE(test_message, T_STRING); - struct trigger_grab_gvl_and_raise_arguments args; + trigger_grab_gvl_and_raise_arguments args; args.exception_class = 
exception_class; args.test_message = StringValueCStr(test_message); @@ -110,7 +110,7 @@ static VALUE _native_grab_gvl_and_raise(DDTRACE_UNUSED VALUE _self, VALUE except } static void *trigger_grab_gvl_and_raise(void *trigger_args) { - struct trigger_grab_gvl_and_raise_arguments *args = (struct trigger_grab_gvl_and_raise_arguments *) trigger_args; + trigger_grab_gvl_and_raise_arguments *args = (trigger_grab_gvl_and_raise_arguments *) trigger_args; if (args->test_message_arg >= 0) { grab_gvl_and_raise(args->exception_class, "%s%d", args->test_message, args->test_message_arg); @@ -121,16 +121,16 @@ static void *trigger_grab_gvl_and_raise(void *trigger_args) { return NULL; } -struct trigger_grab_gvl_and_raise_syserr_arguments { +typedef struct { int syserr_errno; char *test_message; int test_message_arg; -}; +} trigger_grab_gvl_and_raise_syserr_arguments; static VALUE _native_grab_gvl_and_raise_syserr(DDTRACE_UNUSED VALUE _self, VALUE syserr_errno, VALUE test_message, VALUE test_message_arg, VALUE release_gvl) { ENFORCE_TYPE(test_message, T_STRING); - struct trigger_grab_gvl_and_raise_syserr_arguments args; + trigger_grab_gvl_and_raise_syserr_arguments args; args.syserr_errno = NUM2INT(syserr_errno); args.test_message = StringValueCStr(test_message); @@ -146,7 +146,7 @@ static VALUE _native_grab_gvl_and_raise_syserr(DDTRACE_UNUSED VALUE _self, VALUE } static void *trigger_grab_gvl_and_raise_syserr(void *trigger_args) { - struct trigger_grab_gvl_and_raise_syserr_arguments *args = (struct trigger_grab_gvl_and_raise_syserr_arguments *) trigger_args; + trigger_grab_gvl_and_raise_syserr_arguments *args = (trigger_grab_gvl_and_raise_syserr_arguments *) trigger_args; if (args->test_message_arg >= 0) { grab_gvl_and_raise_syserr(args->syserr_errno, "%s%d", args->test_message, args->test_message_arg); diff --git a/ext/datadog_profiling_native_extension/ruby_helpers.c b/ext/datadog_profiling_native_extension/ruby_helpers.c index 09b14d20855..34e9fa61c77 100644 --- a/ext/datadog_profiling_native_extension/ruby_helpers.c +++ b/ext/datadog_profiling_native_extension/ruby_helpers.c @@ -23,18 +23,18 @@ void ruby_helpers_init(void) { #define MAX_RAISE_MESSAGE_SIZE 256 -struct raise_arguments { +typedef struct { VALUE exception_class; char exception_message[MAX_RAISE_MESSAGE_SIZE]; -}; +} raise_args; static void *trigger_raise(void *raise_arguments) { - struct raise_arguments *args = (struct raise_arguments *) raise_arguments; + raise_args *args = (raise_args *) raise_arguments; rb_raise(args->exception_class, "%s", args->exception_message); } void grab_gvl_and_raise(VALUE exception_class, const char *format_string, ...) { - struct raise_arguments args; + raise_args args; args.exception_class = exception_class; @@ -55,18 +55,18 @@ void grab_gvl_and_raise(VALUE exception_class, const char *format_string, ...) { rb_bug("[ddtrace] Unexpected: Reached the end of grab_gvl_and_raise while raising '%s'\n", args.exception_message); } -struct syserr_raise_arguments { +typedef struct { int syserr_errno; char exception_message[MAX_RAISE_MESSAGE_SIZE]; -}; +} syserr_raise_args; static void *trigger_syserr_raise(void *syserr_raise_arguments) { - struct syserr_raise_arguments *args = (struct syserr_raise_arguments *) syserr_raise_arguments; + syserr_raise_args *args = (syserr_raise_args *) syserr_raise_arguments; rb_syserr_fail(args->syserr_errno, args->exception_message); } void grab_gvl_and_raise_syserr(int syserr_errno, const char *format_string, ...) 
{ - struct syserr_raise_arguments args; + syserr_raise_args args; args.syserr_errno = syserr_errno; diff --git a/ext/datadog_profiling_native_extension/stack_recorder.c b/ext/datadog_profiling_native_extension/stack_recorder.c index 710b17356e2..349a9df89dd 100644 --- a/ext/datadog_profiling_native_extension/stack_recorder.c +++ b/ext/datadog_profiling_native_extension/stack_recorder.c @@ -173,18 +173,18 @@ static const uint8_t all_value_types_positions[] = // Struct for storing stats related to a profile in a particular slot. // These stats will share the same lifetime as the data in that profile slot. -typedef struct slot_stats { +typedef struct { // How many individual samples were recorded into this slot (un-weighted) uint64_t recorded_samples; } stats_slot; -typedef struct profile_slot { +typedef struct { ddog_prof_Profile profile; stats_slot stats; } profile_slot; // Contains native state for each instance -struct stack_recorder_state { +typedef struct { // Heap recorder instance heap_recorder *heap_recorder; bool heap_clean_after_gc_enabled; @@ -210,17 +210,17 @@ struct stack_recorder_state { long serialization_time_ns_max; uint64_t serialization_time_ns_total; } stats_lifetime; -}; +} stack_recorder_state; // Used to group mutex and the corresponding profile slot for easy unlocking after work is done. -typedef struct locked_profile_slot { +typedef struct { pthread_mutex_t *mutex; profile_slot *data; } locked_profile_slot; -struct call_serialize_without_gvl_arguments { +typedef struct { // Set by caller - struct stack_recorder_state *state; + stack_recorder_state *state; ddog_Timespec finish_timestamp; // Set by callee @@ -231,26 +231,26 @@ struct call_serialize_without_gvl_arguments { // Set by both bool serialize_ran; -}; +} call_serialize_without_gvl_arguments; static VALUE _native_new(VALUE klass); -static void initialize_slot_concurrency_control(struct stack_recorder_state *state); -static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types); +static void initialize_slot_concurrency_control(stack_recorder_state *state); +static void initialize_profiles(stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types); static void stack_recorder_typed_data_free(void *data); static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _self); static VALUE _native_serialize(VALUE self, VALUE recorder_instance); static VALUE ruby_time_from(ddog_Timespec ddprof_time); static void *call_serialize_without_gvl(void *call_args); -static locked_profile_slot sampler_lock_active_profile(struct stack_recorder_state *state); +static locked_profile_slot sampler_lock_active_profile(stack_recorder_state *state); static void sampler_unlock_active_profile(locked_profile_slot active_slot); -static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_recorder_state *state); +static profile_slot* serializer_flip_active_and_inactive_slots(stack_recorder_state *state); static VALUE _native_active_slot(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance); static VALUE _native_is_slot_one_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance); static VALUE _native_is_slot_two_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance); static VALUE test_slot_mutex_state(VALUE recorder_instance, int slot); static ddog_Timespec system_epoch_now_timespec(void); static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_instance); -static void 
serializer_set_start_timestamp_for_next_profile(struct stack_recorder_state *state, ddog_Timespec start_time); +static void serializer_set_start_timestamp_for_next_profile(stack_recorder_state *state, ddog_Timespec start_time); static VALUE _native_record_endpoint(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE local_root_span_id, VALUE endpoint); static void reset_profile_slot(profile_slot *slot, ddog_Timespec *start_time /* Can be null */); static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight, VALUE alloc_class); @@ -316,7 +316,7 @@ static const rb_data_type_t stack_recorder_typed_data = { }; static VALUE _native_new(VALUE klass) { - struct stack_recorder_state *state = ruby_xcalloc(1, sizeof(struct stack_recorder_state)); + stack_recorder_state *state = ruby_xcalloc(1, sizeof(stack_recorder_state)); // Note: Any exceptions raised from this note until the TypedData_Wrap_Struct call will lead to the state memory // being leaked. @@ -354,7 +354,7 @@ static VALUE _native_new(VALUE klass) { return stack_recorder; } -static void initialize_slot_concurrency_control(struct stack_recorder_state *state) { +static void initialize_slot_concurrency_control(stack_recorder_state *state) { state->mutex_slot_one = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER; state->mutex_slot_two = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER; @@ -364,7 +364,7 @@ static void initialize_slot_concurrency_control(struct stack_recorder_state *sta state->active_slot = 1; } -static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types) { +static void initialize_profiles(stack_recorder_state *state, ddog_prof_Slice_ValueType sample_types) { ddog_prof_Profile_NewResult slot_one_profile_result = ddog_prof_Profile_new(sample_types, NULL /* period is optional */, NULL /* start_time is optional */); @@ -391,7 +391,7 @@ static void initialize_profiles(struct stack_recorder_state *state, ddog_prof_Sl } static void stack_recorder_typed_data_free(void *state_ptr) { - struct stack_recorder_state *state = (struct stack_recorder_state *) state_ptr; + stack_recorder_state *state = (stack_recorder_state *) state_ptr; pthread_mutex_destroy(&state->mutex_slot_one); ddog_prof_Profile_drop(&state->profile_slot_one.profile); @@ -426,8 +426,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel ENFORCE_BOOLEAN(timeline_enabled); ENFORCE_BOOLEAN(heap_clean_after_gc_enabled); - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); state->heap_clean_after_gc_enabled = (heap_clean_after_gc_enabled == Qtrue); @@ -517,8 +517,8 @@ static VALUE _native_initialize(int argc, VALUE *argv, DDTRACE_UNUSED VALUE _sel } static VALUE _native_serialize(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); ddog_Timespec finish_timestamp = system_epoch_now_timespec(); // Need to do this while still holding on to the Global VM Lock; see comments on method for why @@ -532,7 +532,7 @@ static VALUE _native_serialize(DDTRACE_UNUSED 
VALUE _self, VALUE recorder_instan // We'll release the Global VM Lock while we're calling serialize, so that the Ruby VM can continue to work while this // is pending - struct call_serialize_without_gvl_arguments args = { + call_serialize_without_gvl_arguments args = { .state = state, .finish_timestamp = finish_timestamp, .serialize_ran = false @@ -597,8 +597,8 @@ static VALUE ruby_time_from(ddog_Timespec ddprof_time) { } void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations, sample_values values, sample_labels labels) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); locked_profile_slot active_slot = sampler_lock_active_profile(state); @@ -652,8 +652,8 @@ void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations, } void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight, ddog_CharSlice *alloc_class) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); // FIXME: Heap sampling currently has to be done in 2 parts because the construction of locations is happening // very late in the allocation-sampling path (which is shared with the cpu sampling path). This can // be fixed with some refactoring but for now this leads to a less impactful change. @@ -661,8 +661,8 @@ void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample } void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_CharSlice endpoint) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); locked_profile_slot active_slot = sampler_lock_active_profile(state); @@ -676,8 +676,8 @@ void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_ } void recorder_after_gc_step(VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); if (state->heap_clean_after_gc_enabled) heap_recorder_update_young_objects(state->heap_recorder); } @@ -687,7 +687,7 @@ void recorder_after_gc_step(VALUE recorder_instance) { // Heap recorder iteration context allows us access to stack recorder state and profile being serialized // during iteration of heap recorder live objects. 
typedef struct heap_recorder_iteration_context { - struct stack_recorder_state *state; + stack_recorder_state *state; profile_slot *slot; bool error; @@ -749,7 +749,7 @@ static bool add_heap_sample_to_active_profile_without_gvl(heap_recorder_iteratio return true; } -static void build_heap_profile_without_gvl(struct stack_recorder_state *state, profile_slot *slot) { +static void build_heap_profile_without_gvl(stack_recorder_state *state, profile_slot *slot) { heap_recorder_iteration_context iteration_context = { .state = state, .slot = slot, @@ -770,7 +770,7 @@ static void build_heap_profile_without_gvl(struct stack_recorder_state *state, p } static void *call_serialize_without_gvl(void *call_args) { - struct call_serialize_without_gvl_arguments *args = (struct call_serialize_without_gvl_arguments *) call_args; + call_serialize_without_gvl_arguments *args = (call_serialize_without_gvl_arguments *) call_args; long serialize_no_gvl_start_time_ns = monotonic_wall_time_now_ns(DO_NOT_RAISE_ON_FAILURE); @@ -796,7 +796,7 @@ VALUE enforce_recorder_instance(VALUE object) { return object; } -static locked_profile_slot sampler_lock_active_profile(struct stack_recorder_state *state) { +static locked_profile_slot sampler_lock_active_profile(stack_recorder_state *state) { int error; for (int attempts = 0; attempts < 2; attempts++) { @@ -823,7 +823,7 @@ static void sampler_unlock_active_profile(locked_profile_slot active_slot) { ENFORCE_SUCCESS_GVL(pthread_mutex_unlock(active_slot.mutex)); } -static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_recorder_state *state) { +static profile_slot* serializer_flip_active_and_inactive_slots(stack_recorder_state *state) { int previously_active_slot = state->active_slot; if (previously_active_slot != 1 && previously_active_slot != 2) { @@ -849,8 +849,8 @@ static profile_slot* serializer_flip_active_and_inactive_slots(struct stack_reco // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_active_slot(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); return INT2NUM(state->active_slot); } @@ -864,8 +864,8 @@ static VALUE _native_is_slot_one_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE static VALUE _native_is_slot_two_mutex_locked(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { return test_slot_mutex_state(recorder_instance, 2); } static VALUE test_slot_mutex_state(VALUE recorder_instance, int slot) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); pthread_mutex_t *slot_mutex = (slot == 1) ? &state->mutex_slot_one : &state->mutex_slot_two; @@ -895,8 +895,8 @@ static ddog_Timespec system_epoch_now_timespec(void) { // Assumption: This method gets called BEFORE restarting profiling -- e.g. there are no components attempting to // trigger samples at the same time. 
static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); // In case the fork happened halfway through `serializer_flip_active_and_inactive_slots` execution and the // resulting state is inconsistent, we make sure to reset it back to the initial state. @@ -912,7 +912,7 @@ static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_ // Assumption 1: This method is called with the GVL being held, because `ddog_prof_Profile_reset` mutates the profile and must // not be interrupted part-way through by a VM fork. -static void serializer_set_start_timestamp_for_next_profile(struct stack_recorder_state *state, ddog_Timespec start_time) { +static void serializer_set_start_timestamp_for_next_profile(stack_recorder_state *state, ddog_Timespec start_time) { // Before making this profile active, we reset it so that it uses the correct start_time for its start profile_slot *next_profile_slot = (state->active_slot == 1) ? &state->profile_slot_two : &state->profile_slot_one; reset_profile_slot(next_profile_slot, &start_time); @@ -972,8 +972,8 @@ static void reset_profile_slot(profile_slot *slot, ddog_Timespec *start_time /* // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); heap_recorder_prepare_iteration(state->heap_recorder); @@ -983,8 +983,8 @@ static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _se // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec. // It SHOULD NOT be used for other purposes. static VALUE _native_end_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); heap_recorder_finish_iteration(state->heap_recorder); @@ -994,15 +994,15 @@ static VALUE _native_end_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self // This method exists only to enable testing Datadog::Profiling::StackRecorder behavior using RSpec. // It SHOULD NOT be used for other purposes. 
static VALUE _native_debug_heap_recorder(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); return heap_recorder_testonly_debug(state->heap_recorder); } static VALUE _native_stats(DDTRACE_UNUSED VALUE self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); uint64_t total_serializations = state->stats_lifetime.serialization_successes + state->stats_lifetime.serialization_failures; @@ -1040,15 +1040,15 @@ static VALUE build_profile_stats(profile_slot *slot, long serialization_time_ns, static VALUE _native_is_object_recorded(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE obj_id) { ENFORCE_TYPE(obj_id, T_FIXNUM); - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); return heap_recorder_testonly_is_object_recorded(state->heap_recorder, obj_id); } static VALUE _native_heap_recorder_reset_last_update(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance) { - struct stack_recorder_state *state; - TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); + stack_recorder_state *state; + TypedData_Get_Struct(recorder_instance, stack_recorder_state, &stack_recorder_typed_data, state); heap_recorder_testonly_reset_last_update(state->heap_recorder); diff --git a/ext/datadog_profiling_native_extension/stack_recorder.h b/ext/datadog_profiling_native_extension/stack_recorder.h index 7a500c42c1b..38b228afc78 100644 --- a/ext/datadog_profiling_native_extension/stack_recorder.h +++ b/ext/datadog_profiling_native_extension/stack_recorder.h @@ -13,7 +13,7 @@ typedef struct { int64_t timeline_wall_time_ns; } sample_values; -typedef struct sample_labels { +typedef struct { ddog_prof_Slice_Label labels; // This is used to allow the `Collectors::Stack` to modify the existing label, if any. This MUST be NULL or point diff --git a/ext/datadog_profiling_native_extension/time_helpers.h b/ext/datadog_profiling_native_extension/time_helpers.h index 87bc5341fc9..08390cd8c11 100644 --- a/ext/datadog_profiling_native_extension/time_helpers.h +++ b/ext/datadog_profiling_native_extension/time_helpers.h @@ -39,7 +39,7 @@ static inline long system_epoch_time_now_ns(raise_on_failure_setting raise_on_fa // https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_real_time/7/html/reference_guide/sect-posix_clocks#Using_clock_getres_to_compare_clock_resolution // We introduce here a separate type for it, so as to make it harder to misuse/more explicit when these timestamps are used -typedef struct coarse_instant { +typedef struct { long timestamp_ns; } coarse_instant;
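Note (not part of the patch): every hunk above applies the same mechanical pattern — tagged struct declarations become anonymous `typedef struct { ... } name;` declarations, and the `struct` keyword is dropped at each use site (prototypes, casts, `sizeof`, and `TypedData_Get_Struct` calls). The sketch below is a minimal illustration of that pattern, assuming a hypothetical `example_state` type with the TypedData boilerplate reduced to the essentials; the names are placeholders, not symbols from this repository.

```c
#include <stdbool.h>
#include <ruby.h>

// Before: a tagged struct, referenced everywhere as `struct example_state`.
//   struct example_state { bool enabled; };
// After: an anonymous struct plus typedef, referenced simply as `example_state`.
typedef struct {
  bool enabled;
} example_state;

static void example_state_free(void *state_ptr) {
  ruby_xfree(state_ptr);
}

// Same shape of TypedData registration as the collectors/recorder above, trimmed down.
static const rb_data_type_t example_typed_data = {
  .wrap_struct_name = "ExampleState",
  .function = {
    .dfree = example_state_free,
  },
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

// Allocator: note sizeof(example_state), where the old form needed sizeof(struct example_state).
static VALUE example_new(VALUE klass) {
  example_state *state = ruby_xcalloc(1, sizeof(example_state));
  state->enabled = true;
  return TypedData_Wrap_Struct(klass, &example_typed_data, state);
}

// Accessor: the second TypedData_Get_Struct argument is now the typedef name, not `struct ...`.
static VALUE example_enabled_p(VALUE self) {
  example_state *state;
  TypedData_Get_Struct(self, example_state, &example_typed_data, state);
  return state->enabled ? Qtrue : Qfalse;
}
```

Because the anonymous form has no struct tag, `struct example_state` no longer names a type, so the compiler rejects any leftover `struct`-qualified reference — which is why the diff updates every prototype, cast, `sizeof`, and `TypedData_Get_Struct` call alongside the declarations themselves.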