diff --git a/lib/yabeda/sidekiq.rb b/lib/yabeda/sidekiq.rb
index f118a04..3d69d8c 100644
--- a/lib/yabeda/sidekiq.rb
+++ b/lib/yabeda/sidekiq.rb
@@ -21,7 +21,6 @@ module Sidekiq
       counter :jobs_enqueued_total, comment: "A counter of the total number of jobs sidekiq enqueued."
 
       next unless ::Sidekiq.server?
-
       counter :jobs_executed_total, comment: "A counter of the total number of jobs sidekiq executed."
       counter :jobs_success_total, comment: "A counter of the total number of jobs successfully processed by sidekiq."
       counter :jobs_failed_total, comment: "A counter of the total number of jobs failed in sidekiq."
@@ -32,6 +31,7 @@ module Sidekiq
       gauge :jobs_dead_count, comment: "The number of jobs exceeded their retry count."
       gauge :active_processes, comment: "The number of active Sidekiq worker processes."
       gauge :jobs_latency, comment: "The job latency, the difference in seconds since the oldest job in the queue was enqueued"
+      gauge :memory_usage, comment: "The sidekiq process overall memory usage"
       histogram :job_runtime, unit: :seconds, per: :job,
                 comment: "A histogram of the job execution time.",
                 buckets: LONG_RUNNING_JOB_RUNTIME_BUCKETS
@@ -52,6 +52,8 @@ module Sidekiq
           sidekiq_jobs_latency.set({ queue: queue.name }, queue.latency)
         end
 
+        sidekiq_memory_usage.set({}, Yabeda::Sidekiq.process_memory_usage)
+
         # That is quite slow if your retry set is large
         # I don't want to enable it by default
         # retries_by_queues =
@@ -90,6 +92,12 @@ def worker_class(worker, job)
         end
         (worker.is_a?(String) ? worker : worker.class).to_s
       end
+
+      def process_memory_usage
+        memories = Hash[%i{size resident shared trs lrs drs dt}.zip(open("/proc/#{Process.pid}/statm").read.split)]
+        page_size = `getconf PAGESIZE`.chomp.to_i
+        memories[:resident].to_i * page_size
+      end
     end
   end
 end
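
For reference, the new gauge reports resident set size in bytes: the second field of /proc/<pid>/statm is the resident page count, and multiplying it by the system page size gives RSS. The snippet below is a minimal standalone sketch, not part of the gem, assuming a Linux host with /proc and the getconf and ps utilities available; it cross-checks the same calculation against ps.

# Standalone sanity check for the RSS calculation above (hypothetical script,
# not part of the gem; assumes Linux with /proc, getconf, and ps).
pid = Process.pid

# /proc/<pid>/statm reports sizes in pages; the second field is the resident set.
resident_pages = File.read("/proc/#{pid}/statm").split[1].to_i
page_size = `getconf PAGESIZE`.chomp.to_i

rss_bytes = resident_pages * page_size
ps_rss_kib = `ps -o rss= -p #{pid}`.strip.to_i # ps reports RSS in KiB

puts "statm: #{rss_bytes / 1024} KiB, ps: #{ps_rss_kib} KiB"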