From bc078e4eab65f11bbaeed380593ab8151b30d703 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Tue, 2 Mar 2010 16:01:10 +0100
Subject: oprofile: convert oprofile from timer_hook to hrtimer

Oprofile is currently broken on systems running with NOHZ enabled. A
maximum of 1 tick is accounted via the timer_hook if a cpu sleeps for
a longer period of time. This does bad things to the percentages in
the profiler output.

To solve this problem, convert oprofile to use a restarting hrtimer
instead of the timer_hook.

Signed-off-by: Martin Schwidefsky
Signed-off-by: Robert Richter
---
 drivers/oprofile/oprof.c     | 12 ++++---
 drivers/oprofile/oprof.h     |  3 +-
 drivers/oprofile/timer_int.c | 78 +++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 79 insertions(+), 14 deletions(-)

diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dc8a0428260d..b336cd9ee7a1 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -253,22 +253,26 @@ static int __init oprofile_init(void)
 	int err;
 
 	err = oprofile_arch_init(&oprofile_ops);
-
 	if (err < 0 || timer) {
 		printk(KERN_INFO "oprofile: using timer interrupt.\n");
-		oprofile_timer_init(&oprofile_ops);
+		err = oprofile_timer_init(&oprofile_ops);
+		if (err)
+			goto out_arch;
 	}
-
 	err = oprofilefs_register();
 	if (err)
-		oprofile_arch_exit();
+		goto out_arch;
+	return 0;
 
+out_arch:
+	oprofile_arch_exit();
 	return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
+	oprofile_timer_exit();
 	oprofilefs_unregister();
 	oprofile_arch_exit();
 }

diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index cb92f5c98c1a..47e12cb4ee8b 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -34,7 +34,8 @@ struct super_block;
 struct dentry;
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root);
-void oprofile_timer_init(struct oprofile_operations *ops);
+int oprofile_timer_init(struct oprofile_operations *ops);
+void oprofile_timer_exit(void);
 
 int oprofile_set_backtrace(unsigned long depth);
 int oprofile_set_timeout(unsigned long time);

diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c7..dc0ae4d14dff 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,94 @@
 #include <linux/oprofile.h>
 #include <linux/profile.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 
 #include "oprof.h"
 
-static int timer_notify(struct pt_regs *regs)
+static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+
+static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
+{
+	oprofile_add_sample(get_irq_regs(), 0);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
+	return HRTIMER_RESTART;
+}
+
+static void __oprofile_hrtimer_start(void *unused)
+{
+	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = oprofile_hrtimer_notify;
+
+	hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static int oprofile_hrtimer_start(void)
 {
-	oprofile_add_sample(regs, 0);
+	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
 	return 0;
 }
 
-static int timer_start(void)
+static void __oprofile_hrtimer_stop(int cpu)
 {
-	return register_timer_hook(timer_notify);
+	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+
+	hrtimer_cancel(hrtimer);
 }
 
+static void oprofile_hrtimer_stop(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		__oprofile_hrtimer_stop(cpu);
+}
 
-static void timer_stop(void)
+static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+					 unsigned long action, void *hcpu)
 {
-	unregister_timer_hook(timer_notify);
+	long cpu = (long) hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, __oprofile_hrtimer_start,
+					 NULL, 1);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		__oprofile_hrtimer_stop(cpu);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
+static struct notifier_block __refdata oprofile_cpu_notifier = {
+	.notifier_call = oprofile_cpu_notify,
+};
 
-void __init oprofile_timer_init(struct oprofile_operations *ops)
+int __init oprofile_timer_init(struct oprofile_operations *ops)
 {
+	int rc;
+
+	rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
+	if (rc)
+		return rc;
 	ops->create_files = NULL;
 	ops->setup = NULL;
 	ops->shutdown = NULL;
-	ops->start = timer_start;
-	ops->stop = timer_stop;
+	ops->start = oprofile_hrtimer_start;
+	ops->stop = oprofile_hrtimer_stop;
 	ops->cpu_type = "timer";
+	return 0;
+}
+
+void __exit oprofile_timer_exit(void)
+{
+	unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
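The core of the fix is the self-rearming hrtimer: the old timer_hook only
fired on a scheduler tick, so a CPU idling under NOHZ contributed at most
one sample per sleep, while an hrtimer programs the clock event hardware
directly and re-arms itself from its own callback. A minimal sketch of
that pattern, separate from the patch (sample_timer, sample_fn,
take_sample() and PERIOD_NS are illustrative names, not part of the
oprofile code):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define PERIOD_NS 1000000	/* illustrative 1 ms sampling period */

    static struct hrtimer sample_timer;

    static void take_sample(void)
    {
    	/* read counters, record one sample */
    }

    /* Runs in hard-IRQ context; pushes its own expiry one period
     * forward and asks the hrtimer core to re-arm it. */
    static enum hrtimer_restart sample_fn(struct hrtimer *t)
    {
    	take_sample();
    	hrtimer_forward_now(t, ns_to_ktime(PERIOD_NS));
    	return HRTIMER_RESTART;	/* keep firing, tick or no tick */
    }

    static void sample_start(void)
    {
    	hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    	sample_timer.function = sample_fn;
    	hrtimer_start(&sample_timer, ns_to_ktime(PERIOD_NS),
    		      HRTIMER_MODE_REL);
    }

The patch additionally pins one such timer per CPU
(HRTIMER_MODE_REL_PINNED) and registers a hotplug notifier so the timers
are started and cancelled as CPUs come and go.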
From 66a8cb95ed04025664d1db4e952155ee1dccd048 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 31 Mar 2010 13:21:56 -0400
Subject: ring-buffer: Add place holder recording of dropped events

Currently, when the ring buffer drops events, it does not record the
fact that it did so. It does inform the writer that the event was
dropped by returning a NULL event, but it does not put in any
placeholder where the event was dropped.

This is not a trivial thing to add because the ring buffer mostly runs
in overwrite (flight recorder) mode. That is, when the ring buffer is
full, new data will overwrite old data. In a producer/consumer mode,
where new data is simply dropped when the ring buffer is full, it is
trivial to add the placeholder for dropped events: when there is more
room to write new data, a special event can be added to notify the
reader about the dropped events.

But in overwrite mode, any new write can overwrite events. A
placeholder cannot be inserted into the ring buffer since there may
never be room. A reader could also come in at any time and miss the
placeholder.

Luckily, the way the ring buffer works, the read side can find out
whether events were lost, and how many. Every time a write takes
place, if it overwrites the header page (the next read) it updates an
"overrun" variable that keeps track of the number of lost events. When
a reader swaps out a page from the ring buffer, it can record this
number, perform the swap, and then check whether the number changed,
taking the difference if it has; that difference is the number of
events dropped. This can be stored by the reader and returned to
callers of the reader.

Since the reader page swap will fail if the writer moved the head page
since the time the reader set up the swap, this gives room to record
the overruns without worrying about races: if the reader sets up the
pages, records the overrun, and then performs the swap, and the swap
succeeds, then the overrun variable has not been updated since the
setup before the swap.

For binary readers of the ring buffer, a flag is set in the header of
each sub page (sub buffer) of the ring buffer. This flag is embedded
in the size field of the data on the sub buffer, in the 31st bit (the
size can be 32 or 64 bits depending on the architecture), but only 27
bits need to be used for the actual size (fewer, actually).

We could add a new field in the sub buffer header to also record the
number of events dropped since the last read, but this would change
the format of the binary ring buffer a bit too much. Perhaps this
change can be made if the information on the number of events dropped
is considered important enough.

Note, the notification of dropped events is only used by consuming
reads or peeking at the ring buffer. Iterating over the ring buffer
does not keep this information because the necessary data is only
available when a page swap is made, and the iterator does not swap out
pages.

Cc: Robert Richter
Cc: Andi Kleen
Cc: Li Zefan
Cc: Arnaldo Carvalho de Melo
Cc: "Luis Claudio R. Goncalves"
Cc: Frederic Weisbecker
Signed-off-by: Steven Rostedt
---
 drivers/oprofile/cpu_buffer.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622f..7581dbe456da 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -186,14 +186,14 @@ int op_cpu_buffer_write_commit(struct op_entry *entry)
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
 	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL, NULL);
 	if (e)
 		goto event;
 	if (ring_buffer_swap_cpu(op_ring_buffer_read,
 				 op_ring_buffer_write,
 				 cpu))
 		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL, NULL);
 	if (e)
 		goto event;
 	return NULL;
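The API change visible to consumers is the extra out-parameter on
ring_buffer_consume(): the oprofile call sites above pass NULL and
ignore the count, but an interested reader can supply a pointer and
learn how many events were overwritten before the one it received. A
hedged sketch of such a consumer (drain_cpu and the pr_info() reporting
are illustrative, not taken from this patch):

    /* Drain one CPU's buffer, accounting for overwritten events.
     * 3rd argument: optional timestamp out-parameter;
     * 4th argument: the new dropped-event count. */
    static void drain_cpu(struct ring_buffer *buffer, int cpu)
    {
    	struct ring_buffer_event *event;
    	unsigned long lost = 0;

    	while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
    		if (lost)
    			pr_info("%lu events lost before this one\n", lost);
    		/* process ring_buffer_event_data(event) here */
    	}
    }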
From cb6e943ccf19ab6d3189147e9d625a992e016084 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 1 Apr 2010 03:17:25 +0200
Subject: oprofile: remove double ring buffering

oprofile used a double buffer scheme for its cpu event buffer to avoid
races on reading with the old locked ring buffer.

But that is obsolete now with the new ring buffer, so simply use a
single buffer. This greatly simplifies the code and avoids a lot of
sample drops on large runs, especially with call graphs.

Based on suggestions from Steven Rostedt

For stable kernels from v2.6.32, but not earlier.

Signed-off-by: Andi Kleen
Cc: Steven Rostedt
Cc: stable
Signed-off-by: Robert Richter
---
 drivers/oprofile/cpu_buffer.c | 63 +++++++++----------------------------------
 1 file changed, 13 insertions(+), 50 deletions(-)

diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622f..de82183bb9b3 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
 
 #define OP_BUFFER_FLAGS	0
 
-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
 
 void free_cpu_buffers(void)
 {
-	if (op_ring_buffer_read)
-		ring_buffer_free(op_ring_buffer_read);
-	op_ring_buffer_read = NULL;
-	if (op_ring_buffer_write)
-		ring_buffer_free(op_ring_buffer_write);
-	op_ring_buffer_write = NULL;
+	if (op_ring_buffer)
+		ring_buffer_free(op_ring_buffer);
+	op_ring_buffer = NULL;
 }
 
 #define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
 	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
 						 RB_EVENT_HDR_SIZE);
 
-	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_read)
-		goto fail;
-	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_write)
+	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer)
 		goto fail;
 
 	for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
 	entry->event = ring_buffer_lock_reserve
-		(op_ring_buffer_write, sizeof(struct op_sample) +
+		(op_ring_buffer, sizeof(struct op_sample) +
 		 size * sizeof(entry->sample->data[0]));
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
+	if (!entry->event)
 		return NULL;
-
+	entry->sample = ring_buffer_event_data(entry->event);
 	entry->size = size;
 	entry->data = entry->sample->data;
 
@@ -180,25 +153,16 @@ struct op_sample
 
 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
 }
 
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
 	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
+	e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
+	if (!e)
 		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	return NULL;
-event:
+
 	entry->event = e;
 	entry->sample = ring_buffer_event_data(e);
 	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:
 
 unsigned long op_cpu_buffer_entries(int cpu)
 {
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
 }
 
 static int
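With the read/write split gone, the write path reduces to the usual
ring-buffer reserve/fill/commit sequence against the single buffer. A
hedged sketch of how op_cpu_buffer_write_reserve() and
op_cpu_buffer_write_commit() are used — essentially what
op_add_sample() in this file does; write_one_sample, pc and event are
illustrative, and the eip/event fields come from struct op_sample:

    static int write_one_sample(unsigned long pc, unsigned long event)
    {
    	struct op_entry entry;
    	struct op_sample *sample;

    	sample = op_cpu_buffer_write_reserve(&entry, 0); /* no data words */
    	if (!sample)
    		return -ENOMEM;	/* reservation failed: sample is dropped */

    	sample->eip = pc;
    	sample->event = event;

    	return op_cpu_buffer_write_commit(&entry);
    }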
From 9414e99672271adcc661f3c160a30b374179b92f Mon Sep 17 00:00:00 2001
From: Phil Carmody
Date: Wed, 28 Apr 2010 12:09:16 -0500
Subject: oprofile: protect from not being in an IRQ context

http://lkml.org/lkml/2010/4/27/285

Protect against dereferencing regs when it is NULL, and force a magic
number into pc to prevent too-deep processing. This approach permits
the dropped samples to be tallied as invalid Instruction Pointer
events, e.g. output from about 15 minutes at a 10kHz sample rate:

    Nr. samples received: 2565380
    Nr. samples lost invalid pc: 4

Signed-off-by: Phil Carmody
Signed-off-by: Robert Richter
---
 drivers/oprofile/cpu_buffer.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 0ac8b065ee02..219f79e2210a 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -319,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
+	int is_kernel;
+	unsigned long pc;
+
+	if (likely(regs)) {
+		is_kernel = !user_mode(regs);
+		pc = profile_pc(regs);
+	} else {
+		is_kernel = 0;    /* This value will not be used */
+		pc = ESCAPE_CODE; /* as this causes an early return. */
+	}
 
 	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
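The guard makes oprofile_add_sample() safe for callers that cannot
guarantee an interrupt register frame — the hrtimer handler from the
first patch in this series, for instance, forwards the result of
get_irq_regs() straight through. A hedged sketch of such a call site
(sample_current_cpu is an illustrative name, not an in-tree function):

    #include <asm/irq_regs.h>

    static void sample_current_cpu(void)
    {
    	struct pt_regs *regs = get_irq_regs();	/* may be NULL */

    	/*
    	 * Before this patch, NULL regs would be dereferenced in
    	 * user_mode()/profile_pc(); now the sample is counted under
    	 * "Nr. samples lost invalid pc" instead.
    	 */
    	oprofile_add_sample(regs, 0);
    }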