author     Ingo Molnar <mingo@elte.hu>    2009-06-20 11:25:49 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-06-20 11:25:49 -0400
commit     3daeb4da9a0b056bdc4af003e5605c1da4c0b068 (patch)
tree       a17fda13d9be51c71880a8ce2c55eaf7337c722f /kernel
parent     9ea1a153a4fb435c22e9988784bb476671286112 (diff)
parent     4b221f0313f0f7f1f7aa0a1fd16ad400840def26 (diff)
Merge branch 'tip/tracing/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ring_buffer.c            | 82
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c  | 37
2 files changed, 78 insertions, 41 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index ed3559944fcf..589b3eedfa67 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);
@@ -668,8 +657,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
-	if (buffer->pages == 1)
-		buffer->pages++;
+	if (buffer->pages < 2)
+		buffer->pages = 2;
 
 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
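Note that the new test also covers a computed page count of zero: the old `== 1` check only bumped the exactly-one case, while `< 2` guarantees the required minimum of two pages for any input. A tiny standalone comparison of the two checks (the page-count input here is just an illustrative value, not the allocator's actual size calculation):

#include <stdio.h>

/* Illustrative only: compares the old and new clamping logic from the
 * hunk above. nr_pages stands in for whatever page count the allocator
 * derives from the requested buffer size. */
static unsigned long clamp_old(unsigned long nr_pages)
{
	if (nr_pages == 1)		/* misses nr_pages == 0 */
		nr_pages++;
	return nr_pages;
}

static unsigned long clamp_new(unsigned long nr_pages)
{
	if (nr_pages < 2)		/* clamps both 0 and 1 up to 2 */
		nr_pages = 2;
	return nr_pages;
}

int main(void)
{
	for (unsigned long n = 0; n <= 3; n++)
		printf("pages=%lu  old=%lu  new=%lu\n",
		       n, clamp_old(n), clamp_new(n));
	return 0;
}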
@@ -1013,7 +1002,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
 static inline int
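The old and new return values are identical as long as BUF_PAGE_SIZE is PAGE_SIZE minus BUF_PAGE_HDR_SIZE, which is how ring_buffer.c defines it; the rewrite just states the intent directly (subtract the buffer-page header from the in-page offset). A throwaway check of that identity, using stand-in constants rather than the kernel's real ones:

#include <assert.h>

/* Stand-in values for illustration only; the real constants live in
 * ring_buffer.c. The point is the identity, not the numbers:
 * if BUF_PAGE_SIZE == PAGE_SIZE - BUF_PAGE_HDR_SIZE, then
 * PAGE_SIZE - BUF_PAGE_SIZE == BUF_PAGE_HDR_SIZE. */
#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define BUF_PAGE_HDR_SIZE	16UL
#define BUF_PAGE_SIZE		(PAGE_SIZE - BUF_PAGE_HDR_SIZE)

int main(void)
{
	unsigned long addr = 0x12340a40UL;	/* arbitrary event address */

	/* old expression */
	unsigned long old = (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
	/* new expression */
	unsigned long new = (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;

	assert(old == new);
	return 0;
}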
@@ -1334,9 +1323,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
 
@@ -2480,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
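Every reader-side path touched below (ring_buffer_peek, ring_buffer_consume, ring_buffer_empty, ring_buffer_empty_cpu) applies rb_ok_to_lock() in the same shape. Condensed into a single sketch, that pattern looks roughly like this (illustration only, not the exact kernel code; the helper name is made up):

/* Condensed sketch of the reader-side pattern the following hunks
 * apply. Interrupts stay disabled around the peek so the read is
 * still atomic on this CPU, but the reader_lock is only taken when
 * it is safe, i.e. not while an NMI die or oops is dumping the
 * buffer. */
static struct ring_buffer_event *
peek_with_optional_lock(struct ring_buffer *buffer,
			struct ring_buffer_per_cpu *cpu_buffer,
			int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock = rb_ok_to_lock();

	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(buffer, cpu, ts);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return event;
}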
@@ -2495,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
 again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2554,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
 again:
 	/* might be called in atomic */
@@ -2563,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2572,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
 out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 out:
 	preempt_enable();
@@ -2770,12 +2784,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
 
@@ -2791,14 +2818,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
-
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index cf6b0f50134e..573d3cc762c3 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -203,7 +203,7 @@ static void ring_buffer_producer(void)
 	 * Hammer the buffer for 10 secs (this may
 	 * make the system stall)
 	 */
-	pr_info("Starting ring buffer hammer\n");
+	trace_printk("Starting ring buffer hammer\n");
 	do_gettimeofday(&start_tv);
 	do {
 		struct ring_buffer_event *event;
@@ -239,7 +239,7 @@ static void ring_buffer_producer(void)
 #endif
 
 	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
-	pr_info("End ring buffer hammer\n");
+	trace_printk("End ring buffer hammer\n");
 
 	if (consumer) {
 		/* Init both completions here to avoid races */
@@ -262,49 +262,50 @@ static void ring_buffer_producer(void)
 	overruns = ring_buffer_overruns(buffer);
 
 	if (kill_test)
-		pr_info("ERROR!\n");
-	pr_info("Time: %lld (usecs)\n", time);
-	pr_info("Overruns: %lld\n", overruns);
+		trace_printk("ERROR!\n");
+	trace_printk("Time: %lld (usecs)\n", time);
+	trace_printk("Overruns: %lld\n", overruns);
 	if (disable_reader)
-		pr_info("Read: (reader disabled)\n");
+		trace_printk("Read: (reader disabled)\n");
 	else
-		pr_info("Read: %ld (by %s)\n", read,
+		trace_printk("Read: %ld (by %s)\n", read,
 			read_events ? "events" : "pages");
-	pr_info("Entries: %lld\n", entries);
-	pr_info("Total: %lld\n", entries + overruns + read);
-	pr_info("Missed: %ld\n", missed);
-	pr_info("Hit: %ld\n", hit);
+	trace_printk("Entries: %lld\n", entries);
+	trace_printk("Total: %lld\n", entries + overruns + read);
+	trace_printk("Missed: %ld\n", missed);
+	trace_printk("Hit: %ld\n", hit);
 
 	/* Convert time from usecs to millisecs */
 	do_div(time, USEC_PER_MSEC);
 	if (time)
 		hit /= (long)time;
 	else
-		pr_info("TIME IS ZERO??\n");
+		trace_printk("TIME IS ZERO??\n");
 
-	pr_info("Entries per millisec: %ld\n", hit);
+	trace_printk("Entries per millisec: %ld\n", hit);
 
 	if (hit) {
 		/* Calculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / hit;
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 
 	if (missed) {
 		if (time)
 			missed /= (long)time;
 
-		pr_info("Total iterations per millisec: %ld\n", hit + missed);
+		trace_printk("Total iterations per millisec: %ld\n",
+			     hit + missed);
 
 		/* it is possible that hit + missed will overflow and be zero */
 		if (!(hit + missed)) {
-			pr_info("hit + missed overflowed and totalled zero!\n");
+			trace_printk("hit + missed overflowed and totalled zero!\n");
 			hit--; /* make it non zero */
 		}
 
 		/* Caculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / (hit + missed);
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 }
 
@@ -355,7 +356,7 @@ static int ring_buffer_producer_thread(void *arg)
 
 	ring_buffer_producer();
 
-	pr_info("Sleeping for 10 secs\n");
+	trace_printk("Sleeping for 10 secs\n");
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(HZ * SLEEP_TIME);
 	__set_current_state(TASK_RUNNING);
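The benchmark side of the merge is mechanical: every pr_info() in the reporting path becomes trace_printk(), presumably so the reporting goes through the lightweight ftrace path instead of printk while the buffer is being hammered. The results therefore land in the ftrace ring buffer rather than the kernel log; a minimal userspace sketch of reading them back (the trace-file path assumes the usual debugfs mount point):

#include <stdio.h>

/* Illustration only: dump the tracing "trace" file, where output
 * written with trace_printk() ends up, to stdout. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/sys/kernel/debug/tracing/trace", "r");

	if (!f) {
		perror("open trace file");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* benchmark lines appear among the trace output */
	fclose(f);
	return 0;
}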