author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-05-28 13:41:35 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-06-09 05:12:35 -0400
commit    d57e34fdd60be7ffd0b1d86bfa1a553df86b7172 (patch)
tree      f6044a0b2c2a757d56ad47e88cfa662bfc5bf424 /kernel/perf_event.c
parent    ca5135e6b4a3cbc7e187737520fbc4b508f6f7a2 (diff)
perf: Simplify the ring-buffer logic: make perf_buffer_alloc() do everything needed
Currently there are perf_buffer_alloc() + perf_buffer_init() + some
separate bits; fold them all into a single perf_buffer_alloc() and
leave only the attachment to the event separate.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--    kernel/perf_event.c    61
1 file changed, 34 insertions(+), 27 deletions(-)
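Condensed from the perf_mmap() hunk at the bottom of the diff below, the caller-side change looks like this (a sketch of the two calling conventions, not a complete excerpt):

/* Before: allocate, init against the event, then poke the flag by hand;
 * perf_buffer_init() also did the rcu_assign_pointer() attach. */
buffer = perf_buffer_alloc(event, nr_pages);
perf_buffer_init(event, buffer);
if (vma->vm_flags & VM_WRITE)
	event->buffer->writable = 1;

/* After: one call returns a fully initialized buffer; only the
 * attachment to the event stays with the caller. */
if (vma->vm_flags & VM_WRITE)
	flags |= PERF_BUFFER_WRITABLE;
buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
			   event->cpu, flags);
rcu_assign_pointer(event->buffer, buffer);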
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 93d545801e43..f75c9c9c8177 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2369,6 +2369,25 @@ unlock:
 	rcu_read_unlock();
 }
 
+static unsigned long perf_data_size(struct perf_buffer *buffer);
+
+static void
+perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
+{
+	long max_size = perf_data_size(buffer);
+
+	if (watermark)
+		buffer->watermark = min(max_size, watermark);
+
+	if (!buffer->watermark)
+		buffer->watermark = max_size / 2;
+
+	if (flags & PERF_BUFFER_WRITABLE)
+		buffer->writable = 1;
+
+	atomic_set(&buffer->refcount, 1);
+}
+
 #ifndef CONFIG_PERF_USE_VMALLOC
 
 /*
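The watermark policy carries over unchanged into the new helper: an explicit watermark is clamped to the buffer size, and an unset (zero) watermark defaults to half the buffer. A stand-alone user-space sketch of that arithmetic, assuming a hypothetical 8-page buffer with 4 KiB pages (min_long() stands in for the kernel's min() macro):

#include <stdio.h>

#define NR_PAGES 8
#define PAGE_SZ  4096L

static long min_long(long a, long b) { return a < b ? a : b; }

int main(void)
{
	long max_size = NR_PAGES * PAGE_SZ;	/* what perf_data_size() would report */
	long requested[] = { 0, 1024, 100000 };

	for (int i = 0; i < 3; i++) {
		long w = 0;

		if (requested[i])		/* explicit: clamp to buffer size */
			w = min_long(max_size, requested[i]);
		if (!w)				/* unset: wake at half full */
			w = max_size / 2;
		printf("requested %6ld -> effective %ld\n", requested[i], w);
	}
	return 0;	/* prints 16384, 1024, 32768 */
}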
@@ -2401,7 +2420,7 @@ static void *perf_mmap_alloc_page(int cpu)
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
 	struct perf_buffer *buffer;
 	unsigned long size;
@@ -2414,18 +2433,20 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages)
 	if (!buffer)
 		goto fail;
 
-	buffer->user_page = perf_mmap_alloc_page(event->cpu);
+	buffer->user_page = perf_mmap_alloc_page(cpu);
 	if (!buffer->user_page)
 		goto fail_user_page;
 
 	for (i = 0; i < nr_pages; i++) {
-		buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu);
+		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
 		if (!buffer->data_pages[i])
 			goto fail_data_pages;
 	}
 
 	buffer->nr_pages = nr_pages;
 
+	perf_buffer_init(buffer, watermark, flags);
+
 	return buffer;
 
 fail_data_pages:
@@ -2516,7 +2537,7 @@ static void perf_buffer_free(struct perf_buffer *buffer)
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
 	struct perf_buffer *buffer;
 	unsigned long size;
@@ -2540,6 +2561,8 @@ perf_buffer_alloc(struct perf_event *event, int nr_pages)
 	buffer->page_order = ilog2(nr_pages);
 	buffer->nr_pages = 1;
 
+	perf_buffer_init(buffer, watermark, flags);
+
 	return buffer;
 
 fail_all_buf:
@@ -2591,23 +2614,6 @@ unlock:
 	return ret;
 }
 
-static void
-perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer)
-{
-	long max_size = perf_data_size(buffer);
-
-	if (event->attr.watermark) {
-		buffer->watermark = min_t(long, max_size,
-					  event->attr.wakeup_watermark);
-	}
-
-	if (!buffer->watermark)
-		buffer->watermark = max_size / 2;
-
-	atomic_set(&buffer->refcount, 1);
-	rcu_assign_pointer(event->buffer, buffer);
-}
-
 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
 {
 	struct perf_buffer *buffer;
@@ -2682,7 +2688,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	unsigned long vma_size;
 	unsigned long nr_pages;
 	long user_extra, extra;
-	int ret = 0;
+	int ret = 0, flags = 0;
 
 	/*
 	 * Don't allow mmap() of inherited per-task counters. This would
@@ -2747,15 +2753,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	WARN_ON(event->buffer);
 
-	buffer = perf_buffer_alloc(event, nr_pages);
+	if (vma->vm_flags & VM_WRITE)
+		flags |= PERF_BUFFER_WRITABLE;
+
+	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
+				   event->cpu, flags);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto unlock;
 	}
+	rcu_assign_pointer(event->buffer, buffer);
 
-	perf_buffer_init(event, buffer);
-	if (vma->vm_flags & VM_WRITE)
-		event->buffer->writable = 1;
-
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
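The one step left outside the allocator is the rcu_assign_pointer() attach, and that is also what makes the fold safe: rcu_assign_pointer() orders all of the initialization done in perf_buffer_alloc() before the buffer becomes visible to readers. A sketch of the matching reader pattern (shape only, not code from this patch; consumers of event->buffer pair the publish with rcu_dereference() under rcu_read_lock()):

rcu_read_lock();
buffer = rcu_dereference(event->buffer);
if (buffer)
	nr_pages = buffer->nr_pages;	/* fields were set before publication */
rcu_read_unlock();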