author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-05-28 13:33:23 -0400
committer Ingo Molnar <mingo@elte.hu>                2010-06-09 05:12:35 -0400
commit    ca5135e6b4a3cbc7e187737520fbc4b508f6f7a2
tree      b9bf928206d397f9c0474b1d2ea8f777cf4d22d7 /kernel/perf_event.c
parent    68aa00ac0a82e9a876c799bf6be7622b8f1c8517
perf: Rename perf_mmap_data to perf_buffer
Rename to clarify code.

s/perf_mmap_data/perf_buffer/g and selective s/data/buffer/g

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
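As an illustration of the substitution, here is a minimal before/after sketch modelled on the perf_poll() hunk below; it is a sketch, not an excerpt of the file:

/* Before: the mmap ring buffer is "struct perf_mmap_data", reached via event->data. */
struct perf_mmap_data *data;

rcu_read_lock();
data = rcu_dereference(event->data);	/* RCU-protected lookup */
if (data)
	events = atomic_xchg(&data->poll, 0);
rcu_read_unlock();

/*
 * After: the same object is "struct perf_buffer", reached via event->buffer.
 * Only the struct, its local variables and the event->data pointer are renamed;
 * buffer-internal names such as data_pages[] and data_head are left alone
 * (the "selective s/data/buffer/g" part of the change).
 */
struct perf_buffer *buffer;

rcu_read_lock();
buffer = rcu_dereference(event->buffer);
if (buffer)
	events = atomic_xchg(&buffer->poll, 0);
rcu_read_unlock();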
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c  308
1 files changed, 154 insertions, 154 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 6f60920772b3..93d545801e43 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1876,7 +1876,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void perf_pending_sync(struct perf_event *event);
-static void perf_mmap_data_put(struct perf_mmap_data *data);
+static void perf_buffer_put(struct perf_buffer *buffer);
 
 static void free_event(struct perf_event *event)
 {
@@ -1892,9 +1892,9 @@ static void free_event(struct perf_event *event)
 		atomic_dec(&nr_task_events);
 	}
 
-	if (event->data) {
-		perf_mmap_data_put(event->data);
-		event->data = NULL;
+	if (event->buffer) {
+		perf_buffer_put(event->buffer);
+		event->buffer = NULL;
 	}
 
 	if (event->destroy)
@@ -2119,13 +2119,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 static unsigned int perf_poll(struct file *file, poll_table *wait)
 {
 	struct perf_event *event = file->private_data;
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	unsigned int events = POLL_HUP;
 
 	rcu_read_lock();
-	data = rcu_dereference(event->data);
-	if (data)
-		events = atomic_xchg(&data->poll, 0);
+	buffer = rcu_dereference(event->buffer);
+	if (buffer)
+		events = atomic_xchg(&buffer->poll, 0);
 	rcu_read_unlock();
 
 	poll_wait(file, &event->waitq, wait);
@@ -2335,14 +2335,14 @@ static int perf_event_index(struct perf_event *event)
 void perf_event_update_userpage(struct perf_event *event)
 {
 	struct perf_event_mmap_page *userpg;
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 
 	rcu_read_lock();
-	data = rcu_dereference(event->data);
-	if (!data)
+	buffer = rcu_dereference(event->buffer);
+	if (!buffer)
 		goto unlock;
 
-	userpg = data->user_page;
+	userpg = buffer->user_page;
 
 	/*
 	 * Disable preemption so as to not let the corresponding user-space
@@ -2376,15 +2376,15 @@ unlock:
  */
 
 static struct page *
-perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
 {
-	if (pgoff > data->nr_pages)
+	if (pgoff > buffer->nr_pages)
 		return NULL;
 
 	if (pgoff == 0)
-		return virt_to_page(data->user_page);
+		return virt_to_page(buffer->user_page);
 
-	return virt_to_page(data->data_pages[pgoff - 1]);
+	return virt_to_page(buffer->data_pages[pgoff - 1]);
 }
 
 static void *perf_mmap_alloc_page(int cpu)
@@ -2400,42 +2400,42 @@ static void *perf_mmap_alloc_page(int cpu)
 	return page_address(page);
 }
 
-static struct perf_mmap_data *
-perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+static struct perf_buffer *
+perf_buffer_alloc(struct perf_event *event, int nr_pages)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	unsigned long size;
 	int i;
 
-	size = sizeof(struct perf_mmap_data);
+	size = sizeof(struct perf_buffer);
 	size += nr_pages * sizeof(void *);
 
-	data = kzalloc(size, GFP_KERNEL);
-	if (!data)
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
 		goto fail;
 
-	data->user_page = perf_mmap_alloc_page(event->cpu);
-	if (!data->user_page)
+	buffer->user_page = perf_mmap_alloc_page(event->cpu);
+	if (!buffer->user_page)
 		goto fail_user_page;
 
 	for (i = 0; i < nr_pages; i++) {
-		data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
-		if (!data->data_pages[i])
+		buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu);
+		if (!buffer->data_pages[i])
 			goto fail_data_pages;
 	}
 
-	data->nr_pages = nr_pages;
+	buffer->nr_pages = nr_pages;
 
-	return data;
+	return buffer;
 
 fail_data_pages:
 	for (i--; i >= 0; i--)
-		free_page((unsigned long)data->data_pages[i]);
+		free_page((unsigned long)buffer->data_pages[i]);
 
-	free_page((unsigned long)data->user_page);
+	free_page((unsigned long)buffer->user_page);
 
 fail_user_page:
-	kfree(data);
+	kfree(buffer);
 
 fail:
 	return NULL;
@@ -2449,17 +2449,17 @@ static void perf_mmap_free_page(unsigned long addr)
 	__free_page(page);
 }
 
-static void perf_mmap_data_free(struct perf_mmap_data *data)
+static void perf_buffer_free(struct perf_buffer *buffer)
 {
 	int i;
 
-	perf_mmap_free_page((unsigned long)data->user_page);
-	for (i = 0; i < data->nr_pages; i++)
-		perf_mmap_free_page((unsigned long)data->data_pages[i]);
-	kfree(data);
+	perf_mmap_free_page((unsigned long)buffer->user_page);
+	for (i = 0; i < buffer->nr_pages; i++)
+		perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
+	kfree(buffer);
 }
 
-static inline int page_order(struct perf_mmap_data *data)
+static inline int page_order(struct perf_buffer *buffer)
 {
 	return 0;
 }
@@ -2472,18 +2472,18 @@ static inline int page_order(struct perf_mmap_data *data)
  * Required for architectures that have d-cache aliasing issues.
  */
 
-static inline int page_order(struct perf_mmap_data *data)
+static inline int page_order(struct perf_buffer *buffer)
 {
-	return data->page_order;
+	return buffer->page_order;
 }
 
 static struct page *
-perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
 {
-	if (pgoff > (1UL << page_order(data)))
+	if (pgoff > (1UL << page_order(buffer)))
 		return NULL;
 
-	return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
+	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
 }
 
 static void perf_mmap_unmark_page(void *addr)
@@ -2493,57 +2493,57 @@ static void perf_mmap_unmark_page(void *addr)
 	page->mapping = NULL;
 }
 
-static void perf_mmap_data_free_work(struct work_struct *work)
+static void perf_buffer_free_work(struct work_struct *work)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	void *base;
 	int i, nr;
 
-	data = container_of(work, struct perf_mmap_data, work);
-	nr = 1 << page_order(data);
+	buffer = container_of(work, struct perf_buffer, work);
+	nr = 1 << page_order(buffer);
 
-	base = data->user_page;
+	base = buffer->user_page;
 	for (i = 0; i < nr + 1; i++)
 		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
 	vfree(base);
-	kfree(data);
+	kfree(buffer);
 }
 
-static void perf_mmap_data_free(struct perf_mmap_data *data)
+static void perf_buffer_free(struct perf_buffer *buffer)
 {
-	schedule_work(&data->work);
+	schedule_work(&buffer->work);
 }
 
-static struct perf_mmap_data *
-perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
+static struct perf_buffer *
+perf_buffer_alloc(struct perf_event *event, int nr_pages)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	unsigned long size;
 	void *all_buf;
 
-	size = sizeof(struct perf_mmap_data);
+	size = sizeof(struct perf_buffer);
 	size += sizeof(void *);
 
-	data = kzalloc(size, GFP_KERNEL);
-	if (!data)
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
 		goto fail;
 
-	INIT_WORK(&data->work, perf_mmap_data_free_work);
+	INIT_WORK(&buffer->work, perf_buffer_free_work);
 
 	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
 	if (!all_buf)
 		goto fail_all_buf;
 
-	data->user_page = all_buf;
-	data->data_pages[0] = all_buf + PAGE_SIZE;
-	data->page_order = ilog2(nr_pages);
-	data->nr_pages = 1;
+	buffer->user_page = all_buf;
+	buffer->data_pages[0] = all_buf + PAGE_SIZE;
+	buffer->page_order = ilog2(nr_pages);
+	buffer->nr_pages = 1;
 
-	return data;
+	return buffer;
 
 fail_all_buf:
-	kfree(data);
+	kfree(buffer);
 
 fail:
 	return NULL;
@@ -2551,15 +2551,15 @@ fail:
 
 #endif
 
-static unsigned long perf_data_size(struct perf_mmap_data *data)
+static unsigned long perf_data_size(struct perf_buffer *buffer)
 {
-	return data->nr_pages << (PAGE_SHIFT + page_order(data));
+	return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
 }
 
 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct perf_event *event = vma->vm_file->private_data;
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	int ret = VM_FAULT_SIGBUS;
 
 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -2569,14 +2569,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	rcu_read_lock();
-	data = rcu_dereference(event->data);
-	if (!data)
+	buffer = rcu_dereference(event->buffer);
+	if (!buffer)
 		goto unlock;
 
 	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
 		goto unlock;
 
-	vmf->page = perf_mmap_to_page(data, vmf->pgoff);
+	vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
 	if (!vmf->page)
 		goto unlock;
 
@@ -2592,51 +2592,51 @@ unlock:
 }
 
 static void
-perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
+perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer)
 {
-	long max_size = perf_data_size(data);
+	long max_size = perf_data_size(buffer);
 
 	if (event->attr.watermark) {
-		data->watermark = min_t(long, max_size,
+		buffer->watermark = min_t(long, max_size,
 					event->attr.wakeup_watermark);
 	}
 
-	if (!data->watermark)
-		data->watermark = max_size / 2;
+	if (!buffer->watermark)
+		buffer->watermark = max_size / 2;
 
-	atomic_set(&data->refcount, 1);
-	rcu_assign_pointer(event->data, data);
+	atomic_set(&buffer->refcount, 1);
+	rcu_assign_pointer(event->buffer, buffer);
 }
 
-static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
+static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 
-	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
-	perf_mmap_data_free(data);
+	buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
+	perf_buffer_free(buffer);
 }
 
-static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event)
+static struct perf_buffer *perf_buffer_get(struct perf_event *event)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 
 	rcu_read_lock();
-	data = rcu_dereference(event->data);
-	if (data) {
-		if (!atomic_inc_not_zero(&data->refcount))
-			data = NULL;
+	buffer = rcu_dereference(event->buffer);
+	if (buffer) {
+		if (!atomic_inc_not_zero(&buffer->refcount))
+			buffer = NULL;
 	}
 	rcu_read_unlock();
 
-	return data;
+	return buffer;
 }
 
-static void perf_mmap_data_put(struct perf_mmap_data *data)
+static void perf_buffer_put(struct perf_buffer *buffer)
 {
-	if (!atomic_dec_and_test(&data->refcount))
+	if (!atomic_dec_and_test(&buffer->refcount))
 		return;
 
-	call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
+	call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
 }
 
 static void perf_mmap_open(struct vm_area_struct *vma)
@@ -2651,16 +2651,16 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	struct perf_event *event = vma->vm_file->private_data;
 
 	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->data);
+		unsigned long size = perf_data_size(event->buffer);
 		struct user_struct *user = event->mmap_user;
-		struct perf_mmap_data *data = event->data;
+		struct perf_buffer *buffer = event->buffer;
 
 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
 		vma->vm_mm->locked_vm -= event->mmap_locked;
-		rcu_assign_pointer(event->data, NULL);
+		rcu_assign_pointer(event->buffer, NULL);
 		mutex_unlock(&event->mmap_mutex);
 
-		perf_mmap_data_put(data);
+		perf_buffer_put(buffer);
 		free_uid(user);
 	}
 }
@@ -2678,7 +2678,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	unsigned long user_locked, user_lock_limit;
 	struct user_struct *user = current_user();
 	unsigned long locked, lock_limit;
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	unsigned long vma_size;
 	unsigned long nr_pages;
 	long user_extra, extra;
@@ -2699,7 +2699,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	nr_pages = (vma_size / PAGE_SIZE) - 1;
 
 	/*
-	 * If we have data pages ensure they're a power-of-two number, so we
+	 * If we have buffer pages ensure they're a power-of-two number, so we
 	 * can do bitmasks instead of modulo.
 	 */
 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
@@ -2713,9 +2713,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
 	mutex_lock(&event->mmap_mutex);
-	if (event->data) {
-		if (event->data->nr_pages == nr_pages)
-			atomic_inc(&event->data->refcount);
+	if (event->buffer) {
+		if (event->buffer->nr_pages == nr_pages)
+			atomic_inc(&event->buffer->refcount);
 		else
 			ret = -EINVAL;
 		goto unlock;
@@ -2745,17 +2745,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		goto unlock;
 	}
 
-	WARN_ON(event->data);
+	WARN_ON(event->buffer);
 
-	data = perf_mmap_data_alloc(event, nr_pages);
-	if (!data) {
+	buffer = perf_buffer_alloc(event, nr_pages);
+	if (!buffer) {
 		ret = -ENOMEM;
 		goto unlock;
 	}
 
-	perf_mmap_data_init(event, data);
+	perf_buffer_init(event, buffer);
 	if (vma->vm_flags & VM_WRITE)
-		event->data->writable = 1;
+		event->buffer->writable = 1;
 
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
@@ -2964,15 +2964,15 @@ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
 /*
  * Output
  */
-static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
+static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
 			      unsigned long offset, unsigned long head)
 {
 	unsigned long mask;
 
-	if (!data->writable)
+	if (!buffer->writable)
 		return true;
 
-	mask = perf_data_size(data) - 1;
+	mask = perf_data_size(buffer) - 1;
 
 	offset = (offset - tail) & mask;
 	head = (head - tail) & mask;
@@ -2985,7 +2985,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
 
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
-	atomic_set(&handle->data->poll, POLL_IN);
+	atomic_set(&handle->buffer->poll, POLL_IN);
 
 	if (handle->nmi) {
 		handle->event->pending_wakeup = 1;
@@ -3005,45 +3005,45 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
  */
 static void perf_output_get_handle(struct perf_output_handle *handle)
 {
-	struct perf_mmap_data *data = handle->data;
+	struct perf_buffer *buffer = handle->buffer;
 
 	preempt_disable();
-	local_inc(&data->nest);
-	handle->wakeup = local_read(&data->wakeup);
+	local_inc(&buffer->nest);
+	handle->wakeup = local_read(&buffer->wakeup);
 }
 
 static void perf_output_put_handle(struct perf_output_handle *handle)
 {
-	struct perf_mmap_data *data = handle->data;
+	struct perf_buffer *buffer = handle->buffer;
 	unsigned long head;
 
 again:
-	head = local_read(&data->head);
+	head = local_read(&buffer->head);
 
 	/*
 	 * IRQ/NMI can happen here, which means we can miss a head update.
 	 */
 
-	if (!local_dec_and_test(&data->nest))
+	if (!local_dec_and_test(&buffer->nest))
 		goto out;
 
 	/*
 	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the data->head read and this
+	 * by atomic_dec_and_test() order the buffer->head read and this
 	 * write.
 	 */
-	data->user_page->data_head = head;
+	buffer->user_page->data_head = head;
 
 	/*
 	 * Now check if we missed an update, rely on the (compiler)
-	 * barrier in atomic_dec_and_test() to re-read data->head.
+	 * barrier in atomic_dec_and_test() to re-read buffer->head.
 	 */
-	if (unlikely(head != local_read(&data->head))) {
-		local_inc(&data->nest);
+	if (unlikely(head != local_read(&buffer->head))) {
+		local_inc(&buffer->nest);
 		goto again;
 	}
 
-	if (handle->wakeup != local_read(&data->wakeup))
+	if (handle->wakeup != local_read(&buffer->wakeup))
 		perf_output_wakeup(handle);
 
  out:
@@ -3063,12 +3063,12 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
 		buf += size;
 		handle->size -= size;
 		if (!handle->size) {
-			struct perf_mmap_data *data = handle->data;
+			struct perf_buffer *buffer = handle->buffer;
 
 			handle->page++;
-			handle->page &= data->nr_pages - 1;
-			handle->addr = data->data_pages[handle->page];
-			handle->size = PAGE_SIZE << page_order(data);
+			handle->page &= buffer->nr_pages - 1;
+			handle->addr = buffer->data_pages[handle->page];
+			handle->size = PAGE_SIZE << page_order(buffer);
 		}
 	} while (len);
 }
@@ -3077,7 +3077,7 @@ int perf_output_begin(struct perf_output_handle *handle,
 		     struct perf_event *event, unsigned int size,
 		     int nmi, int sample)
 {
-	struct perf_mmap_data *data;
+	struct perf_buffer *buffer;
 	unsigned long tail, offset, head;
 	int have_lost;
 	struct {
@@ -3093,19 +3093,19 @@ int perf_output_begin(struct perf_output_handle *handle,
 	if (event->parent)
 		event = event->parent;
 
-	data = rcu_dereference(event->data);
-	if (!data)
+	buffer = rcu_dereference(event->buffer);
+	if (!buffer)
 		goto out;
 
-	handle->data = data;
+	handle->buffer = buffer;
 	handle->event = event;
 	handle->nmi = nmi;
 	handle->sample = sample;
 
-	if (!data->nr_pages)
+	if (!buffer->nr_pages)
 		goto out;
 
-	have_lost = local_read(&data->lost);
+	have_lost = local_read(&buffer->lost);
 	if (have_lost)
 		size += sizeof(lost_event);
 
@@ -3117,30 +3117,30 @@ int perf_output_begin(struct perf_output_handle *handle,
 		 * tail pointer. So that all reads will be completed before the
 		 * write is issued.
 		 */
-		tail = ACCESS_ONCE(data->user_page->data_tail);
+		tail = ACCESS_ONCE(buffer->user_page->data_tail);
 		smp_rmb();
-		offset = head = local_read(&data->head);
+		offset = head = local_read(&buffer->head);
 		head += size;
-		if (unlikely(!perf_output_space(data, tail, offset, head)))
+		if (unlikely(!perf_output_space(buffer, tail, offset, head)))
 			goto fail;
-	} while (local_cmpxchg(&data->head, offset, head) != offset);
+	} while (local_cmpxchg(&buffer->head, offset, head) != offset);
 
-	if (head - local_read(&data->wakeup) > data->watermark)
-		local_add(data->watermark, &data->wakeup);
+	if (head - local_read(&buffer->wakeup) > buffer->watermark)
+		local_add(buffer->watermark, &buffer->wakeup);
 
-	handle->page = offset >> (PAGE_SHIFT + page_order(data));
-	handle->page &= data->nr_pages - 1;
-	handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1);
-	handle->addr = data->data_pages[handle->page];
+	handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
+	handle->page &= buffer->nr_pages - 1;
+	handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
+	handle->addr = buffer->data_pages[handle->page];
 	handle->addr += handle->size;
-	handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
+	handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
 
 	if (have_lost) {
 		lost_event.header.type = PERF_RECORD_LOST;
 		lost_event.header.misc = 0;
 		lost_event.header.size = sizeof(lost_event);
 		lost_event.id = event->id;
-		lost_event.lost = local_xchg(&data->lost, 0);
+		lost_event.lost = local_xchg(&buffer->lost, 0);
 
 		perf_output_put(handle, lost_event);
 	}
@@ -3148,7 +3148,7 @@ int perf_output_begin(struct perf_output_handle *handle,
 	return 0;
 
 fail:
-	local_inc(&data->lost);
+	local_inc(&buffer->lost);
 	perf_output_put_handle(handle);
 out:
 	rcu_read_unlock();
@@ -3159,15 +3159,15 @@ out:
 void perf_output_end(struct perf_output_handle *handle)
 {
 	struct perf_event *event = handle->event;
-	struct perf_mmap_data *data = handle->data;
+	struct perf_buffer *buffer = handle->buffer;
 
 	int wakeup_events = event->attr.wakeup_events;
 
 	if (handle->sample && wakeup_events) {
-		int events = local_inc_return(&data->events);
+		int events = local_inc_return(&buffer->events);
 		if (events >= wakeup_events) {
-			local_sub(wakeup_events, &data->events);
-			local_inc(&data->wakeup);
+			local_sub(wakeup_events, &buffer->events);
+			local_inc(&buffer->wakeup);
 		}
 	}
 
@@ -5010,7 +5010,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-	struct perf_mmap_data *data = NULL, *old_data = NULL;
+	struct perf_buffer *buffer = NULL, *old_buffer = NULL;
 	int ret = -EINVAL;
 
 	if (!output_event)
@@ -5040,19 +5040,19 @@ set:
 
 	if (output_event) {
 		/* get the buffer we want to redirect to */
-		data = perf_mmap_data_get(output_event);
-		if (!data)
+		buffer = perf_buffer_get(output_event);
+		if (!buffer)
 			goto unlock;
 	}
 
-	old_data = event->data;
-	rcu_assign_pointer(event->data, data);
+	old_buffer = event->buffer;
+	rcu_assign_pointer(event->buffer, buffer);
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
 
-	if (old_data)
-		perf_mmap_data_put(old_data);
+	if (old_buffer)
+		perf_buffer_put(old_buffer);
 out:
 	return ret;
 }