Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 458 |
1 file changed, 234 insertions, 224 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ff86c558af4c..c772a3d4000d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event, | |||
675 | struct perf_event *event, *partial_group = NULL; | 675 | struct perf_event *event, *partial_group = NULL; |
676 | const struct pmu *pmu = group_event->pmu; | 676 | const struct pmu *pmu = group_event->pmu; |
677 | bool txn = false; | 677 | bool txn = false; |
678 | int ret; | ||
679 | 678 | ||
680 | if (group_event->state == PERF_EVENT_STATE_OFF) | 679 | if (group_event->state == PERF_EVENT_STATE_OFF) |
681 | return 0; | 680 | return 0; |
@@ -703,14 +702,8 @@ group_sched_in(struct perf_event *group_event, | |||
703 | } | 702 | } |
704 | } | 703 | } |
705 | 704 | ||
706 | if (!txn) | 705 | if (!txn || !pmu->commit_txn(pmu)) |
707 | return 0; | ||
708 | |||
709 | ret = pmu->commit_txn(pmu); | ||
710 | if (!ret) { | ||
711 | pmu->cancel_txn(pmu); | ||
712 | return 0; | 706 | return 0; |
713 | } | ||
714 | 707 | ||
715 | group_error: | 708 | group_error: |
716 | /* | 709 | /* |
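The two hunks above collapse the commit path of group_sched_in(): the unused ret local goes away, and the "no transaction" and "commit_txn() reported success" cases are folded into one early return, while a failed commit falls through to the group_error unwind. A rough user-space model of that all-or-nothing shape (every name here is hypothetical, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct pmu_txn {
    bool (*start)(void *pmu);
    int  (*add)(void *pmu, int event);      /* 0 on success */
    int  (*commit)(void *pmu);              /* 0 on success */
    void (*cancel)(void *pmu);
    void (*del)(void *pmu, int event);
};

/* Schedule a whole group or nothing, mirroring the shape of group_sched_in(). */
static int group_add(const struct pmu_txn *ops, void *pmu,
                     const int *events, size_t n)
{
    size_t i;
    bool txn = ops->start ? ops->start(pmu) : false;

    for (i = 0; i < n; i++) {
        if (ops->add(pmu, events[i]))
            goto group_error;
    }

    if (!txn || !ops->commit(pmu))
        return 0;                           /* every member is on the PMU */

group_error:
    /* Undo only the events that did make it on. */
    while (i--)
        ops->del(pmu, events[i]);
    if (txn)
        ops->cancel(pmu);
    return -1;
}

/* Trivial stubs so the sketch links and runs. */
static bool stub_start(void *p)      { (void)p; return true; }
static int  stub_add(void *p, int e) { (void)p; (void)e; return 0; }
static int  stub_commit(void *p)     { (void)p; return 0; }
static void stub_cancel(void *p)     { (void)p; }
static void stub_del(void *p, int e) { (void)p; (void)e; }

int main(void)
{
    const struct pmu_txn ops = {
        stub_start, stub_add, stub_commit, stub_cancel, stub_del
    };
    int events[] = { 1, 2, 3 };

    return group_add(&ops, NULL, events, 3);   /* 0: whole group scheduled */
}

The point of the shape is that either the whole group goes on the hardware or none of it does; partial success is always rolled back.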
@@ -1155,9 +1148,9 @@ static void __perf_event_sync_stat(struct perf_event *event, | |||
1155 | * In order to keep per-task stats reliable we need to flip the event | 1148 | * In order to keep per-task stats reliable we need to flip the event |
1156 | * values when we flip the contexts. | 1149 | * values when we flip the contexts. |
1157 | */ | 1150 | */ |
1158 | value = atomic64_read(&next_event->count); | 1151 | value = local64_read(&next_event->count); |
1159 | value = atomic64_xchg(&event->count, value); | 1152 | value = local64_xchg(&event->count, value); |
1160 | atomic64_set(&next_event->count, value); | 1153 | local64_set(&next_event->count, value); |
1161 | 1154 | ||
1162 | swap(event->total_time_enabled, next_event->total_time_enabled); | 1155 | swap(event->total_time_enabled, next_event->total_time_enabled); |
1163 | swap(event->total_time_running, next_event->total_time_running); | 1156 | swap(event->total_time_running, next_event->total_time_running); |
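Most of the arithmetic changes in this diff are the atomic64_* to local64_* conversion of event->count, hw.prev_count and hw.period_left. A local64_t only has to be safe against interrupts on the CPU that owns the value, so on architectures such as x86 it can avoid the cost of fully-locked cross-CPU atomics. A user-space stand-in for the read/xchg/set sequence in __perf_event_sync_stat() above (C11 atomics are used here, so this over-serializes compared to the kernel primitive):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for local64_t: in the kernel this is cheaper than atomic64_t
 * because it only needs to be safe against the owning CPU's own interrupts. */
typedef _Atomic int64_t local64_like_t;

static int64_t local64_like_read(local64_like_t *l)            { return atomic_load(l); }
static void    local64_like_set(local64_like_t *l, int64_t v)  { atomic_store(l, v); }
static int64_t local64_like_xchg(local64_like_t *l, int64_t v) { return atomic_exchange(l, v); }

int main(void)
{
    local64_like_t prev_count = 100, next_count = 250;

    /* The swap performed by __perf_event_sync_stat() when contexts flip. */
    int64_t value = local64_like_read(&next_count);
    value = local64_like_xchg(&prev_count, value);
    local64_like_set(&next_count, value);

    printf("prev=%lld next=%lld\n",
           (long long)local64_like_read(&prev_count),
           (long long)local64_like_read(&next_count));
    return 0;
}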
@@ -1547,10 +1540,10 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | |||
1547 | 1540 | ||
1548 | hwc->sample_period = sample_period; | 1541 | hwc->sample_period = sample_period; |
1549 | 1542 | ||
1550 | if (atomic64_read(&hwc->period_left) > 8*sample_period) { | 1543 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
1551 | perf_disable(); | 1544 | perf_disable(); |
1552 | perf_event_stop(event); | 1545 | perf_event_stop(event); |
1553 | atomic64_set(&hwc->period_left, 0); | 1546 | local64_set(&hwc->period_left, 0); |
1554 | perf_event_start(event); | 1547 | perf_event_start(event); |
1555 | perf_enable(); | 1548 | perf_enable(); |
1556 | } | 1549 | } |
@@ -1591,7 +1584,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1591 | 1584 | ||
1592 | perf_disable(); | 1585 | perf_disable(); |
1593 | event->pmu->read(event); | 1586 | event->pmu->read(event); |
1594 | now = atomic64_read(&event->count); | 1587 | now = local64_read(&event->count); |
1595 | delta = now - hwc->freq_count_stamp; | 1588 | delta = now - hwc->freq_count_stamp; |
1596 | hwc->freq_count_stamp = now; | 1589 | hwc->freq_count_stamp = now; |
1597 | 1590 | ||
@@ -1743,6 +1736,11 @@ static void __perf_event_read(void *info) | |||
1743 | event->pmu->read(event); | 1736 | event->pmu->read(event); |
1744 | } | 1737 | } |
1745 | 1738 | ||
1739 | static inline u64 perf_event_count(struct perf_event *event) | ||
1740 | { | ||
1741 | return local64_read(&event->count) + atomic64_read(&event->child_count); | ||
1742 | } | ||
1743 | |||
1746 | static u64 perf_event_read(struct perf_event *event) | 1744 | static u64 perf_event_read(struct perf_event *event) |
1747 | { | 1745 | { |
1748 | /* | 1746 | /* |
@@ -1762,7 +1760,7 @@ static u64 perf_event_read(struct perf_event *event) | |||
1762 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 1760 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
1763 | } | 1761 | } |
1764 | 1762 | ||
1765 | return atomic64_read(&event->count); | 1763 | return perf_event_count(event); |
1766 | } | 1764 | } |
1767 | 1765 | ||
1768 | /* | 1766 | /* |
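The new perf_event_count() helper splits an event's total into two pieces: the local64_t count the event accumulates itself, and the atomic64_t child_count into which exiting inherited children fold their totals (see the sync_child_event() hunk at the end of this diff). perf_event_read(), the mmap userpage and the read()/sample output paths all switch to this sum. A minimal illustration of the arithmetic, with plain integers standing in for the kernel types:

#include <stdint.h>
#include <stdio.h>

struct event_counts {
    int64_t count;        /* ticks accumulated by this event itself    */
    int64_t child_count;  /* ticks folded back in from exited children */
};

static int64_t event_total(const struct event_counts *e)
{
    return e->count + e->child_count;   /* what perf_event_count() returns */
}

int main(void)
{
    struct event_counts parent = { .count = 1000, .child_count = 0 };

    /* A child exits with 400 ticks: sync_child_event() adds them to
     * child_count rather than to the parent's own local counter. */
    parent.child_count += 400;

    printf("reported to userspace: %lld\n", (long long)event_total(&parent));
    return 0;
}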
@@ -1883,7 +1881,7 @@ static void free_event_rcu(struct rcu_head *head) | |||
1883 | } | 1881 | } |
1884 | 1882 | ||
1885 | static void perf_pending_sync(struct perf_event *event); | 1883 | static void perf_pending_sync(struct perf_event *event); |
1886 | static void perf_mmap_data_put(struct perf_mmap_data *data); | 1884 | static void perf_buffer_put(struct perf_buffer *buffer); |
1887 | 1885 | ||
1888 | static void free_event(struct perf_event *event) | 1886 | static void free_event(struct perf_event *event) |
1889 | { | 1887 | { |
@@ -1891,7 +1889,7 @@ static void free_event(struct perf_event *event) | |||
1891 | 1889 | ||
1892 | if (!event->parent) { | 1890 | if (!event->parent) { |
1893 | atomic_dec(&nr_events); | 1891 | atomic_dec(&nr_events); |
1894 | if (event->attr.mmap) | 1892 | if (event->attr.mmap || event->attr.mmap_data) |
1895 | atomic_dec(&nr_mmap_events); | 1893 | atomic_dec(&nr_mmap_events); |
1896 | if (event->attr.comm) | 1894 | if (event->attr.comm) |
1897 | atomic_dec(&nr_comm_events); | 1895 | atomic_dec(&nr_comm_events); |
@@ -1899,9 +1897,9 @@ static void free_event(struct perf_event *event) | |||
1899 | atomic_dec(&nr_task_events); | 1897 | atomic_dec(&nr_task_events); |
1900 | } | 1898 | } |
1901 | 1899 | ||
1902 | if (event->data) { | 1900 | if (event->buffer) { |
1903 | perf_mmap_data_put(event->data); | 1901 | perf_buffer_put(event->buffer); |
1904 | event->data = NULL; | 1902 | event->buffer = NULL; |
1905 | } | 1903 | } |
1906 | 1904 | ||
1907 | if (event->destroy) | 1905 | if (event->destroy) |
@@ -2126,13 +2124,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
2126 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 2124 | static unsigned int perf_poll(struct file *file, poll_table *wait) |
2127 | { | 2125 | { |
2128 | struct perf_event *event = file->private_data; | 2126 | struct perf_event *event = file->private_data; |
2129 | struct perf_mmap_data *data; | 2127 | struct perf_buffer *buffer; |
2130 | unsigned int events = POLL_HUP; | 2128 | unsigned int events = POLL_HUP; |
2131 | 2129 | ||
2132 | rcu_read_lock(); | 2130 | rcu_read_lock(); |
2133 | data = rcu_dereference(event->data); | 2131 | buffer = rcu_dereference(event->buffer); |
2134 | if (data) | 2132 | if (buffer) |
2135 | events = atomic_xchg(&data->poll, 0); | 2133 | events = atomic_xchg(&buffer->poll, 0); |
2136 | rcu_read_unlock(); | 2134 | rcu_read_unlock(); |
2137 | 2135 | ||
2138 | poll_wait(file, &event->waitq, wait); | 2136 | poll_wait(file, &event->waitq, wait); |
@@ -2143,7 +2141,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) | |||
2143 | static void perf_event_reset(struct perf_event *event) | 2141 | static void perf_event_reset(struct perf_event *event) |
2144 | { | 2142 | { |
2145 | (void)perf_event_read(event); | 2143 | (void)perf_event_read(event); |
2146 | atomic64_set(&event->count, 0); | 2144 | local64_set(&event->count, 0); |
2147 | perf_event_update_userpage(event); | 2145 | perf_event_update_userpage(event); |
2148 | } | 2146 | } |
2149 | 2147 | ||
@@ -2342,14 +2340,14 @@ static int perf_event_index(struct perf_event *event) | |||
2342 | void perf_event_update_userpage(struct perf_event *event) | 2340 | void perf_event_update_userpage(struct perf_event *event) |
2343 | { | 2341 | { |
2344 | struct perf_event_mmap_page *userpg; | 2342 | struct perf_event_mmap_page *userpg; |
2345 | struct perf_mmap_data *data; | 2343 | struct perf_buffer *buffer; |
2346 | 2344 | ||
2347 | rcu_read_lock(); | 2345 | rcu_read_lock(); |
2348 | data = rcu_dereference(event->data); | 2346 | buffer = rcu_dereference(event->buffer); |
2349 | if (!data) | 2347 | if (!buffer) |
2350 | goto unlock; | 2348 | goto unlock; |
2351 | 2349 | ||
2352 | userpg = data->user_page; | 2350 | userpg = buffer->user_page; |
2353 | 2351 | ||
2354 | /* | 2352 | /* |
2355 | * Disable preemption so as to not let the corresponding user-space | 2353 | * Disable preemption so as to not let the corresponding user-space |
@@ -2359,9 +2357,9 @@ void perf_event_update_userpage(struct perf_event *event) | |||
2359 | ++userpg->lock; | 2357 | ++userpg->lock; |
2360 | barrier(); | 2358 | barrier(); |
2361 | userpg->index = perf_event_index(event); | 2359 | userpg->index = perf_event_index(event); |
2362 | userpg->offset = atomic64_read(&event->count); | 2360 | userpg->offset = perf_event_count(event); |
2363 | if (event->state == PERF_EVENT_STATE_ACTIVE) | 2361 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
2364 | userpg->offset -= atomic64_read(&event->hw.prev_count); | 2362 | userpg->offset -= local64_read(&event->hw.prev_count); |
2365 | 2363 | ||
2366 | userpg->time_enabled = event->total_time_enabled + | 2364 | userpg->time_enabled = event->total_time_enabled + |
2367 | atomic64_read(&event->child_total_time_enabled); | 2365 | atomic64_read(&event->child_total_time_enabled); |
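userpg->lock in the hunk above is a seqcount-style generation counter: the kernel bumps it and issues a barrier before rewriting index/offset/time_enabled (and, outside the lines shown here, bumps it again once the fields are stable), so a userspace reader can detect that it raced with an update and retry. A conservative user-space rendering of that publish/retry pattern, assuming the writer brackets the update with two increments:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mmap_page_like {
    _Atomic uint32_t lock;
    uint64_t offset;
    uint64_t time_enabled;
};

static void writer_update(struct mmap_page_like *pg, uint64_t off, uint64_t ena)
{
    atomic_fetch_add_explicit(&pg->lock, 1, memory_order_release); /* odd: update in progress */
    atomic_thread_fence(memory_order_seq_cst);
    pg->offset = off;
    pg->time_enabled = ena;
    atomic_thread_fence(memory_order_seq_cst);
    atomic_fetch_add_explicit(&pg->lock, 1, memory_order_release); /* even: fields stable */
}

static void reader_snapshot(struct mmap_page_like *pg, uint64_t *off, uint64_t *ena)
{
    uint32_t seq;
    do {
        seq = atomic_load(&pg->lock);
        atomic_thread_fence(memory_order_seq_cst);
        *off = pg->offset;
        *ena = pg->time_enabled;
        atomic_thread_fence(memory_order_seq_cst);
    } while ((seq & 1) || seq != atomic_load(&pg->lock));   /* raced: retry */
}

int main(void)
{
    struct mmap_page_like pg = { 0 };
    uint64_t off, ena;

    writer_update(&pg, 1234, 56);
    reader_snapshot(&pg, &off, &ena);
    printf("offset=%llu enabled=%llu\n",
           (unsigned long long)off, (unsigned long long)ena);
    return 0;
}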
@@ -2376,6 +2374,25 @@ unlock: | |||
2376 | rcu_read_unlock(); | 2374 | rcu_read_unlock(); |
2377 | } | 2375 | } |
2378 | 2376 | ||
2377 | static unsigned long perf_data_size(struct perf_buffer *buffer); | ||
2378 | |||
2379 | static void | ||
2380 | perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) | ||
2381 | { | ||
2382 | long max_size = perf_data_size(buffer); | ||
2383 | |||
2384 | if (watermark) | ||
2385 | buffer->watermark = min(max_size, watermark); | ||
2386 | |||
2387 | if (!buffer->watermark) | ||
2388 | buffer->watermark = max_size / 2; | ||
2389 | |||
2390 | if (flags & PERF_BUFFER_WRITABLE) | ||
2391 | buffer->writable = 1; | ||
2392 | |||
2393 | atomic_set(&buffer->refcount, 1); | ||
2394 | } | ||
2395 | |||
2379 | #ifndef CONFIG_PERF_USE_VMALLOC | 2396 | #ifndef CONFIG_PERF_USE_VMALLOC |
2380 | 2397 | ||
2381 | /* | 2398 | /* |
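perf_buffer_init() pulls the wakeup-watermark policy out of the old perf_mmap_data_init(): a caller-supplied watermark is clamped to the buffer size, and if none is given the default is half the buffer; the writable flag and the initial reference count are set here as well. The watermark arithmetic, stripped of the kernel types (the flag value below is only illustrative):

#include <stdio.h>

#define PERF_BUFFER_WRITABLE 0x01   /* illustrative value */

struct buffer_cfg {
    long size;        /* perf_data_size(): total data bytes          */
    long watermark;   /* bytes written before readers are woken up   */
    int  writable;
};

static void buffer_init(struct buffer_cfg *b, long watermark, int flags)
{
    if (watermark)
        b->watermark = watermark < b->size ? watermark : b->size;
    if (!b->watermark)
        b->watermark = b->size / 2;
    if (flags & PERF_BUFFER_WRITABLE)
        b->writable = 1;
}

int main(void)
{
    struct buffer_cfg b = { .size = 16 * 4096 };   /* 16 data pages of 4 KiB */

    buffer_init(&b, 0, PERF_BUFFER_WRITABLE);      /* no explicit watermark */
    printf("watermark=%ld writable=%d\n", b.watermark, b.writable);
    return 0;
}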
@@ -2383,15 +2400,15 @@ unlock: | |||
2383 | */ | 2400 | */ |
2384 | 2401 | ||
2385 | static struct page * | 2402 | static struct page * |
2386 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2403 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) |
2387 | { | 2404 | { |
2388 | if (pgoff > data->nr_pages) | 2405 | if (pgoff > buffer->nr_pages) |
2389 | return NULL; | 2406 | return NULL; |
2390 | 2407 | ||
2391 | if (pgoff == 0) | 2408 | if (pgoff == 0) |
2392 | return virt_to_page(data->user_page); | 2409 | return virt_to_page(buffer->user_page); |
2393 | 2410 | ||
2394 | return virt_to_page(data->data_pages[pgoff - 1]); | 2411 | return virt_to_page(buffer->data_pages[pgoff - 1]); |
2395 | } | 2412 | } |
2396 | 2413 | ||
2397 | static void *perf_mmap_alloc_page(int cpu) | 2414 | static void *perf_mmap_alloc_page(int cpu) |
@@ -2407,42 +2424,44 @@ static void *perf_mmap_alloc_page(int cpu) | |||
2407 | return page_address(page); | 2424 | return page_address(page); |
2408 | } | 2425 | } |
2409 | 2426 | ||
2410 | static struct perf_mmap_data * | 2427 | static struct perf_buffer * |
2411 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2428 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) |
2412 | { | 2429 | { |
2413 | struct perf_mmap_data *data; | 2430 | struct perf_buffer *buffer; |
2414 | unsigned long size; | 2431 | unsigned long size; |
2415 | int i; | 2432 | int i; |
2416 | 2433 | ||
2417 | size = sizeof(struct perf_mmap_data); | 2434 | size = sizeof(struct perf_buffer); |
2418 | size += nr_pages * sizeof(void *); | 2435 | size += nr_pages * sizeof(void *); |
2419 | 2436 | ||
2420 | data = kzalloc(size, GFP_KERNEL); | 2437 | buffer = kzalloc(size, GFP_KERNEL); |
2421 | if (!data) | 2438 | if (!buffer) |
2422 | goto fail; | 2439 | goto fail; |
2423 | 2440 | ||
2424 | data->user_page = perf_mmap_alloc_page(event->cpu); | 2441 | buffer->user_page = perf_mmap_alloc_page(cpu); |
2425 | if (!data->user_page) | 2442 | if (!buffer->user_page) |
2426 | goto fail_user_page; | 2443 | goto fail_user_page; |
2427 | 2444 | ||
2428 | for (i = 0; i < nr_pages; i++) { | 2445 | for (i = 0; i < nr_pages; i++) { |
2429 | data->data_pages[i] = perf_mmap_alloc_page(event->cpu); | 2446 | buffer->data_pages[i] = perf_mmap_alloc_page(cpu); |
2430 | if (!data->data_pages[i]) | 2447 | if (!buffer->data_pages[i]) |
2431 | goto fail_data_pages; | 2448 | goto fail_data_pages; |
2432 | } | 2449 | } |
2433 | 2450 | ||
2434 | data->nr_pages = nr_pages; | 2451 | buffer->nr_pages = nr_pages; |
2452 | |||
2453 | perf_buffer_init(buffer, watermark, flags); | ||
2435 | 2454 | ||
2436 | return data; | 2455 | return buffer; |
2437 | 2456 | ||
2438 | fail_data_pages: | 2457 | fail_data_pages: |
2439 | for (i--; i >= 0; i--) | 2458 | for (i--; i >= 0; i--) |
2440 | free_page((unsigned long)data->data_pages[i]); | 2459 | free_page((unsigned long)buffer->data_pages[i]); |
2441 | 2460 | ||
2442 | free_page((unsigned long)data->user_page); | 2461 | free_page((unsigned long)buffer->user_page); |
2443 | 2462 | ||
2444 | fail_user_page: | 2463 | fail_user_page: |
2445 | kfree(data); | 2464 | kfree(buffer); |
2446 | 2465 | ||
2447 | fail: | 2466 | fail: |
2448 | return NULL; | 2467 | return NULL; |
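perf_buffer_alloc() now takes the CPU, watermark and flags directly instead of a perf_event, so the allocator no longer depends on the event and finishes by calling perf_buffer_init() itself. The error path keeps the usual goto ladder, where each label undoes exactly the allocations that succeeded before the failure. A compact user-space rendering of that unwind pattern (calloc/free stand in for the page allocator; names are hypothetical):

#include <stdlib.h>

struct ring {
    void *user_page;
    int   nr_pages;
    void *data_pages[];   /* flexible array, one pointer per data page */
};

static struct ring *ring_alloc(int nr_pages, size_t page_size)
{
    struct ring *r;
    int i;

    r = calloc(1, sizeof(*r) + nr_pages * sizeof(void *));
    if (!r)
        goto fail;

    r->user_page = calloc(1, page_size);
    if (!r->user_page)
        goto fail_user_page;

    for (i = 0; i < nr_pages; i++) {
        r->data_pages[i] = calloc(1, page_size);
        if (!r->data_pages[i])
            goto fail_data_pages;
    }

    r->nr_pages = nr_pages;
    return r;

fail_data_pages:                    /* free only the pages that were allocated */
    for (i--; i >= 0; i--)
        free(r->data_pages[i]);
    free(r->user_page);
fail_user_page:
    free(r);
fail:
    return NULL;
}

int main(void)
{
    struct ring *r = ring_alloc(4, 4096);

    if (r) {
        for (int i = 0; i < r->nr_pages; i++)
            free(r->data_pages[i]);
        free(r->user_page);
        free(r);
    }
    return 0;
}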
@@ -2456,17 +2475,17 @@ static void perf_mmap_free_page(unsigned long addr) | |||
2456 | __free_page(page); | 2475 | __free_page(page); |
2457 | } | 2476 | } |
2458 | 2477 | ||
2459 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2478 | static void perf_buffer_free(struct perf_buffer *buffer) |
2460 | { | 2479 | { |
2461 | int i; | 2480 | int i; |
2462 | 2481 | ||
2463 | perf_mmap_free_page((unsigned long)data->user_page); | 2482 | perf_mmap_free_page((unsigned long)buffer->user_page); |
2464 | for (i = 0; i < data->nr_pages; i++) | 2483 | for (i = 0; i < buffer->nr_pages; i++) |
2465 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 2484 | perf_mmap_free_page((unsigned long)buffer->data_pages[i]); |
2466 | kfree(data); | 2485 | kfree(buffer); |
2467 | } | 2486 | } |
2468 | 2487 | ||
2469 | static inline int page_order(struct perf_mmap_data *data) | 2488 | static inline int page_order(struct perf_buffer *buffer) |
2470 | { | 2489 | { |
2471 | return 0; | 2490 | return 0; |
2472 | } | 2491 | } |
@@ -2479,18 +2498,18 @@ static inline int page_order(struct perf_mmap_data *data) | |||
2479 | * Required for architectures that have d-cache aliasing issues. | 2498 | * Required for architectures that have d-cache aliasing issues. |
2480 | */ | 2499 | */ |
2481 | 2500 | ||
2482 | static inline int page_order(struct perf_mmap_data *data) | 2501 | static inline int page_order(struct perf_buffer *buffer) |
2483 | { | 2502 | { |
2484 | return data->page_order; | 2503 | return buffer->page_order; |
2485 | } | 2504 | } |
2486 | 2505 | ||
2487 | static struct page * | 2506 | static struct page * |
2488 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2507 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) |
2489 | { | 2508 | { |
2490 | if (pgoff > (1UL << page_order(data))) | 2509 | if (pgoff > (1UL << page_order(buffer))) |
2491 | return NULL; | 2510 | return NULL; |
2492 | 2511 | ||
2493 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); | 2512 | return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); |
2494 | } | 2513 | } |
2495 | 2514 | ||
2496 | static void perf_mmap_unmark_page(void *addr) | 2515 | static void perf_mmap_unmark_page(void *addr) |
@@ -2500,57 +2519,59 @@ static void perf_mmap_unmark_page(void *addr) | |||
2500 | page->mapping = NULL; | 2519 | page->mapping = NULL; |
2501 | } | 2520 | } |
2502 | 2521 | ||
2503 | static void perf_mmap_data_free_work(struct work_struct *work) | 2522 | static void perf_buffer_free_work(struct work_struct *work) |
2504 | { | 2523 | { |
2505 | struct perf_mmap_data *data; | 2524 | struct perf_buffer *buffer; |
2506 | void *base; | 2525 | void *base; |
2507 | int i, nr; | 2526 | int i, nr; |
2508 | 2527 | ||
2509 | data = container_of(work, struct perf_mmap_data, work); | 2528 | buffer = container_of(work, struct perf_buffer, work); |
2510 | nr = 1 << page_order(data); | 2529 | nr = 1 << page_order(buffer); |
2511 | 2530 | ||
2512 | base = data->user_page; | 2531 | base = buffer->user_page; |
2513 | for (i = 0; i < nr + 1; i++) | 2532 | for (i = 0; i < nr + 1; i++) |
2514 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | 2533 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); |
2515 | 2534 | ||
2516 | vfree(base); | 2535 | vfree(base); |
2517 | kfree(data); | 2536 | kfree(buffer); |
2518 | } | 2537 | } |
2519 | 2538 | ||
2520 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2539 | static void perf_buffer_free(struct perf_buffer *buffer) |
2521 | { | 2540 | { |
2522 | schedule_work(&data->work); | 2541 | schedule_work(&buffer->work); |
2523 | } | 2542 | } |
2524 | 2543 | ||
2525 | static struct perf_mmap_data * | 2544 | static struct perf_buffer * |
2526 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2545 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) |
2527 | { | 2546 | { |
2528 | struct perf_mmap_data *data; | 2547 | struct perf_buffer *buffer; |
2529 | unsigned long size; | 2548 | unsigned long size; |
2530 | void *all_buf; | 2549 | void *all_buf; |
2531 | 2550 | ||
2532 | size = sizeof(struct perf_mmap_data); | 2551 | size = sizeof(struct perf_buffer); |
2533 | size += sizeof(void *); | 2552 | size += sizeof(void *); |
2534 | 2553 | ||
2535 | data = kzalloc(size, GFP_KERNEL); | 2554 | buffer = kzalloc(size, GFP_KERNEL); |
2536 | if (!data) | 2555 | if (!buffer) |
2537 | goto fail; | 2556 | goto fail; |
2538 | 2557 | ||
2539 | INIT_WORK(&data->work, perf_mmap_data_free_work); | 2558 | INIT_WORK(&buffer->work, perf_buffer_free_work); |
2540 | 2559 | ||
2541 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | 2560 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); |
2542 | if (!all_buf) | 2561 | if (!all_buf) |
2543 | goto fail_all_buf; | 2562 | goto fail_all_buf; |
2544 | 2563 | ||
2545 | data->user_page = all_buf; | 2564 | buffer->user_page = all_buf; |
2546 | data->data_pages[0] = all_buf + PAGE_SIZE; | 2565 | buffer->data_pages[0] = all_buf + PAGE_SIZE; |
2547 | data->page_order = ilog2(nr_pages); | 2566 | buffer->page_order = ilog2(nr_pages); |
2548 | data->nr_pages = 1; | 2567 | buffer->nr_pages = 1; |
2568 | |||
2569 | perf_buffer_init(buffer, watermark, flags); | ||
2549 | 2570 | ||
2550 | return data; | 2571 | return buffer; |
2551 | 2572 | ||
2552 | fail_all_buf: | 2573 | fail_all_buf: |
2553 | kfree(data); | 2574 | kfree(buffer); |
2554 | 2575 | ||
2555 | fail: | 2576 | fail: |
2556 | return NULL; | 2577 | return NULL; |
@@ -2558,15 +2579,15 @@ fail: | |||
2558 | 2579 | ||
2559 | #endif | 2580 | #endif |
2560 | 2581 | ||
2561 | static unsigned long perf_data_size(struct perf_mmap_data *data) | 2582 | static unsigned long perf_data_size(struct perf_buffer *buffer) |
2562 | { | 2583 | { |
2563 | return data->nr_pages << (PAGE_SHIFT + page_order(data)); | 2584 | return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); |
2564 | } | 2585 | } |
2565 | 2586 | ||
2566 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2587 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
2567 | { | 2588 | { |
2568 | struct perf_event *event = vma->vm_file->private_data; | 2589 | struct perf_event *event = vma->vm_file->private_data; |
2569 | struct perf_mmap_data *data; | 2590 | struct perf_buffer *buffer; |
2570 | int ret = VM_FAULT_SIGBUS; | 2591 | int ret = VM_FAULT_SIGBUS; |
2571 | 2592 | ||
2572 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | 2593 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
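perf_data_size() works for both allocators because page_order() is 0 when the buffer is built from individual pages, and ilog2(nr_pages) when CONFIG_PERF_USE_VMALLOC packs everything into one contiguous area with nr_pages forced to 1; either way the data area is nr_pages << (PAGE_SHIFT + page_order) bytes. In numbers (assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages for the example */

static unsigned long data_size(unsigned long nr_pages, int page_order)
{
    return nr_pages << (PAGE_SHIFT + page_order);
}

int main(void)
{
    /* Non-vmalloc layout: 8 separate data pages, order 0.               */
    printf("8 pages, order 0: %lu bytes\n", data_size(8, 0));
    /* Vmalloc layout: one "page" whose order records the real 8 pages.  */
    printf("1 page,  order 3: %lu bytes\n", data_size(1, 3));
    return 0;
}

Both lines print 32768, which is why the rest of the output path can use page_order() without caring which allocator built the buffer.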
@@ -2576,14 +2597,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
2576 | } | 2597 | } |
2577 | 2598 | ||
2578 | rcu_read_lock(); | 2599 | rcu_read_lock(); |
2579 | data = rcu_dereference(event->data); | 2600 | buffer = rcu_dereference(event->buffer); |
2580 | if (!data) | 2601 | if (!buffer) |
2581 | goto unlock; | 2602 | goto unlock; |
2582 | 2603 | ||
2583 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | 2604 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
2584 | goto unlock; | 2605 | goto unlock; |
2585 | 2606 | ||
2586 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); | 2607 | vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); |
2587 | if (!vmf->page) | 2608 | if (!vmf->page) |
2588 | goto unlock; | 2609 | goto unlock; |
2589 | 2610 | ||
@@ -2598,52 +2619,35 @@ unlock: | |||
2598 | return ret; | 2619 | return ret; |
2599 | } | 2620 | } |
2600 | 2621 | ||
2601 | static void | 2622 | static void perf_buffer_free_rcu(struct rcu_head *rcu_head) |
2602 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | ||
2603 | { | ||
2604 | long max_size = perf_data_size(data); | ||
2605 | |||
2606 | if (event->attr.watermark) { | ||
2607 | data->watermark = min_t(long, max_size, | ||
2608 | event->attr.wakeup_watermark); | ||
2609 | } | ||
2610 | |||
2611 | if (!data->watermark) | ||
2612 | data->watermark = max_size / 2; | ||
2613 | |||
2614 | atomic_set(&data->refcount, 1); | ||
2615 | rcu_assign_pointer(event->data, data); | ||
2616 | } | ||
2617 | |||
2618 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | ||
2619 | { | 2623 | { |
2620 | struct perf_mmap_data *data; | 2624 | struct perf_buffer *buffer; |
2621 | 2625 | ||
2622 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | 2626 | buffer = container_of(rcu_head, struct perf_buffer, rcu_head); |
2623 | perf_mmap_data_free(data); | 2627 | perf_buffer_free(buffer); |
2624 | } | 2628 | } |
2625 | 2629 | ||
2626 | static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event) | 2630 | static struct perf_buffer *perf_buffer_get(struct perf_event *event) |
2627 | { | 2631 | { |
2628 | struct perf_mmap_data *data; | 2632 | struct perf_buffer *buffer; |
2629 | 2633 | ||
2630 | rcu_read_lock(); | 2634 | rcu_read_lock(); |
2631 | data = rcu_dereference(event->data); | 2635 | buffer = rcu_dereference(event->buffer); |
2632 | if (data) { | 2636 | if (buffer) { |
2633 | if (!atomic_inc_not_zero(&data->refcount)) | 2637 | if (!atomic_inc_not_zero(&buffer->refcount)) |
2634 | data = NULL; | 2638 | buffer = NULL; |
2635 | } | 2639 | } |
2636 | rcu_read_unlock(); | 2640 | rcu_read_unlock(); |
2637 | 2641 | ||
2638 | return data; | 2642 | return buffer; |
2639 | } | 2643 | } |
2640 | 2644 | ||
2641 | static void perf_mmap_data_put(struct perf_mmap_data *data) | 2645 | static void perf_buffer_put(struct perf_buffer *buffer) |
2642 | { | 2646 | { |
2643 | if (!atomic_dec_and_test(&data->refcount)) | 2647 | if (!atomic_dec_and_test(&buffer->refcount)) |
2644 | return; | 2648 | return; |
2645 | 2649 | ||
2646 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); | 2650 | call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); |
2647 | } | 2651 | } |
2648 | 2652 | ||
2649 | static void perf_mmap_open(struct vm_area_struct *vma) | 2653 | static void perf_mmap_open(struct vm_area_struct *vma) |
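perf_buffer_get()/perf_buffer_put() keep the lookup-under-RCU-plus-refcount pattern of the old perf_mmap_data helpers: a reference may only be taken while the count is still non-zero, and the final put defers the actual free through call_rcu() so concurrent RCU readers can finish. A user-space approximation with C11 atomics, where a plain free() stands in for the RCU-deferred callback:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct buf {
    _Atomic int refcount;
    /* ... pages ... */
};

/* Take a reference only if the object is still live (refcount > 0),
 * mirroring atomic_inc_not_zero() under rcu_read_lock(). */
static bool buf_get(struct buf *b)
{
    int old = atomic_load(&b->refcount);
    while (old > 0) {
        if (atomic_compare_exchange_weak(&b->refcount, &old, old + 1))
            return true;
    }
    return false;
}

/* Drop a reference; the last one frees.  The kernel defers the free with
 * call_rcu() so readers still inside rcu_read_lock() cannot be pulled away. */
static void buf_put(struct buf *b)
{
    if (atomic_fetch_sub(&b->refcount, 1) == 1)
        free(b);                  /* stand-in for call_rcu(..., free_rcu) */
}

int main(void)
{
    struct buf *b = calloc(1, sizeof(*b));
    atomic_store(&b->refcount, 1);    /* reference held by the event */

    if (buf_get(b))                   /* e.g. a second event redirecting output */
        buf_put(b);
    buf_put(b);                       /* final put frees */
    return 0;
}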
@@ -2658,16 +2662,16 @@ static void perf_mmap_close(struct vm_area_struct *vma) | |||
2658 | struct perf_event *event = vma->vm_file->private_data; | 2662 | struct perf_event *event = vma->vm_file->private_data; |
2659 | 2663 | ||
2660 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 2664 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
2661 | unsigned long size = perf_data_size(event->data); | 2665 | unsigned long size = perf_data_size(event->buffer); |
2662 | struct user_struct *user = event->mmap_user; | 2666 | struct user_struct *user = event->mmap_user; |
2663 | struct perf_mmap_data *data = event->data; | 2667 | struct perf_buffer *buffer = event->buffer; |
2664 | 2668 | ||
2665 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 2669 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
2666 | vma->vm_mm->locked_vm -= event->mmap_locked; | 2670 | vma->vm_mm->locked_vm -= event->mmap_locked; |
2667 | rcu_assign_pointer(event->data, NULL); | 2671 | rcu_assign_pointer(event->buffer, NULL); |
2668 | mutex_unlock(&event->mmap_mutex); | 2672 | mutex_unlock(&event->mmap_mutex); |
2669 | 2673 | ||
2670 | perf_mmap_data_put(data); | 2674 | perf_buffer_put(buffer); |
2671 | free_uid(user); | 2675 | free_uid(user); |
2672 | } | 2676 | } |
2673 | } | 2677 | } |
@@ -2685,11 +2689,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2685 | unsigned long user_locked, user_lock_limit; | 2689 | unsigned long user_locked, user_lock_limit; |
2686 | struct user_struct *user = current_user(); | 2690 | struct user_struct *user = current_user(); |
2687 | unsigned long locked, lock_limit; | 2691 | unsigned long locked, lock_limit; |
2688 | struct perf_mmap_data *data; | 2692 | struct perf_buffer *buffer; |
2689 | unsigned long vma_size; | 2693 | unsigned long vma_size; |
2690 | unsigned long nr_pages; | 2694 | unsigned long nr_pages; |
2691 | long user_extra, extra; | 2695 | long user_extra, extra; |
2692 | int ret = 0; | 2696 | int ret = 0, flags = 0; |
2693 | 2697 | ||
2694 | /* | 2698 | /* |
2695 | * Don't allow mmap() of inherited per-task counters. This would | 2699 | * Don't allow mmap() of inherited per-task counters. This would |
@@ -2706,7 +2710,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2706 | nr_pages = (vma_size / PAGE_SIZE) - 1; | 2710 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
2707 | 2711 | ||
2708 | /* | 2712 | /* |
2709 | * If we have data pages ensure they're a power-of-two number, so we | 2713 | * If we have buffer pages ensure they're a power-of-two number, so we |
2710 | * can do bitmasks instead of modulo. | 2714 | * can do bitmasks instead of modulo. |
2711 | */ | 2715 | */ |
2712 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 2716 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
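The comment above is the reason nr_pages must be a power of two: offsets into the ring can then be wrapped with a bitwise AND instead of a modulo, which is what the output path does with masks such as nr_pages - 1 and size - 1. For example:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long nr_pages = 8;             /* power of two */
    unsigned long mask = nr_pages - 1;      /* 0b111 */

    /* For powers of two, AND with the mask and modulo agree everywhere. */
    for (unsigned long page = 0; page < 20; page++)
        assert((page & mask) == (page % nr_pages));

    printf("page 13 wraps to slot %lu\n", 13UL & mask);
    return 0;
}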
@@ -2720,9 +2724,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2720 | 2724 | ||
2721 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2725 | WARN_ON_ONCE(event->ctx->parent_ctx); |
2722 | mutex_lock(&event->mmap_mutex); | 2726 | mutex_lock(&event->mmap_mutex); |
2723 | if (event->data) { | 2727 | if (event->buffer) { |
2724 | if (event->data->nr_pages == nr_pages) | 2728 | if (event->buffer->nr_pages == nr_pages) |
2725 | atomic_inc(&event->data->refcount); | 2729 | atomic_inc(&event->buffer->refcount); |
2726 | else | 2730 | else |
2727 | ret = -EINVAL; | 2731 | ret = -EINVAL; |
2728 | goto unlock; | 2732 | goto unlock; |
@@ -2752,17 +2756,18 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2752 | goto unlock; | 2756 | goto unlock; |
2753 | } | 2757 | } |
2754 | 2758 | ||
2755 | WARN_ON(event->data); | 2759 | WARN_ON(event->buffer); |
2760 | |||
2761 | if (vma->vm_flags & VM_WRITE) | ||
2762 | flags |= PERF_BUFFER_WRITABLE; | ||
2756 | 2763 | ||
2757 | data = perf_mmap_data_alloc(event, nr_pages); | 2764 | buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, |
2758 | if (!data) { | 2765 | event->cpu, flags); |
2766 | if (!buffer) { | ||
2759 | ret = -ENOMEM; | 2767 | ret = -ENOMEM; |
2760 | goto unlock; | 2768 | goto unlock; |
2761 | } | 2769 | } |
2762 | 2770 | rcu_assign_pointer(event->buffer, buffer); | |
2763 | perf_mmap_data_init(event, data); | ||
2764 | if (vma->vm_flags & VM_WRITE) | ||
2765 | event->data->writable = 1; | ||
2766 | 2771 | ||
2767 | atomic_long_add(user_extra, &user->locked_vm); | 2772 | atomic_long_add(user_extra, &user->locked_vm); |
2768 | event->mmap_locked = extra; | 2773 | event->mmap_locked = extra; |
@@ -2941,11 +2946,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
2941 | return NULL; | 2946 | return NULL; |
2942 | } | 2947 | } |
2943 | 2948 | ||
2944 | __weak | ||
2945 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) | ||
2946 | { | ||
2947 | } | ||
2948 | |||
2949 | 2949 | ||
2950 | /* | 2950 | /* |
2951 | * We assume there is only KVM supporting the callbacks. | 2951 | * We assume there is only KVM supporting the callbacks. |
@@ -2971,15 +2971,15 @@ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); | |||
2971 | /* | 2971 | /* |
2972 | * Output | 2972 | * Output |
2973 | */ | 2973 | */ |
2974 | static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | 2974 | static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, |
2975 | unsigned long offset, unsigned long head) | 2975 | unsigned long offset, unsigned long head) |
2976 | { | 2976 | { |
2977 | unsigned long mask; | 2977 | unsigned long mask; |
2978 | 2978 | ||
2979 | if (!data->writable) | 2979 | if (!buffer->writable) |
2980 | return true; | 2980 | return true; |
2981 | 2981 | ||
2982 | mask = perf_data_size(data) - 1; | 2982 | mask = perf_data_size(buffer) - 1; |
2983 | 2983 | ||
2984 | offset = (offset - tail) & mask; | 2984 | offset = (offset - tail) & mask; |
2985 | head = (head - tail) & mask; | 2985 | head = (head - tail) & mask; |
@@ -2992,7 +2992,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | |||
2992 | 2992 | ||
2993 | static void perf_output_wakeup(struct perf_output_handle *handle) | 2993 | static void perf_output_wakeup(struct perf_output_handle *handle) |
2994 | { | 2994 | { |
2995 | atomic_set(&handle->data->poll, POLL_IN); | 2995 | atomic_set(&handle->buffer->poll, POLL_IN); |
2996 | 2996 | ||
2997 | if (handle->nmi) { | 2997 | if (handle->nmi) { |
2998 | handle->event->pending_wakeup = 1; | 2998 | handle->event->pending_wakeup = 1; |
@@ -3012,45 +3012,45 @@ static void perf_output_wakeup(struct perf_output_handle *handle) | |||
3012 | */ | 3012 | */ |
3013 | static void perf_output_get_handle(struct perf_output_handle *handle) | 3013 | static void perf_output_get_handle(struct perf_output_handle *handle) |
3014 | { | 3014 | { |
3015 | struct perf_mmap_data *data = handle->data; | 3015 | struct perf_buffer *buffer = handle->buffer; |
3016 | 3016 | ||
3017 | preempt_disable(); | 3017 | preempt_disable(); |
3018 | local_inc(&data->nest); | 3018 | local_inc(&buffer->nest); |
3019 | handle->wakeup = local_read(&data->wakeup); | 3019 | handle->wakeup = local_read(&buffer->wakeup); |
3020 | } | 3020 | } |
3021 | 3021 | ||
3022 | static void perf_output_put_handle(struct perf_output_handle *handle) | 3022 | static void perf_output_put_handle(struct perf_output_handle *handle) |
3023 | { | 3023 | { |
3024 | struct perf_mmap_data *data = handle->data; | 3024 | struct perf_buffer *buffer = handle->buffer; |
3025 | unsigned long head; | 3025 | unsigned long head; |
3026 | 3026 | ||
3027 | again: | 3027 | again: |
3028 | head = local_read(&data->head); | 3028 | head = local_read(&buffer->head); |
3029 | 3029 | ||
3030 | /* | 3030 | /* |
3031 | * IRQ/NMI can happen here, which means we can miss a head update. | 3031 | * IRQ/NMI can happen here, which means we can miss a head update. |
3032 | */ | 3032 | */ |
3033 | 3033 | ||
3034 | if (!local_dec_and_test(&data->nest)) | 3034 | if (!local_dec_and_test(&buffer->nest)) |
3035 | goto out; | 3035 | goto out; |
3036 | 3036 | ||
3037 | /* | 3037 | /* |
3038 | * Publish the known good head. Rely on the full barrier implied | 3038 | * Publish the known good head. Rely on the full barrier implied |
3039 | * by atomic_dec_and_test() order the data->head read and this | 3039 | * by atomic_dec_and_test() order the buffer->head read and this |
3040 | * write. | 3040 | * write. |
3041 | */ | 3041 | */ |
3042 | data->user_page->data_head = head; | 3042 | buffer->user_page->data_head = head; |
3043 | 3043 | ||
3044 | /* | 3044 | /* |
3045 | * Now check if we missed an update, rely on the (compiler) | 3045 | * Now check if we missed an update, rely on the (compiler) |
3046 | * barrier in atomic_dec_and_test() to re-read data->head. | 3046 | * barrier in atomic_dec_and_test() to re-read buffer->head. |
3047 | */ | 3047 | */ |
3048 | if (unlikely(head != local_read(&data->head))) { | 3048 | if (unlikely(head != local_read(&buffer->head))) { |
3049 | local_inc(&data->nest); | 3049 | local_inc(&buffer->nest); |
3050 | goto again; | 3050 | goto again; |
3051 | } | 3051 | } |
3052 | 3052 | ||
3053 | if (handle->wakeup != local_read(&data->wakeup)) | 3053 | if (handle->wakeup != local_read(&buffer->wakeup)) |
3054 | perf_output_wakeup(handle); | 3054 | perf_output_wakeup(handle); |
3055 | 3055 | ||
3056 | out: | 3056 | out: |
@@ -3070,12 +3070,12 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle, | |||
3070 | buf += size; | 3070 | buf += size; |
3071 | handle->size -= size; | 3071 | handle->size -= size; |
3072 | if (!handle->size) { | 3072 | if (!handle->size) { |
3073 | struct perf_mmap_data *data = handle->data; | 3073 | struct perf_buffer *buffer = handle->buffer; |
3074 | 3074 | ||
3075 | handle->page++; | 3075 | handle->page++; |
3076 | handle->page &= data->nr_pages - 1; | 3076 | handle->page &= buffer->nr_pages - 1; |
3077 | handle->addr = data->data_pages[handle->page]; | 3077 | handle->addr = buffer->data_pages[handle->page]; |
3078 | handle->size = PAGE_SIZE << page_order(data); | 3078 | handle->size = PAGE_SIZE << page_order(buffer); |
3079 | } | 3079 | } |
3080 | } while (len); | 3080 | } while (len); |
3081 | } | 3081 | } |
@@ -3084,7 +3084,7 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3084 | struct perf_event *event, unsigned int size, | 3084 | struct perf_event *event, unsigned int size, |
3085 | int nmi, int sample) | 3085 | int nmi, int sample) |
3086 | { | 3086 | { |
3087 | struct perf_mmap_data *data; | 3087 | struct perf_buffer *buffer; |
3088 | unsigned long tail, offset, head; | 3088 | unsigned long tail, offset, head; |
3089 | int have_lost; | 3089 | int have_lost; |
3090 | struct { | 3090 | struct { |
@@ -3100,19 +3100,19 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3100 | if (event->parent) | 3100 | if (event->parent) |
3101 | event = event->parent; | 3101 | event = event->parent; |
3102 | 3102 | ||
3103 | data = rcu_dereference(event->data); | 3103 | buffer = rcu_dereference(event->buffer); |
3104 | if (!data) | 3104 | if (!buffer) |
3105 | goto out; | 3105 | goto out; |
3106 | 3106 | ||
3107 | handle->data = data; | 3107 | handle->buffer = buffer; |
3108 | handle->event = event; | 3108 | handle->event = event; |
3109 | handle->nmi = nmi; | 3109 | handle->nmi = nmi; |
3110 | handle->sample = sample; | 3110 | handle->sample = sample; |
3111 | 3111 | ||
3112 | if (!data->nr_pages) | 3112 | if (!buffer->nr_pages) |
3113 | goto out; | 3113 | goto out; |
3114 | 3114 | ||
3115 | have_lost = local_read(&data->lost); | 3115 | have_lost = local_read(&buffer->lost); |
3116 | if (have_lost) | 3116 | if (have_lost) |
3117 | size += sizeof(lost_event); | 3117 | size += sizeof(lost_event); |
3118 | 3118 | ||
@@ -3124,30 +3124,30 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3124 | * tail pointer. So that all reads will be completed before the | 3124 | * tail pointer. So that all reads will be completed before the |
3125 | * write is issued. | 3125 | * write is issued. |
3126 | */ | 3126 | */ |
3127 | tail = ACCESS_ONCE(data->user_page->data_tail); | 3127 | tail = ACCESS_ONCE(buffer->user_page->data_tail); |
3128 | smp_rmb(); | 3128 | smp_rmb(); |
3129 | offset = head = local_read(&data->head); | 3129 | offset = head = local_read(&buffer->head); |
3130 | head += size; | 3130 | head += size; |
3131 | if (unlikely(!perf_output_space(data, tail, offset, head))) | 3131 | if (unlikely(!perf_output_space(buffer, tail, offset, head))) |
3132 | goto fail; | 3132 | goto fail; |
3133 | } while (local_cmpxchg(&data->head, offset, head) != offset); | 3133 | } while (local_cmpxchg(&buffer->head, offset, head) != offset); |
3134 | 3134 | ||
3135 | if (head - local_read(&data->wakeup) > data->watermark) | 3135 | if (head - local_read(&buffer->wakeup) > buffer->watermark) |
3136 | local_add(data->watermark, &data->wakeup); | 3136 | local_add(buffer->watermark, &buffer->wakeup); |
3137 | 3137 | ||
3138 | handle->page = offset >> (PAGE_SHIFT + page_order(data)); | 3138 | handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); |
3139 | handle->page &= data->nr_pages - 1; | 3139 | handle->page &= buffer->nr_pages - 1; |
3140 | handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1); | 3140 | handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); |
3141 | handle->addr = data->data_pages[handle->page]; | 3141 | handle->addr = buffer->data_pages[handle->page]; |
3142 | handle->addr += handle->size; | 3142 | handle->addr += handle->size; |
3143 | handle->size = (PAGE_SIZE << page_order(data)) - handle->size; | 3143 | handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; |
3144 | 3144 | ||
3145 | if (have_lost) { | 3145 | if (have_lost) { |
3146 | lost_event.header.type = PERF_RECORD_LOST; | 3146 | lost_event.header.type = PERF_RECORD_LOST; |
3147 | lost_event.header.misc = 0; | 3147 | lost_event.header.misc = 0; |
3148 | lost_event.header.size = sizeof(lost_event); | 3148 | lost_event.header.size = sizeof(lost_event); |
3149 | lost_event.id = event->id; | 3149 | lost_event.id = event->id; |
3150 | lost_event.lost = local_xchg(&data->lost, 0); | 3150 | lost_event.lost = local_xchg(&buffer->lost, 0); |
3151 | 3151 | ||
3152 | perf_output_put(handle, lost_event); | 3152 | perf_output_put(handle, lost_event); |
3153 | } | 3153 | } |
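The loop above is the heart of the output path: snapshot the user-visible data_tail, compute the would-be head, verify there is room, and claim the region with a compare-and-swap on buffer->head so that nested NMI/IRQ writers each end up with a disjoint slice; a failed space check bumps buffer->lost instead. A much simplified user-space model of the reservation step (the space check here is deliberately cruder than perf_output_space()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUF_SIZE 4096UL                     /* data area size, power of two */

struct out_ring {
    _Atomic unsigned long head;             /* where the next record goes     */
    unsigned long          tail;            /* published by the consumer side */
};

/* Simplified space check: the ring is full once head runs a whole buffer
 * ahead of tail (the kernel does this with mask arithmetic). */
static bool has_space(const struct out_ring *r, unsigned long new_head)
{
    return new_head - r->tail <= BUF_SIZE;
}

/* Claim 'size' bytes, mirroring the cmpxchg loop in perf_output_begin():
 * concurrent writers retry until their [offset, offset+size) slice sticks. */
static bool output_reserve(struct out_ring *r, unsigned long size,
                           unsigned long *offset)
{
    unsigned long old, new;

    do {
        old = atomic_load(&r->head);
        new = old + size;
        if (!has_space(r, new))
            return false;               /* caller counts this as a lost record */
    } while (!atomic_compare_exchange_weak(&r->head, &old, new));

    *offset = old;
    return true;
}

int main(void)
{
    struct out_ring r = { .head = 0, .tail = 0 };
    unsigned long off;

    if (output_reserve(&r, 64, &off))
        printf("reserved 64 bytes at offset %lu\n", off);
    return 0;
}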
@@ -3155,7 +3155,7 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3155 | return 0; | 3155 | return 0; |
3156 | 3156 | ||
3157 | fail: | 3157 | fail: |
3158 | local_inc(&data->lost); | 3158 | local_inc(&buffer->lost); |
3159 | perf_output_put_handle(handle); | 3159 | perf_output_put_handle(handle); |
3160 | out: | 3160 | out: |
3161 | rcu_read_unlock(); | 3161 | rcu_read_unlock(); |
@@ -3166,15 +3166,15 @@ out: | |||
3166 | void perf_output_end(struct perf_output_handle *handle) | 3166 | void perf_output_end(struct perf_output_handle *handle) |
3167 | { | 3167 | { |
3168 | struct perf_event *event = handle->event; | 3168 | struct perf_event *event = handle->event; |
3169 | struct perf_mmap_data *data = handle->data; | 3169 | struct perf_buffer *buffer = handle->buffer; |
3170 | 3170 | ||
3171 | int wakeup_events = event->attr.wakeup_events; | 3171 | int wakeup_events = event->attr.wakeup_events; |
3172 | 3172 | ||
3173 | if (handle->sample && wakeup_events) { | 3173 | if (handle->sample && wakeup_events) { |
3174 | int events = local_inc_return(&data->events); | 3174 | int events = local_inc_return(&buffer->events); |
3175 | if (events >= wakeup_events) { | 3175 | if (events >= wakeup_events) { |
3176 | local_sub(wakeup_events, &data->events); | 3176 | local_sub(wakeup_events, &buffer->events); |
3177 | local_inc(&data->wakeup); | 3177 | local_inc(&buffer->wakeup); |
3178 | } | 3178 | } |
3179 | } | 3179 | } |
3180 | 3180 | ||
@@ -3211,7 +3211,7 @@ static void perf_output_read_one(struct perf_output_handle *handle, | |||
3211 | u64 values[4]; | 3211 | u64 values[4]; |
3212 | int n = 0; | 3212 | int n = 0; |
3213 | 3213 | ||
3214 | values[n++] = atomic64_read(&event->count); | 3214 | values[n++] = perf_event_count(event); |
3215 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 3215 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
3216 | values[n++] = event->total_time_enabled + | 3216 | values[n++] = event->total_time_enabled + |
3217 | atomic64_read(&event->child_total_time_enabled); | 3217 | atomic64_read(&event->child_total_time_enabled); |
@@ -3248,7 +3248,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3248 | if (leader != event) | 3248 | if (leader != event) |
3249 | leader->pmu->read(leader); | 3249 | leader->pmu->read(leader); |
3250 | 3250 | ||
3251 | values[n++] = atomic64_read(&leader->count); | 3251 | values[n++] = perf_event_count(leader); |
3252 | if (read_format & PERF_FORMAT_ID) | 3252 | if (read_format & PERF_FORMAT_ID) |
3253 | values[n++] = primary_event_id(leader); | 3253 | values[n++] = primary_event_id(leader); |
3254 | 3254 | ||
@@ -3260,7 +3260,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3260 | if (sub != event) | 3260 | if (sub != event) |
3261 | sub->pmu->read(sub); | 3261 | sub->pmu->read(sub); |
3262 | 3262 | ||
3263 | values[n++] = atomic64_read(&sub->count); | 3263 | values[n++] = perf_event_count(sub); |
3264 | if (read_format & PERF_FORMAT_ID) | 3264 | if (read_format & PERF_FORMAT_ID) |
3265 | values[n++] = primary_event_id(sub); | 3265 | values[n++] = primary_event_id(sub); |
3266 | 3266 | ||
@@ -3491,7 +3491,7 @@ perf_event_read_event(struct perf_event *event, | |||
3491 | /* | 3491 | /* |
3492 | * task tracking -- fork/exit | 3492 | * task tracking -- fork/exit |
3493 | * | 3493 | * |
3494 | * enabled by: attr.comm | attr.mmap | attr.task | 3494 | * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task |
3495 | */ | 3495 | */ |
3496 | 3496 | ||
3497 | struct perf_task_event { | 3497 | struct perf_task_event { |
@@ -3541,7 +3541,8 @@ static int perf_event_task_match(struct perf_event *event) | |||
3541 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 3541 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
3542 | return 0; | 3542 | return 0; |
3543 | 3543 | ||
3544 | if (event->attr.comm || event->attr.mmap || event->attr.task) | 3544 | if (event->attr.comm || event->attr.mmap || |
3545 | event->attr.mmap_data || event->attr.task) | ||
3545 | return 1; | 3546 | return 1; |
3546 | 3547 | ||
3547 | return 0; | 3548 | return 0; |
@@ -3766,7 +3767,8 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
3766 | } | 3767 | } |
3767 | 3768 | ||
3768 | static int perf_event_mmap_match(struct perf_event *event, | 3769 | static int perf_event_mmap_match(struct perf_event *event, |
3769 | struct perf_mmap_event *mmap_event) | 3770 | struct perf_mmap_event *mmap_event, |
3771 | int executable) | ||
3770 | { | 3772 | { |
3771 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 3773 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
3772 | return 0; | 3774 | return 0; |
@@ -3774,19 +3776,21 @@ static int perf_event_mmap_match(struct perf_event *event, | |||
3774 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 3776 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
3775 | return 0; | 3777 | return 0; |
3776 | 3778 | ||
3777 | if (event->attr.mmap) | 3779 | if ((!executable && event->attr.mmap_data) || |
3780 | (executable && event->attr.mmap)) | ||
3778 | return 1; | 3781 | return 1; |
3779 | 3782 | ||
3780 | return 0; | 3783 | return 0; |
3781 | } | 3784 | } |
3782 | 3785 | ||
3783 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | 3786 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, |
3784 | struct perf_mmap_event *mmap_event) | 3787 | struct perf_mmap_event *mmap_event, |
3788 | int executable) | ||
3785 | { | 3789 | { |
3786 | struct perf_event *event; | 3790 | struct perf_event *event; |
3787 | 3791 | ||
3788 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3792 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3789 | if (perf_event_mmap_match(event, mmap_event)) | 3793 | if (perf_event_mmap_match(event, mmap_event, executable)) |
3790 | perf_event_mmap_output(event, mmap_event); | 3794 | perf_event_mmap_output(event, mmap_event); |
3791 | } | 3795 | } |
3792 | } | 3796 | } |
@@ -3830,6 +3834,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
3830 | if (!vma->vm_mm) { | 3834 | if (!vma->vm_mm) { |
3831 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | 3835 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); |
3832 | goto got_name; | 3836 | goto got_name; |
3837 | } else if (vma->vm_start <= vma->vm_mm->start_brk && | ||
3838 | vma->vm_end >= vma->vm_mm->brk) { | ||
3839 | name = strncpy(tmp, "[heap]", sizeof(tmp)); | ||
3840 | goto got_name; | ||
3841 | } else if (vma->vm_start <= vma->vm_mm->start_stack && | ||
3842 | vma->vm_end >= vma->vm_mm->start_stack) { | ||
3843 | name = strncpy(tmp, "[stack]", sizeof(tmp)); | ||
3844 | goto got_name; | ||
3833 | } | 3845 | } |
3834 | 3846 | ||
3835 | name = strncpy(tmp, "//anon", sizeof(tmp)); | 3847 | name = strncpy(tmp, "//anon", sizeof(tmp)); |
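The added branches give anonymous mappings a more useful name in MMAP records: a VMA covering the mm's brk range is reported as "[heap]" and one covering the start of the stack as "[stack]", with "//anon" remaining the fallback. The checks are plain interval containment:

#include <stdio.h>

struct mm_layout {
    unsigned long start_brk, brk;        /* the heap lives between these    */
    unsigned long start_stack;           /* userspace stack starts here     */
};

static const char *anon_vma_name(unsigned long vm_start, unsigned long vm_end,
                                 const struct mm_layout *mm)
{
    if (vm_start <= mm->start_brk && vm_end >= mm->brk)
        return "[heap]";
    if (vm_start <= mm->start_stack && vm_end >= mm->start_stack)
        return "[stack]";
    return "//anon";
}

int main(void)
{
    struct mm_layout mm = {
        .start_brk = 0x01000000, .brk = 0x01200000, .start_stack = 0x7fff0000,
    };

    printf("%s\n", anon_vma_name(0x01000000, 0x01200000, &mm));  /* [heap]  */
    printf("%s\n", anon_vma_name(0x7ffe0000, 0x7fff1000, &mm));  /* [stack] */
    printf("%s\n", anon_vma_name(0x30000000, 0x30010000, &mm));  /* //anon  */
    return 0;
}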
@@ -3846,17 +3858,17 @@ got_name: | |||
3846 | 3858 | ||
3847 | rcu_read_lock(); | 3859 | rcu_read_lock(); |
3848 | cpuctx = &get_cpu_var(perf_cpu_context); | 3860 | cpuctx = &get_cpu_var(perf_cpu_context); |
3849 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | 3861 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); |
3850 | ctx = rcu_dereference(current->perf_event_ctxp); | 3862 | ctx = rcu_dereference(current->perf_event_ctxp); |
3851 | if (ctx) | 3863 | if (ctx) |
3852 | perf_event_mmap_ctx(ctx, mmap_event); | 3864 | perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); |
3853 | put_cpu_var(perf_cpu_context); | 3865 | put_cpu_var(perf_cpu_context); |
3854 | rcu_read_unlock(); | 3866 | rcu_read_unlock(); |
3855 | 3867 | ||
3856 | kfree(buf); | 3868 | kfree(buf); |
3857 | } | 3869 | } |
3858 | 3870 | ||
3859 | void __perf_event_mmap(struct vm_area_struct *vma) | 3871 | void perf_event_mmap(struct vm_area_struct *vma) |
3860 | { | 3872 | { |
3861 | struct perf_mmap_event mmap_event; | 3873 | struct perf_mmap_event mmap_event; |
3862 | 3874 | ||
@@ -4018,14 +4030,14 @@ static u64 perf_swevent_set_period(struct perf_event *event) | |||
4018 | hwc->last_period = hwc->sample_period; | 4030 | hwc->last_period = hwc->sample_period; |
4019 | 4031 | ||
4020 | again: | 4032 | again: |
4021 | old = val = atomic64_read(&hwc->period_left); | 4033 | old = val = local64_read(&hwc->period_left); |
4022 | if (val < 0) | 4034 | if (val < 0) |
4023 | return 0; | 4035 | return 0; |
4024 | 4036 | ||
4025 | nr = div64_u64(period + val, period); | 4037 | nr = div64_u64(period + val, period); |
4026 | offset = nr * period; | 4038 | offset = nr * period; |
4027 | val -= offset; | 4039 | val -= offset; |
4028 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | 4040 | if (local64_cmpxchg(&hwc->period_left, old, val) != old) |
4029 | goto again; | 4041 | goto again; |
4030 | 4042 | ||
4031 | return nr; | 4043 | return nr; |
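perf_swevent_set_period() converts the accumulated overshoot in period_left into a number of elapsed sample periods: once period_left is non-negative, (period + period_left) / period whole periods have passed, and the remainder is written back as the negative distance to the next overflow (the local64_cmpxchg retry loop above makes that update race-free). The arithmetic on its own, outside the retry loop:

#include <stdint.h>
#include <stdio.h>

/* One pass of the arithmetic in perf_swevent_set_period(): given how far
 * period_left has run past zero, report how many sample periods elapsed
 * and what the new (negative) period_left becomes. */
static uint64_t set_period(int64_t *period_left, uint64_t period)
{
    int64_t val = *period_left;
    uint64_t nr;

    if (val < 0)                  /* not due yet: nothing to account */
        return 0;

    nr = (period + (uint64_t)val) / period;
    *period_left = val - (int64_t)(nr * period);
    return nr;
}

int main(void)
{
    int64_t left = 230;           /* events accumulated past the threshold */
    uint64_t nr = set_period(&left, 100);

    /* 230 past zero with a period of 100 -> 3 overflows, 70 events to go. */
    printf("overflows=%llu new period_left=%lld\n",
           (unsigned long long)nr, (long long)left);
    return 0;
}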
@@ -4064,7 +4076,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
4064 | { | 4076 | { |
4065 | struct hw_perf_event *hwc = &event->hw; | 4077 | struct hw_perf_event *hwc = &event->hw; |
4066 | 4078 | ||
4067 | atomic64_add(nr, &event->count); | 4079 | local64_add(nr, &event->count); |
4068 | 4080 | ||
4069 | if (!regs) | 4081 | if (!regs) |
4070 | return; | 4082 | return; |
@@ -4075,7 +4087,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
4075 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) | 4087 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
4076 | return perf_swevent_overflow(event, 1, nmi, data, regs); | 4088 | return perf_swevent_overflow(event, 1, nmi, data, regs); |
4077 | 4089 | ||
4078 | if (atomic64_add_negative(nr, &hwc->period_left)) | 4090 | if (local64_add_negative(nr, &hwc->period_left)) |
4079 | return; | 4091 | return; |
4080 | 4092 | ||
4081 | perf_swevent_overflow(event, 0, nmi, data, regs); | 4093 | perf_swevent_overflow(event, 0, nmi, data, regs); |
@@ -4213,14 +4225,12 @@ int perf_swevent_get_recursion_context(void) | |||
4213 | } | 4225 | } |
4214 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | 4226 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
4215 | 4227 | ||
4216 | void perf_swevent_put_recursion_context(int rctx) | 4228 | void inline perf_swevent_put_recursion_context(int rctx) |
4217 | { | 4229 | { |
4218 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 4230 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
4219 | barrier(); | 4231 | barrier(); |
4220 | cpuctx->recursion[rctx]--; | 4232 | cpuctx->recursion[rctx]--; |
4221 | } | 4233 | } |
4222 | EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); | ||
4223 | |||
4224 | 4234 | ||
4225 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 4235 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
4226 | struct pt_regs *regs, u64 addr) | 4236 | struct pt_regs *regs, u64 addr) |
@@ -4368,8 +4378,8 @@ static void cpu_clock_perf_event_update(struct perf_event *event) | |||
4368 | u64 now; | 4378 | u64 now; |
4369 | 4379 | ||
4370 | now = cpu_clock(cpu); | 4380 | now = cpu_clock(cpu); |
4371 | prev = atomic64_xchg(&event->hw.prev_count, now); | 4381 | prev = local64_xchg(&event->hw.prev_count, now); |
4372 | atomic64_add(now - prev, &event->count); | 4382 | local64_add(now - prev, &event->count); |
4373 | } | 4383 | } |
4374 | 4384 | ||
4375 | static int cpu_clock_perf_event_enable(struct perf_event *event) | 4385 | static int cpu_clock_perf_event_enable(struct perf_event *event) |
@@ -4377,7 +4387,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) | |||
4377 | struct hw_perf_event *hwc = &event->hw; | 4387 | struct hw_perf_event *hwc = &event->hw; |
4378 | int cpu = raw_smp_processor_id(); | 4388 | int cpu = raw_smp_processor_id(); |
4379 | 4389 | ||
4380 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4390 | local64_set(&hwc->prev_count, cpu_clock(cpu)); |
4381 | perf_swevent_start_hrtimer(event); | 4391 | perf_swevent_start_hrtimer(event); |
4382 | 4392 | ||
4383 | return 0; | 4393 | return 0; |
@@ -4409,9 +4419,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now) | |||
4409 | u64 prev; | 4419 | u64 prev; |
4410 | s64 delta; | 4420 | s64 delta; |
4411 | 4421 | ||
4412 | prev = atomic64_xchg(&event->hw.prev_count, now); | 4422 | prev = local64_xchg(&event->hw.prev_count, now); |
4413 | delta = now - prev; | 4423 | delta = now - prev; |
4414 | atomic64_add(delta, &event->count); | 4424 | local64_add(delta, &event->count); |
4415 | } | 4425 | } |
4416 | 4426 | ||
4417 | static int task_clock_perf_event_enable(struct perf_event *event) | 4427 | static int task_clock_perf_event_enable(struct perf_event *event) |
@@ -4421,7 +4431,7 @@ static int task_clock_perf_event_enable(struct perf_event *event) | |||
4421 | 4431 | ||
4422 | now = event->ctx->time; | 4432 | now = event->ctx->time; |
4423 | 4433 | ||
4424 | atomic64_set(&hwc->prev_count, now); | 4434 | local64_set(&hwc->prev_count, now); |
4425 | 4435 | ||
4426 | perf_swevent_start_hrtimer(event); | 4436 | perf_swevent_start_hrtimer(event); |
4427 | 4437 | ||
@@ -4601,7 +4611,7 @@ static int perf_tp_event_match(struct perf_event *event, | |||
4601 | } | 4611 | } |
4602 | 4612 | ||
4603 | void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | 4613 | void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, |
4604 | struct pt_regs *regs, struct hlist_head *head) | 4614 | struct pt_regs *regs, struct hlist_head *head, int rctx) |
4605 | { | 4615 | { |
4606 | struct perf_sample_data data; | 4616 | struct perf_sample_data data; |
4607 | struct perf_event *event; | 4617 | struct perf_event *event; |
@@ -4615,12 +4625,12 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | |||
4615 | perf_sample_data_init(&data, addr); | 4625 | perf_sample_data_init(&data, addr); |
4616 | data.raw = &raw; | 4626 | data.raw = &raw; |
4617 | 4627 | ||
4618 | rcu_read_lock(); | ||
4619 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 4628 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
4620 | if (perf_tp_event_match(event, &data, regs)) | 4629 | if (perf_tp_event_match(event, &data, regs)) |
4621 | perf_swevent_add(event, count, 1, &data, regs); | 4630 | perf_swevent_add(event, count, 1, &data, regs); |
4622 | } | 4631 | } |
4623 | rcu_read_unlock(); | 4632 | |
4633 | perf_swevent_put_recursion_context(rctx); | ||
4624 | } | 4634 | } |
4625 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4635 | EXPORT_SYMBOL_GPL(perf_tp_event); |
4626 | 4636 | ||
@@ -4864,7 +4874,7 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4864 | hwc->sample_period = 1; | 4874 | hwc->sample_period = 1; |
4865 | hwc->last_period = hwc->sample_period; | 4875 | hwc->last_period = hwc->sample_period; |
4866 | 4876 | ||
4867 | atomic64_set(&hwc->period_left, hwc->sample_period); | 4877 | local64_set(&hwc->period_left, hwc->sample_period); |
4868 | 4878 | ||
4869 | /* | 4879 | /* |
4870 | * we currently do not support PERF_FORMAT_GROUP on inherited events | 4880 | * we currently do not support PERF_FORMAT_GROUP on inherited events |
@@ -4913,7 +4923,7 @@ done: | |||
4913 | 4923 | ||
4914 | if (!event->parent) { | 4924 | if (!event->parent) { |
4915 | atomic_inc(&nr_events); | 4925 | atomic_inc(&nr_events); |
4916 | if (event->attr.mmap) | 4926 | if (event->attr.mmap || event->attr.mmap_data) |
4917 | atomic_inc(&nr_mmap_events); | 4927 | atomic_inc(&nr_mmap_events); |
4918 | if (event->attr.comm) | 4928 | if (event->attr.comm) |
4919 | atomic_inc(&nr_comm_events); | 4929 | atomic_inc(&nr_comm_events); |
@@ -5007,7 +5017,7 @@ err_size: | |||
5007 | static int | 5017 | static int |
5008 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) | 5018 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
5009 | { | 5019 | { |
5010 | struct perf_mmap_data *data = NULL, *old_data = NULL; | 5020 | struct perf_buffer *buffer = NULL, *old_buffer = NULL; |
5011 | int ret = -EINVAL; | 5021 | int ret = -EINVAL; |
5012 | 5022 | ||
5013 | if (!output_event) | 5023 | if (!output_event) |
@@ -5037,19 +5047,19 @@ set: | |||
5037 | 5047 | ||
5038 | if (output_event) { | 5048 | if (output_event) { |
5039 | /* get the buffer we want to redirect to */ | 5049 | /* get the buffer we want to redirect to */ |
5040 | data = perf_mmap_data_get(output_event); | 5050 | buffer = perf_buffer_get(output_event); |
5041 | if (!data) | 5051 | if (!buffer) |
5042 | goto unlock; | 5052 | goto unlock; |
5043 | } | 5053 | } |
5044 | 5054 | ||
5045 | old_data = event->data; | 5055 | old_buffer = event->buffer; |
5046 | rcu_assign_pointer(event->data, data); | 5056 | rcu_assign_pointer(event->buffer, buffer); |
5047 | ret = 0; | 5057 | ret = 0; |
5048 | unlock: | 5058 | unlock: |
5049 | mutex_unlock(&event->mmap_mutex); | 5059 | mutex_unlock(&event->mmap_mutex); |
5050 | 5060 | ||
5051 | if (old_data) | 5061 | if (old_buffer) |
5052 | perf_mmap_data_put(old_data); | 5062 | perf_buffer_put(old_buffer); |
5053 | out: | 5063 | out: |
5054 | return ret; | 5064 | return ret; |
5055 | } | 5065 | } |
@@ -5298,7 +5308,7 @@ inherit_event(struct perf_event *parent_event, | |||
5298 | hwc->sample_period = sample_period; | 5308 | hwc->sample_period = sample_period; |
5299 | hwc->last_period = sample_period; | 5309 | hwc->last_period = sample_period; |
5300 | 5310 | ||
5301 | atomic64_set(&hwc->period_left, sample_period); | 5311 | local64_set(&hwc->period_left, sample_period); |
5302 | } | 5312 | } |
5303 | 5313 | ||
5304 | child_event->overflow_handler = parent_event->overflow_handler; | 5314 | child_event->overflow_handler = parent_event->overflow_handler; |
@@ -5359,12 +5369,12 @@ static void sync_child_event(struct perf_event *child_event, | |||
5359 | if (child_event->attr.inherit_stat) | 5369 | if (child_event->attr.inherit_stat) |
5360 | perf_event_read_event(child_event, child); | 5370 | perf_event_read_event(child_event, child); |
5361 | 5371 | ||
5362 | child_val = atomic64_read(&child_event->count); | 5372 | child_val = perf_event_count(child_event); |
5363 | 5373 | ||
5364 | /* | 5374 | /* |
5365 | * Add back the child's count to the parent's count: | 5375 | * Add back the child's count to the parent's count: |
5366 | */ | 5376 | */ |
5367 | atomic64_add(child_val, &parent_event->count); | 5377 | atomic64_add(child_val, &parent_event->child_count); |
5368 | atomic64_add(child_event->total_time_enabled, | 5378 | atomic64_add(child_event->total_time_enabled, |
5369 | &parent_event->child_total_time_enabled); | 5379 | &parent_event->child_total_time_enabled); |
5370 | atomic64_add(child_event->total_time_running, | 5380 | atomic64_add(child_event->total_time_running, |