author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 11:02:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 11:02:58 -0500
commit	35b740e4662ef386f0c60e1b60aaf5b44db9914c (patch)
tree	502a8f9499bc1b4cb3300d666dab2d01a1921224 /kernel/events/core.c
parent	423d091dfe58d3109d84c408810a7cfa82f6f184 (diff)
parent	9e183426bfb52bb44bf3c443d6587e4d02478603 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (106 commits)
perf kvm: Fix copy & paste error in description
perf script: Kill script_spec__delete
perf top: Fix a memory leak
perf stat: Introduce get_ratio_color() helper
perf session: Remove impossible condition check
perf tools: Fix feature-bits rework fallout, remove unused variable
perf script: Add generic perl handler to process events
perf tools: Use for_each_set_bit() to iterate over feature flags
perf tools: Unify handling of features when writing feature section
perf report: Accept fifos as input file
perf tools: Moving code in some files
perf tools: Fix out-of-bound access to struct perf_session
perf tools: Continue processing header on unknown features
perf tools: Improve macros for struct feature_ops
perf: builtin-record: Document and check that mmap_pages must be a power of two.
perf: builtin-record: Provide advice if mmap'ing fails with EPERM.
perf tools: Fix truncated annotation
perf script: look up thread using tid instead of pid
perf tools: Look up thread names for system wide profiling
perf tools: Fix comm for processes with named threads
...
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c	296
1 files changed, 53 insertions, 243 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fc0e7ff11dda..890eb02c2f21 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
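
The key change in this hunk is the type of perf_sched_events: jump_label_key becomes jump_label_key_deferred, whose embedded plain key still lets enables patch the code immediately, while disables are batched and rate-limited (see the jump_label_rate_limit() call added to perf_event_init() at the bottom of this diff). A minimal sketch of the pattern, using only the 3.3-era API names that appear in this diff; my_events and its helpers are hypothetical, not part of the patch:

#include <linux/jump_label.h>	/* jump_label_key_deferred, circa v3.3 */

/* hypothetical deferred key, mirroring perf_sched_events above */
static struct jump_label_key_deferred my_events __read_mostly;

static void my_events_get(void)
{
	/* enable is immediate: bump the embedded plain key */
	jump_label_inc(&my_events.key);
}

static void my_events_put(void)
{
	/* disable is deferred: the actual text-unpatching is rate-limited */
	jump_label_dec_deferred(&my_events);
}

static void __init my_events_init(void)
{
	/* patch the jump label at most once per second on the dec side */
	jump_label_rate_limit(&my_events, HZ);
}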
@@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu--;
 	ctx->nr_active--;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 }
@@ -1325,6 +1327,7 @@ retry:
 	}
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
 				 struct perf_event_context *ctx,
@@ -1406,6 +1409,8 @@ event_sched_in(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq++;
 
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
@@ -1662,8 +1667,7 @@ retry:
  * Note: this works for group members as well as group leaders
  * since the non-leader members' sibling_lists will be empty.
  */
-static void __perf_event_mark_enabled(struct perf_event *event,
-				      struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
 {
 	struct perf_event *sub;
 	u64 tstamp = perf_event_time(event);
@@ -1701,7 +1705,7 @@ static int __perf_event_enable(void *info)
 	 */
 	perf_cgroup_set_timestamp(current, ctx);
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	if (!event_filter_match(event)) {
 		if (is_cgroup_event(event))
@@ -1782,7 +1786,7 @@ void perf_event_enable(struct perf_event *event)
 
 retry:
 	if (!ctx->is_active) {
-		__perf_event_mark_enabled(event, ctx);
+		__perf_event_mark_enabled(event);
 		goto out;
 	}
 
@@ -1809,6 +1813,7 @@ retry:
 out:
 	raw_spin_unlock_irq(&ctx->lock);
 }
+EXPORT_SYMBOL_GPL(perf_event_enable);
 
 int perf_event_refresh(struct perf_event *event, int refresh)
 {
@@ -2327,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 	u64 interrupts, now;
 	s64 delta;
 
+	if (!ctx->nr_freq)
+		return;
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
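
Together with the event_sched_in()/event_sched_out() hunks above, ctx->nr_freq is a straight count of active events that sample by frequency, so the per-tick perf_ctx_adjust_freq() can now return before touching the RCU-protected event list at all. A hedged restatement of the bookkeeping (field and flag names as in the diff, surrounding code elided):

/* schedule-in: track events whose period is derived from a target frequency */
if (event->attr.freq && event->attr.sample_freq)
	ctx->nr_freq++;

/* schedule-out: the mirrored decrement keeps the count exact */
if (event->attr.freq && event->attr.sample_freq)
	ctx->nr_freq--;

/* tick path: nothing to adjust, skip the whole list walk */
if (!ctx->nr_freq)
	return;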
@@ -2382,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1;
+	int rotate = 0, remove = 1, freq = 0;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
+		if (cpuctx->ctx.nr_freq)
+			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2395,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
+		if (ctx->nr_freq)
+			freq = 1;
 	}
 
+	if (!rotate && !freq)
+		goto done;
+
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
-	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-	if (ctx)
-		perf_ctx_adjust_freq(ctx, interval);
 
-	if (!rotate)
-		goto done;
+	if (freq) {
+		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+		if (ctx)
+			perf_ctx_adjust_freq(ctx, interval);
+	}
 
-	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	if (ctx)
-		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	if (rotate) {
+		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+		if (ctx)
+			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-	rotate_ctx(&cpuctx->ctx);
-	if (ctx)
-		rotate_ctx(ctx);
+		rotate_ctx(&cpuctx->ctx);
+		if (ctx)
+			rotate_ctx(ctx);
 
-	perf_event_sched_in(cpuctx, ctx, current);
+		perf_event_sched_in(cpuctx, ctx, current);
+	}
+
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
-
-	perf_pmu_enable(cpuctx->ctx.pmu);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 void perf_event_task_tick(void)
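
The restructured perf_rotate_context() now only takes the context lock and toggles the PMU when there is real work to do: frequency adjustment, rotation of flexible events, or both. A hedged outline of the resulting control flow (helper names as in the diff, bodies elided):

static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	/* ... compute rotate, freq, remove as in the hunks above ... */

	if (!rotate && !freq)
		goto done;	/* fast path: no lock, no PMU toggle */

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);
	if (freq) {
		/* perf_ctx_adjust_freq() on the cpu and task contexts */
	}
	if (rotate) {
		/* sched out flexible events, rotate_ctx(), sched back in */
	}
	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}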
@@ -2448,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event,
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	__perf_event_mark_enabled(event, ctx);
+	__perf_event_mark_enabled(event);
 
 	return 1;
 }
@@ -2480,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
 
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		ret = event_enable_on_exec(event, ctx);
-		if (ret)
-			enabled = 1;
-	}
-
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+	list_for_each_entry(event, &ctx->event_list, event_entry) {
 		ret = event_enable_on_exec(event, ctx);
 		if (ret)
 			enabled = 1;
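
Every event in a context is linked on ctx->event_list regardless of whether it sits in the pinned or the flexible group, so one walk of that list on exec has the same effect as the two group walks it replaces. A hedged sketch of the simplified loop (types and helpers as in kernel/events/core.c):

struct perf_event *event;
int enabled = 0, ret;

list_for_each_entry(event, &ctx->event_list, event_entry) {
	ret = event_enable_on_exec(event, ctx);	/* 1 if the event's state changed */
	if (ret)
		enabled = 1;
}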
@@ -2574,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event)
 }
 
 /*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
-	struct rcu_head rcu_head;
-	struct perf_callchain_entry *cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
-	struct callchain_cpus_entries *entries;
-	int cpu;
-
-	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-
-	kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
-	struct callchain_cpus_entries *entries;
-
-	entries = callchain_cpus_entries;
-	rcu_assign_pointer(callchain_cpus_entries, NULL);
-	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
-	int cpu;
-	int size;
-	struct callchain_cpus_entries *entries;
-
-	/*
-	 * We can't use the percpu allocation API for data that can be
-	 * accessed from NMI. Use a temporary manual per cpu allocation
-	 * until that gets sorted out.
-	 */
-	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
-	entries = kzalloc(size, GFP_KERNEL);
-	if (!entries)
-		return -ENOMEM;
-
-	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
-	for_each_possible_cpu(cpu) {
-		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
-							 cpu_to_node(cpu));
-		if (!entries->cpu_entries[cpu])
-			goto fail;
-	}
-
-	rcu_assign_pointer(callchain_cpus_entries, entries);
-
-	return 0;
-
-fail:
-	for_each_possible_cpu(cpu)
-		kfree(entries->cpu_entries[cpu]);
-	kfree(entries);
-
-	return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
-	int err = 0;
-	int count;
-
-	mutex_lock(&callchain_mutex);
-
-	count = atomic_inc_return(&nr_callchain_events);
-	if (WARN_ON_ONCE(count < 1)) {
-		err = -EINVAL;
-		goto exit;
-	}
-
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		goto exit;
-	}
-
-	err = alloc_callchain_buffers();
-	if (err)
-		release_callchain_buffers();
-exit:
-	mutex_unlock(&callchain_mutex);
-
-	return err;
-}
-
-static void put_callchain_buffers(void)
-{
-	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
-		release_callchain_buffers();
-		mutex_unlock(&callchain_mutex);
-	}
-}
-
-static int get_recursion_context(int *recursion)
-{
-	int rctx;
-
-	if (in_nmi())
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
-
-	if (recursion[rctx])
-		return -1;
-
-	recursion[rctx]++;
-	barrier();
-
-	return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
-	barrier();
-	recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
-	int cpu;
-	struct callchain_cpus_entries *entries;
-
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
-	if (*rctx == -1)
-		return NULL;
-
-	entries = rcu_dereference(callchain_cpus_entries);
-	if (!entries)
-		return NULL;
-
-	cpu = smp_processor_id();
-
-	return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	int rctx;
-	struct perf_callchain_entry *entry;
-
-
-	entry = get_callchain_entry(&rctx);
-	if (rctx == -1)
-		return NULL;
-
-	if (!entry)
-		goto exit_put;
-
-	entry->nr = 0;
-
-	if (!user_mode(regs)) {
-		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
-	}
-
-exit_put:
-	put_callchain_entry(rctx);
-
-	return entry;
-}
-
-/*
  * Initialize the perf_event context in a task_struct:
  */
 static void __perf_event_init_context(struct perf_event_context *ctx)
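
The ~200 deleted lines are the entire callchain machinery: per-cpu sample buffers allocated by hand because they are touched from NMI context (the percpu allocator could not be used there), refcounted setup and teardown under callchain_mutex, and a recursion guard with one slot per interrupt context. In this series the code moves out of core.c rather than disappearing; the diffstat above is limited to core.c, so the destination (kernel/events/callchain.c, as far as I can tell) is not shown here. The recursion guard is the subtle part and worth restating, distilled from the removed get_recursion_context(): a separate slot per context level means an NMI that lands on top of an IRQ handler still gets its own buffer:

/* one recursion counter per context level: task=0, softirq=1, hardirq=2, NMI=3 */
static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])	/* already unwinding at this level: bail */
		return -1;

	recursion[rctx]++;
	barrier();		/* order the flag update against the unwind */

	return rctx;
}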
@@ -2946,7 +2748,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2957,7 +2759,7 @@ static void free_event(struct perf_event *event)
 			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec(&perf_sched_events);
+			jump_label_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -4820,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
 
-	data->period = event->hw.last_period;
 	if (!overflow)
 		overflow = perf_swevent_set_period(event);
 
@@ -4854,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
 	if (!is_sampling_event(event))
 		return;
 
+	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+		data->period = nr;
+		return perf_swevent_overflow(event, 1, data, regs);
+	} else
+		data->period = event->hw.last_period;
+
 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
 		return perf_swevent_overflow(event, 1, data, regs);
 
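
The new branch in perf_swevent_event() lets a non-freq software event that requested PERF_SAMPLE_PERIOD report the value passed in (nr) as its period and overflow immediately, instead of the stale hw.last_period; every other path now loads last_period here, which is why the unconditional assignment in perf_swevent_overflow() was dropped two hunks up. A hedged restatement of the period selection (names as in kernel/events/core.c):

if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
	data->period = nr;	/* the sample carries the event value itself */
	return perf_swevent_overflow(event, 1, data, regs);
}
data->period = event->hw.last_period;	/* classic period-based sampling */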
@@ -5981,7 +5788,7 @@ done:
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events);
+			jump_label_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6219,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events);
+		jump_label_inc(&perf_sched_events.key);
 	}
 
 	/*
@@ -7065,6 +6872,9 @@ void __init perf_event_init(void)
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+	/* do not patch jump label more than once per second */
+	jump_label_rate_limit(&perf_sched_events, HZ);
 }
 
 static int __init perf_event_sysfs_init(void)