diff options
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e928e1af7b71..43c1dfb7b386 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2612,11 +2612,26 @@ static void perf_pending_event(struct perf_pending_entry *entry)
 		__perf_event_disable(event);
 	}
 
+#ifndef CONFIG_PREEMPT_RT
 	if (event->pending_wakeup) {
 		event->pending_wakeup = 0;
 		perf_event_wakeup(event);
 	}
+#endif
+}
+
+#ifdef CONFIG_PREEMPT_RT
+static void perf_pending_counter_softirq(struct perf_pending_entry *entry)
+{
+	struct perf_event *counter = container_of(entry,
+			struct perf_event, pending_softirq);
+
+	if (counter->pending_wakeup) {
+		counter->pending_wakeup = 0;
+		perf_event_wakeup(counter);
+	}
 }
+#endif
 
 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
@@ -2624,33 +2639,42 @@ static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
 	PENDING_TAIL,
 };
 
-static void perf_pending_queue(struct perf_pending_entry *entry,
-			       void (*func)(struct perf_pending_entry *))
-{
-	struct perf_pending_entry **head;
+static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_softirq_head) = {
+	PENDING_TAIL,
+};
 
+static void __perf_pending_queue(struct perf_pending_entry **head,
+				 struct perf_pending_entry *entry,
+				 void (*func)(struct perf_pending_entry *))
+{
 	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
 		return;
 
 	entry->func = func;
 
-	head = &get_cpu_var(perf_pending_head);
-
 	do {
 		entry->next = *head;
 	} while (cmpxchg(head, entry->next, entry) != entry->next);
+}
 
-	set_perf_event_pending();
+static void perf_pending_queue(struct perf_pending_entry *entry,
+			       void (*func)(struct perf_pending_entry *))
+{
+	struct perf_pending_entry **head;
 
+	head = &get_cpu_var(perf_pending_head);
+	__perf_pending_queue(head, entry, func);
 	put_cpu_var(perf_pending_head);
+
+	set_perf_event_pending();
 }
 
-static int __perf_pending_run(void)
+static int __perf_pending_run(struct perf_pending_entry **head)
 {
 	struct perf_pending_entry *list;
 	int nr = 0;
 
-	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
+	list = xchg(head, PENDING_TAIL);
 	while (list != PENDING_TAIL) {
 		void (*func)(struct perf_pending_entry *);
 		struct perf_pending_entry *entry = list;
@@ -2680,7 +2704,8 @@ static inline int perf_not_pending(struct perf_event *event)
 	 * need to wait.
 	 */
 	get_cpu();
-	__perf_pending_run();
+	__perf_pending_run(&__get_cpu_var(perf_pending_head));
+	__perf_pending_run(&__get_cpu_var(perf_pending_softirq_head));
 	put_cpu();
 
 	/*
@@ -2698,7 +2723,13 @@ static void perf_pending_sync(struct perf_event *event)
 
 void perf_event_do_pending(void)
 {
-	__perf_pending_run();
+	__perf_pending_run(&__get_cpu_var(perf_pending_head));
+}
+
+void perf_event_do_pending_softirq(void)
+{
+	__perf_pending_run(&__get_cpu_var(perf_pending_head));
+	__perf_pending_run(&__get_cpu_var(perf_pending_softirq_head));
 }
 
 /*
@@ -2736,12 +2767,18 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 {
 	atomic_set(&handle->data->poll, POLL_IN);
 
+#ifndef CONFIG_PREEMPT_RT
 	if (handle->nmi) {
 		handle->event->pending_wakeup = 1;
 		perf_pending_queue(&handle->event->pending,
 				   perf_pending_event);
 	} else
 		perf_event_wakeup(handle->event);
+#else
+	__perf_pending_queue(&__get_cpu_var(perf_pending_softirq_head),
+			     &handle->event->pending_softirq,
+			     perf_pending_counter_softirq);
+#endif
 }
 
 /*