Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 516 ++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 311 insertions(+), 205 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..b0b20a07f394 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -146,6 +147,28 @@ static void put_ctx(struct perf_counter_context *ctx)
        }
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+       if (ctx->parent_ctx) {
+               put_ctx(ctx->parent_ctx);
+               ctx->parent_ctx = NULL;
+       }
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+       u64 id = counter->id;
+
+       if (counter->parent)
+               id = counter->parent->id;
+
+       return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with the fact that until it is locked,
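The two helpers added above are used throughout the rest of this patch. As a review aid, a sketch (not kernel code) of the invariant primary_counter_id() depends on, assuming the inheritance code keeps parent chains one level deep, i.e. clones of clones still point at the original counter:

    /*
     * For any counter c inherited from a top-level parent p:
     *
     *     c->parent == p, p->parent == NULL
     *  => primary_counter_id(c) == primary_counter_id(p) == p->id
     *
     * so every counter in an inheritance tree reports the one id
     * userspace originally obtained when it created p.
     */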
@@ -1081,7 +1104,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
                __perf_counter_sync_stat(counter, next_counter);
 
                counter = list_next_entry(counter, event_entry);
-               next_counter = list_next_entry(counter, event_entry);
+               next_counter = list_next_entry(next_counter, event_entry);
        }
 }
 
@@ -1288,7 +1311,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1329,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
        if (!sample_period)
                sample_period = 1;
 
-       perf_log_period(counter, sample_period);
-
        hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1483,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
        /*
         * Unclone this context if we enabled any counter.
         */
-       if (enabled && ctx->parent_ctx) {
-               put_ctx(ctx->parent_ctx);
-               ctx->parent_ctx = NULL;
-       }
+       if (enabled)
+               unclone_ctx(ctx);
 
        spin_unlock(&ctx->lock);
 
@@ -1526,7 +1544,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-       struct perf_counter_context *parent_ctx;
        struct perf_counter_context *ctx;
        struct perf_cpu_context *cpuctx;
        struct task_struct *task;
@@ -1586,11 +1603,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
  retry:
        ctx = perf_lock_task_context(task, &flags);
        if (ctx) {
-               parent_ctx = ctx->parent_ctx;
-               if (parent_ctx) {
-                       put_ctx(parent_ctx);
-                       ctx->parent_ctx = NULL; /* no longer a clone */
-               }
+               unclone_ctx(ctx);
                spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
@@ -1642,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
                        atomic_dec(&nr_mmap_counters);
                if (counter->attr.comm)
                        atomic_dec(&nr_comm_counters);
+               if (counter->attr.task)
+                       atomic_dec(&nr_task_counters);
        }
 
        if (counter->destroy)
@@ -1676,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static u64 perf_counter_read_tree(struct perf_counter *counter)
+{
+       struct perf_counter *child;
+       u64 total = 0;
+
+       total += perf_counter_read(counter);
+       list_for_each_entry(child, &counter->child_list, child_list)
+               total += perf_counter_read(child);
+
+       return total;
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
@@ -1695,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 
        WARN_ON_ONCE(counter->ctx->parent_ctx);
        mutex_lock(&counter->child_mutex);
-       values[0] = perf_counter_read(counter);
+       values[0] = perf_counter_read_tree(counter);
        n = 1;
        if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = counter->total_time_enabled +
@@ -1704,7 +1731,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
                values[n++] = counter->total_time_running +
                        atomic64_read(&counter->child_total_time_running);
        if (counter->attr.read_format & PERF_FORMAT_ID)
-               values[n++] = counter->id;
+               values[n++] = primary_counter_id(counter);
        mutex_unlock(&counter->child_mutex);
 
        if (count < n * sizeof(u64))
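With these two changes, read() still returns the same field layout, but the count now aggregates the whole inheritance tree and the id is stable across clones. A hypothetical userspace sketch of parsing that layout (the PERF_FORMAT_* flags come from the matching linux/perf_counter.h; everything else is illustrative):

    #include <stdint.h>
    #include <unistd.h>
    #include <linux/perf_counter.h>

    struct counter_reading {
            uint64_t count, time_enabled, time_running, id;
    };

    /* Read one counter fd, decoding fields in the order the kernel
     * emits them: count first, then each optional field if its flag
     * is set in read_format. */
    static int read_counter(int fd, uint64_t read_format,
                            struct counter_reading *r)
    {
            uint64_t buf[4];
            size_t i = 0;

            if (read(fd, buf, sizeof(buf)) < (ssize_t)sizeof(uint64_t))
                    return -1;
            r->count = buf[i++];
            if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                    r->time_enabled = buf[i++];
            if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                    r->time_running = buf[i++];
            if (read_format & PERF_FORMAT_ID)
                    r->id = buf[i++];
            return 0;
    }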
@@ -1811,8 +1838,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
                counter->attr.sample_freq = value;
        } else {
-               perf_log_period(counter, value);
-
                counter->attr.sample_period = value;
                counter->hw.sample_period = value;
        }
@@ -2661,10 +2686,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
        if (sample_type & PERF_SAMPLE_ID)
                header.size += sizeof(u64);
 
+       if (sample_type & PERF_SAMPLE_STREAM_ID)
+               header.size += sizeof(u64);
+
        if (sample_type & PERF_SAMPLE_CPU) {
                header.size += sizeof(cpu_entry);
 
                cpu_entry.cpu = raw_smp_processor_id();
+               cpu_entry.reserved = 0;
        }
 
        if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2685,6 +2714,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                header.size += sizeof(u64);
        }
 
+       if (sample_type & PERF_SAMPLE_RAW) {
+               int size = sizeof(u32);
+
+               if (data->raw)
+                       size += data->raw->size;
+               else
+                       size += sizeof(u32);
+
+               WARN_ON_ONCE(size & (sizeof(u64)-1));
+               header.size += size;
+       }
+
        ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
        if (ret)
                return;
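The sizing above fixes the on-the-wire layout of a raw sample. A sketch of what the code computes (derived from the diff, not a separate definition):

    /*
     * PERF_SAMPLE_RAW record:
     *
     *     u32  size;
     *     char data[size];    // caller pads so (sizeof(u32) + size)
     *                         // is a multiple of sizeof(u64)
     *
     * With no raw data attached the kernel emits { .size = 4, .data = 0 }
     * (see the output side below), keeping the sample u64-aligned,
     * which is exactly what the WARN_ON_ONCE checks.
     */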
@@ -2703,7 +2744,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
        if (sample_type & PERF_SAMPLE_ADDR)
                perf_output_put(&handle, data->addr);
 
-       if (sample_type & PERF_SAMPLE_ID)
+       if (sample_type & PERF_SAMPLE_ID) {
+               u64 id = primary_counter_id(counter);
+
+               perf_output_put(&handle, id);
+       }
+
+       if (sample_type & PERF_SAMPLE_STREAM_ID)
                perf_output_put(&handle, counter->id);
 
        if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2773,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                        if (sub != counter)
                                sub->pmu->read(sub);
 
-                       group_entry.id = sub->id;
+                       group_entry.id = primary_counter_id(sub);
                        group_entry.counter = atomic64_read(&sub->count);
 
                        perf_output_put(&handle, group_entry);
@@ -2742,6 +2789,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                }
        }
 
+       if (sample_type & PERF_SAMPLE_RAW) {
+               if (data->raw) {
+                       perf_output_put(&handle, data->raw->size);
+                       perf_output_copy(&handle, data->raw->data, data->raw->size);
+               } else {
+                       struct {
+                               u32     size;
+                               u32     data;
+                       } raw = {
+                               .size = sizeof(u32),
+                               .data = 0,
+                       };
+                       perf_output_put(&handle, raw);
+               }
+       }
+
        perf_output_end(&handle);
 }
 
@@ -2786,15 +2849,8 @@ perf_counter_read_event(struct perf_counter *counter,
        }
 
        if (counter->attr.read_format & PERF_FORMAT_ID) {
-               u64 id;
-
                event.header.size += sizeof(u64);
-               if (counter->parent)
-                       id = counter->parent->id;
-               else
-                       id = counter->id;
-
-               event.format[i++] = id;
+               event.format[i++] = primary_counter_id(counter);
        }
 
        ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2806,48 +2862,56 @@ perf_counter_read_event(struct perf_counter *counter,
 
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
-       struct task_struct      *task;
+struct perf_task_event {
+       struct task_struct      *task;
+       struct perf_counter_context     *task_ctx;
 
        struct {
                struct perf_event_header        header;
 
                u32                             pid;
                u32                             ppid;
+               u32                             tid;
+               u32                             ptid;
        } event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-                                    struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+                                    struct perf_task_event *task_event)
 {
        struct perf_output_handle handle;
-       int size = fork_event->event.header.size;
-       struct task_struct *task = fork_event->task;
+       int size = task_event->event.header.size;
+       struct task_struct *task = task_event->task;
        int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
        if (ret)
                return;
 
-       fork_event->event.pid = perf_counter_pid(counter, task);
-       fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+       task_event->event.pid = perf_counter_pid(counter, task);
+       task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
 
-       perf_output_put(&handle, fork_event->event);
+       task_event->event.tid = perf_counter_tid(counter, task);
+       task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+
+       perf_output_put(&handle, task_event->event);
        perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-       if (counter->attr.comm || counter->attr.mmap)
+       if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
                return 1;
 
        return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-                                 struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+                                 struct perf_task_event *task_event)
 {
        struct perf_counter *counter;
 
@@ -2856,51 +2920,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
        rcu_read_lock();
        list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-               if (perf_counter_fork_match(counter))
-                       perf_counter_fork_output(counter, fork_event);
+               if (perf_counter_task_match(counter))
+                       perf_counter_task_output(counter, task_event);
        }
        rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
        struct perf_cpu_context *cpuctx;
-       struct perf_counter_context *ctx;
+       struct perf_counter_context *ctx = task_event->task_ctx;
 
        cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+       perf_counter_task_ctx(&cpuctx->ctx, task_event);
        put_cpu_var(perf_cpu_context);
 
        rcu_read_lock();
-       /*
-        * doesn't really matter which of the child contexts the
-        * events ends up in.
-        */
-       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (!ctx)
+               ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
        if (ctx)
-               perf_counter_fork_ctx(ctx, fork_event);
+               perf_counter_task_ctx(ctx, task_event);
        rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task,
+                             struct perf_counter_context *task_ctx,
+                             int new)
 {
-       struct perf_fork_event fork_event;
+       struct perf_task_event task_event;
 
        if (!atomic_read(&nr_comm_counters) &&
-           !atomic_read(&nr_mmap_counters))
+           !atomic_read(&nr_mmap_counters) &&
+           !atomic_read(&nr_task_counters))
                return;
 
-       fork_event = (struct perf_fork_event){
-               .task   = task,
-               .event  = {
+       task_event = (struct perf_task_event){
+               .task           = task,
+               .task_ctx       = task_ctx,
+               .event          = {
                        .header = {
-                               .type = PERF_EVENT_FORK,
-                               .size = sizeof(fork_event.event),
+                               .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
+                               .misc = 0,
+                               .size = sizeof(task_event.event),
                        },
+                       /* .pid  */
+                       /* .ppid */
+                       /* .tid  */
+                       /* .ptid */
                },
        };
 
-       perf_counter_fork_event(&fork_event);
+       perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+       perf_counter_task(task, NULL, 1);
 }
 
 /*
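Because PERF_EVENT_FORK and PERF_EVENT_EXIT now share this record layout, one consumer-side handler can serve both. A hypothetical userspace sketch (the struct mirrors the anonymous struct in perf_task_event; the names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <linux/perf_counter.h>

    struct task_record {
            struct perf_event_header header; /* PERF_EVENT_FORK or _EXIT */
            uint32_t pid, ppid;
            uint32_t tid, ptid;
    };

    static void handle_task_record(const struct task_record *rec)
    {
            if (rec->header.type == PERF_EVENT_FORK)
                    printf("fork: %u/%u from %u/%u\n",
                           rec->pid, rec->tid, rec->ppid, rec->ptid);
            else
                    printf("exit: %u/%u\n", rec->pid, rec->tid);
    }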
@@ -2968,8 +3043,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
        struct perf_cpu_context *cpuctx;
        struct perf_counter_context *ctx;
        unsigned int size;
-       char *comm = comm_event->task->comm;
+       char comm[TASK_COMM_LEN];
 
+       memset(comm, 0, sizeof(comm));
+       strncpy(comm, comm_event->task->comm, sizeof(comm));
        size = ALIGN(strlen(comm)+1, sizeof(u64));
 
        comm_event->comm = comm;
@@ -3004,8 +3081,16 @@ void perf_counter_comm(struct task_struct *task)
 
        comm_event = (struct perf_comm_event){
                .task   = task,
+               /* .comm      */
+               /* .comm_size */
                .event  = {
-                       .header = { .type = PERF_EVENT_COMM, },
+                       .header = {
+                               .type = PERF_EVENT_COMM,
+                               .misc = 0,
+                               /* .size */
+                       },
+                       /* .pid */
+                       /* .tid */
                },
        };
 
@@ -3088,8 +3173,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
        char *buf = NULL;
        const char *name;
 
+       memset(tmp, 0, sizeof(tmp));
+
        if (file) {
-               buf = kzalloc(PATH_MAX, GFP_KERNEL);
+               /*
+                * d_path works from the end of the buffer backwards, so we
+                * need to add enough zero bytes after the string to handle
+                * the 64bit alignment we do later.
+                */
+               buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
                if (!buf) {
                        name = strncpy(tmp, "//enomem", sizeof(tmp));
                        goto got_name;
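To make the allocation comment concrete: the sizing later done at got_name rounds the string up to a u64 boundary, so up to seven bytes past the terminating NUL are copied into the event. A sketch of the arithmetic:

    /*
     * size = ALIGN(strlen(name) + 1, sizeof(u64));
     *
     * e.g. strlen("/lib/libc.so") == 12  ->  size = ALIGN(13, 8) = 16,
     * so 3 bytes beyond the NUL are emitted. d_path() writes the name
     * at the *end* of buf; allocating PATH_MAX + sizeof(u64) with
     * kzalloc() guarantees those trailing bytes exist and are zero.
     */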
@@ -3100,9 +3192,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
                        goto got_name;
                }
        } else {
-               name = arch_vma_name(mmap_event->vma);
-               if (name)
+               if (arch_vma_name(mmap_event->vma)) {
+                       name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+                                      sizeof(tmp));
                        goto got_name;
+               }
 
                if (!vma->vm_mm) {
                        name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3241,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 
        mmap_event = (struct perf_mmap_event){
                .vma    = vma,
+               /* .file_name */
+               /* .file_size */
                .event  = {
-                       .header = { .type = PERF_EVENT_MMAP, },
+                       .header = {
+                               .type = PERF_EVENT_MMAP,
+                               .misc = 0,
+                               /* .size */
+                       },
+                       /* .pid */
+                       /* .tid */
                        .start  = vma->vm_start,
                        .len    = vma->vm_end - vma->vm_start,
                        .pgoff  = vma->vm_pgoff,
@@ -3159,49 +3261,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-       struct perf_event_header        header;
-       u64                             time;
-       u64                             id;
-       u64                             period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-       struct perf_output_handle handle;
-       struct freq_event event;
-       int ret;
-
-       if (counter->hw.sample_period == period)
-               return;
-
-       if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-               return;
-
-       event = (struct freq_event) {
-               .header = {
-                       .type = PERF_EVENT_PERIOD,
-                       .misc = 0,
-                       .size = sizeof(event),
-               },
-               .time = sched_clock(),
-               .id = counter->id,
-               .period = period,
-       };
-
-       ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-       if (ret)
-               return;
-
-       perf_output_put(&handle, event);
-       perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
@@ -3214,16 +3273,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
                struct perf_event_header        header;
                u64                             time;
                u64                             id;
+               u64                             stream_id;
        } throttle_event = {
                .header = {
-                       .type = PERF_EVENT_THROTTLE + 1,
+                       .type = PERF_EVENT_THROTTLE,
                        .misc = 0,
                        .size = sizeof(throttle_event),
                },
                .time = sched_clock(),
-               .id = counter->id,
+               .id = primary_counter_id(counter),
+               .stream_id = counter->id,
        };
 
+       if (enable)
+               throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
        ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
        if (ret)
                return;
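The throttle record now carries both identifiers, consistent with the new PERF_SAMPLE_ID / PERF_SAMPLE_STREAM_ID split. A sketch of the semantics, derived from the code above:

    /*
     *  id        = primary_counter_id(counter)
     *              the user-visible id, shared by all clones of one
     *              logical counter (matches PERF_SAMPLE_ID);
     *  stream_id = counter->id
     *              the id of this specific counter instance
     *              (matches PERF_SAMPLE_STREAM_ID).
     *
     * Tools can aggregate by id and still tell per-task clones apart
     * by stream_id.
     */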
@@ -3300,87 +3364,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
        struct hw_perf_counter *hwc = &counter->hw;
-       u64 prev, now;
-       s64 delta;
+       u64 period = hwc->last_period;
+       u64 nr, offset;
+       s64 old, val;
+
+       hwc->last_period = hwc->sample_period;
 
 again:
-       prev = atomic64_read(&hwc->prev_count);
-       now = atomic64_read(&hwc->count);
-       if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-               goto again;
+       old = val = atomic64_read(&hwc->period_left);
+       if (val < 0)
+               return 0;
 
-       delta = now - prev;
+       nr = div64_u64(period + val, period);
+       offset = nr * period;
+       val -= offset;
+       if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+               goto again;
 
-       atomic64_add(delta, &counter->count);
-       atomic64_sub(delta, &hwc->period_left);
+       return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+                                   int nmi, struct perf_sample_data *data)
 {
        struct hw_perf_counter *hwc = &counter->hw;
-       s64 left = atomic64_read(&hwc->period_left);
-       s64 period = hwc->sample_period;
+       u64 overflow;
 
-       if (unlikely(left <= -period)) {
-               left = period;
-               atomic64_set(&hwc->period_left, left);
-               hwc->last_period = period;
-       }
+       data->period = counter->hw.last_period;
+       overflow = perf_swcounter_set_period(counter);
 
-       if (unlikely(left <= 0)) {
-               left += period;
-               atomic64_add(period, &hwc->period_left);
-               hwc->last_period = period;
-       }
+       if (hwc->interrupts == MAX_INTERRUPTS)
+               return;
 
-       atomic64_set(&hwc->prev_count, -left);
-       atomic64_set(&hwc->count, -left);
+       for (; overflow; overflow--) {
+               if (perf_counter_overflow(counter, nmi, data)) {
+                       /*
+                        * We inhibit the overflow from happening when
+                        * hwc->interrupts == MAX_INTERRUPTS.
+                        */
+                       break;
+               }
+       }
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct perf_counter *counter;
-       u64 period;
-
-       counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-       counter->pmu->read(counter);
-
-       data.addr = 0;
-       data.regs = get_irq_regs();
        /*
-        * In case we exclude kernel IPs or are somehow not in interrupt
-        * context, provide the next best thing, the user IP.
+        * Nothing to do, we already reset hwc->interrupts.
         */
-       if ((counter->attr.exclude_kernel || !data.regs) &&
-                       !counter->attr.exclude_user)
-               data.regs = task_pt_regs(current);
+}
 
-       if (data.regs) {
-               if (perf_counter_overflow(counter, 0, &data))
-                       ret = HRTIMER_NORESTART;
-       }
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+                              int nmi, struct perf_sample_data *data)
+{
+       struct hw_perf_counter *hwc = &counter->hw;
 
-       period = max_t(u64, 10000, counter->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+       atomic64_add(nr, &counter->count);
 
-       return ret;
-}
+       if (!hwc->sample_period)
+               return;
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-                                   int nmi, struct perf_sample_data *data)
-{
-       data->period = counter->hw.last_period;
+       if (!data->regs)
+               return;
 
-       perf_swcounter_update(counter);
-       perf_swcounter_set_period(counter);
-       if (perf_counter_overflow(counter, nmi, data))
-               /* soft-disable the counter */
-               ;
+       if (!atomic64_add_negative(nr, &hwc->period_left))
+               perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
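A worked example of the new bookkeeping may help review (numbers are hypothetical): take sample_period = 100 with period_left = -30, i.e. 30 events until the next overflow, when perf_swcounter_add() is handed nr = 250:

    /*
     * atomic64_add_negative(250, &period_left) -> period_left = 220 >= 0,
     * so perf_swcounter_overflow() runs and perf_swcounter_set_period()
     * computes:
     *
     *     nr     = div64_u64(100 + 220, 100) = 3    overflow records
     *     offset = 3 * 100                   = 300
     *     val    = 220 - 300                 = -80  back in [-period, 0]
     *
     * i.e. overflows fire for the 30th, 130th and 230th event, and the
     * next one is due after another 80 events.
     */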
@@ -3444,15 +3502,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
        return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-                              int nmi, struct perf_sample_data *data)
-{
-       int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-       if (counter->hw.sample_period && !neg && data->regs)
-               perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
                                     enum perf_type_id type,
                                     u32 event, u64 nr, int nmi,
@@ -3531,27 +3580,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
 
 static void perf_swcounter_read(struct perf_counter *counter)
 {
-       perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-       perf_swcounter_set_period(counter);
+       struct hw_perf_counter *hwc = &counter->hw;
+
+       if (hwc->sample_period) {
+               hwc->last_period = hwc->sample_period;
+               perf_swcounter_set_period(counter);
+       }
        return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-       perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
        .enable         = perf_swcounter_enable,
        .disable        = perf_swcounter_disable,
        .read           = perf_swcounter_read,
+       .unthrottle     = perf_swcounter_unthrottle,
 };
 
 /*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct perf_counter *counter;
+       u64 period;
+
+       counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+       counter->pmu->read(counter);
+
+       data.addr = 0;
+       data.regs = get_irq_regs();
+       /*
+        * In case we exclude kernel IPs or are somehow not in interrupt
+        * context, provide the next best thing, the user IP.
+        */
+       if ((counter->attr.exclude_kernel || !data.regs) &&
+                       !counter->attr.exclude_user)
+               data.regs = task_pt_regs(current);
+
+       if (data.regs) {
+               if (perf_counter_overflow(counter, 0, &data))
+                       ret = HRTIMER_NORESTART;
+       }
+
+       period = max_t(u64, 10000, counter->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+       return ret;
+}
+
+/*
  * Software counter: cpu wall time clock
  */
 
@@ -3668,17 +3756,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+                         int entry_size)
 {
+       struct perf_raw_record raw = {
+               .size = entry_size,
+               .data = record,
+       };
+
        struct perf_sample_data data = {
-               .regs = get_irq_regs();
-               .addr = 0,
+               .regs = get_irq_regs(),
+               .addr = addr,
+               .raw  = &raw,
        };
 
        if (!data.regs)
                data.regs = task_pt_regs(current);
 
-       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+       do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
@@ -3687,16 +3782,20 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-       ftrace_profile_disable(perf_event_id(&counter->attr));
+       ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-       int event_id = perf_event_id(&counter->attr);
-       int ret;
+       /*
+        * Raw tracepoint data is a severe data leak, only allow root to
+        * have these.
+        */
+       if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+                       !capable(CAP_SYS_ADMIN))
+               return ERR_PTR(-EPERM);
 
-       ret = ftrace_profile_enable(event_id);
-       if (ret)
+       if (ftrace_profile_enable(counter->attr.config))
                return NULL;
 
        counter->destroy = tp_perf_counter_destroy;
@@ -3874,6 +3973,8 @@ done:
                        atomic_inc(&nr_mmap_counters);
                if (counter->attr.comm)
                        atomic_inc(&nr_comm_counters);
+               if (counter->attr.task)
+                       atomic_inc(&nr_task_counters);
        }
 
        return counter;
@@ -4235,8 +4336,10 @@ void perf_counter_exit_task(struct task_struct *child)
        struct perf_counter_context *child_ctx;
        unsigned long flags;
 
-       if (likely(!child->perf_counter_ctxp))
+       if (likely(!child->perf_counter_ctxp)) {
+               perf_counter_task(child, NULL, 0);
                return;
+       }
 
        local_irq_save(flags);
        /*
@@ -4255,17 +4358,20 @@ void perf_counter_exit_task(struct task_struct *child)
         */
        spin_lock(&child_ctx->lock);
        child->perf_counter_ctxp = NULL;
-       if (child_ctx->parent_ctx) {
-               /*
-                * This context is a clone; unclone it so it can't get
-                * swapped to another process while we're removing all
-                * the counters from it.
-                */
-               put_ctx(child_ctx->parent_ctx);
-               child_ctx->parent_ctx = NULL;
-       }
-       spin_unlock(&child_ctx->lock);
-       local_irq_restore(flags);
+       /*
+        * If this context is a clone, unclone it so it can't get
+        * swapped to another process while we're removing all
+        * the counters from it.
+        */
+       unclone_ctx(child_ctx);
+       spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+       /*
+        * Report the task dead after unscheduling the counters so that we
+        * won't get any samples after PERF_EVENT_EXIT. We can however still
+        * get a few PERF_EVENT_READ events.
+        */
+       perf_counter_task(child, child_ctx, 0);
 
        /*
         * We can recurse on the same lock type through: