Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 92
1 file changed, 31 insertions(+), 61 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e1d6a3aa1333..7530588fa5c5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -155,6 +155,20 @@ static void unclone_ctx(struct perf_counter_context *ctx)
 }
 
 /*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+        u64 id = counter->id;
+
+        if (counter->parent)
+                id = counter->parent->id;
+
+        return id;
+}
+
+/*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
  * the context could get moved to another task.
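The primary_counter_id() helper added above centralizes the rule that inherited counters report their parent's id to userspace. As a rough consumer-side illustration, here is a sketch of the value layout read() produces, assuming all three read_format bits below are set (the struct name is ours; field order follows perf_read_hw() later in this patch):

#include <stdint.h>

/* Hypothetical mirror of the read() value layout when
 * PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and
 * PERF_FORMAT_ID are all set. After this patch 'id' is the primary
 * (parent) counter id, so reads on inherited children aggregate
 * under a single id. */
struct counter_read_layout {
        uint64_t value;         /* counter value, including child totals */
        uint64_t time_enabled;  /* total_time_enabled + child totals */
        uint64_t time_running;  /* total_time_running + child totals */
        uint64_t id;            /* primary_counter_id(counter) */
};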
@@ -1296,7 +1310,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1315,8 +1328,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
         if (!sample_period)
                 sample_period = 1;
 
-        perf_log_period(counter, sample_period);
-
         hwc->sample_period = sample_period;
 }
 
@@ -1705,7 +1716,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
                 values[n++] = counter->total_time_running +
                         atomic64_read(&counter->child_total_time_running);
         if (counter->attr.read_format & PERF_FORMAT_ID)
-                values[n++] = counter->id;
+                values[n++] = primary_counter_id(counter);
         mutex_unlock(&counter->child_mutex);
 
         if (count < n * sizeof(u64))
@@ -1812,8 +1823,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
                 counter->attr.sample_freq = value;
         } else {
-                perf_log_period(counter, value);
-
                 counter->attr.sample_period = value;
                 counter->hw.sample_period = value;
         }
@@ -2662,6 +2671,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
         if (sample_type & PERF_SAMPLE_ID)
                 header.size += sizeof(u64);
 
+        if (sample_type & PERF_SAMPLE_STREAM_ID)
+                header.size += sizeof(u64);
+
         if (sample_type & PERF_SAMPLE_CPU) {
                 header.size += sizeof(cpu_entry);
 
@@ -2705,7 +2717,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
         if (sample_type & PERF_SAMPLE_ADDR)
                 perf_output_put(&handle, data->addr);
 
-        if (sample_type & PERF_SAMPLE_ID)
+        if (sample_type & PERF_SAMPLE_ID) {
+                u64 id = primary_counter_id(counter);
+
+                perf_output_put(&handle, id);
+        }
+
+        if (sample_type & PERF_SAMPLE_STREAM_ID)
                 perf_output_put(&handle, counter->id);
 
         if (sample_type & PERF_SAMPLE_CPU)
@@ -2728,7 +2746,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 if (sub != counter)
                         sub->pmu->read(sub);
 
-                group_entry.id = sub->id;
+                group_entry.id = primary_counter_id(sub);
                 group_entry.counter = atomic64_read(&sub->count);
 
                 perf_output_put(&handle, group_entry);
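Since PERF_SAMPLE_ID now yields the primary (parent) id, the new PERF_SAMPLE_STREAM_ID bit is what lets a tool still tell individual counter instances apart. A minimal consumer-side sketch, assuming a sample whose sample_type includes both bits and a pointer already positioned at the id field (the names are illustrative; the id-then-stream_id order matches perf_counter_output() above):

#include <stdint.h>

struct sample_ids {
        uint64_t id;        /* primary_counter_id(): shared with the parent */
        uint64_t stream_id; /* counter->id: unique to this counter instance */
};

/* 'p' points at the PERF_SAMPLE_ID field of a sample record in the
 * mmap()ed ring buffer; PERF_SAMPLE_STREAM_ID is written directly
 * after it, as in perf_counter_output() above. */
static struct sample_ids parse_sample_ids(const uint64_t *p)
{
        struct sample_ids ids = { .id = p[0], .stream_id = p[1] };
        return ids;
}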
@@ -2788,15 +2806,8 @@ perf_counter_read_event(struct perf_counter *counter,
         }
 
         if (counter->attr.read_format & PERF_FORMAT_ID) {
-                u64 id;
-
                 event.header.size += sizeof(u64);
-                if (counter->parent)
-                        id = counter->parent->id;
-                else
-                        id = counter->id;
-
-                event.format[i++] = id;
+                event.format[i++] = primary_counter_id(counter);
         }
 
         ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -3191,49 +3202,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-        struct perf_event_header header;
-        u64 time;
-        u64 id;
-        u64 period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-        struct perf_output_handle handle;
-        struct freq_event event;
-        int ret;
-
-        if (counter->hw.sample_period == period)
-                return;
-
-        if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-                return;
-
-        event = (struct freq_event) {
-                .header = {
-                        .type = PERF_EVENT_PERIOD,
-                        .misc = 0,
-                        .size = sizeof(event),
-                },
-                .time = sched_clock(),
-                .id = counter->id,
-                .period = period,
-        };
-
-        ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-        if (ret)
-                return;
-
-        perf_output_put(&handle, event);
-        perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
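The deleted perf_log_period() emitted PERF_EVENT_PERIOD records so analysis tools could re-normalize the event flow after a period change; note it already skipped counters with PERF_SAMPLE_PERIOD set, since their samples carry the period inline. A sketch of the replacement idiom under that assumption (names are ours):

#include <stdint.h>

/* Illustrative accounting: with PERF_SAMPLE_PERIOD in sample_type,
 * every sample reports the period that produced it, so the event
 * count a sample represents is simply that period, and no separate
 * period-change records are needed. */
static uint64_t total_events;

static void account_sample(uint64_t sample_period)
{
        total_events += sample_period;
}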
@@ -3246,14 +3214,16 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
                 struct perf_event_header        header;
                 u64                             time;
                 u64                             id;
+                u64                             stream_id;
         } throttle_event = {
                 .header = {
                         .type = PERF_EVENT_THROTTLE + 1,
                         .misc = 0,
                         .size = sizeof(throttle_event),
                 },
                 .time = sched_clock(),
-                .id = counter->id,
+                .id = primary_counter_id(counter),
+                .stream_id = counter->id,
         };
 
         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
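For completeness, a userspace-side mirror of the throttle record as it looks after this change (the struct name is ours; we assume the struct perf_event_header definition exported by linux/perf_counter.h in this era):

#include <stdint.h>
#include <linux/perf_counter.h> /* struct perf_event_header */

/* Mirror of the record written by perf_log_throttle() above:
 * 'id' aggregates inherited counters under the parent, while
 * 'stream_id' still names the individual counter instance. */
struct throttle_record {
        struct perf_event_header header; /* PERF_EVENT_THROTTLE + 1, as set above */
        uint64_t time;                   /* sched_clock() timestamp */
        uint64_t id;                     /* primary_counter_id(counter) */
        uint64_t stream_id;              /* counter->id */
};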