 arch/powerpc/kernel/perf_counter.c | 10
 arch/x86/kernel/cpu/perf_counter.c | 15
 include/linux/perf_counter.h       | 10
 kernel/perf_counter.c              | 38
 4 files changed, 48 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 4786ad9a2887..5e0bf399c433 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		addr = 0;
+		struct perf_sample_data data = {
+			.regs = regs,
+			.addr = 0,
+		};
+
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
 			/*
 			 * The user wants a data address recorded.
@@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
 				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				addr = mfspr(SPRN_SDAR);
+				data.addr = mfspr(SPRN_SDAR);
 		}
-		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
 			 * by setting the counter to 0, so it will be
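
The powerpc hunks show the producer side of the conversion: the sample descriptor is built on the stack inside the if (record) block, the data address is filled in only when requested, and a single pointer is passed down. A minimal sketch of the pattern, where read_data_addr() is a placeholder for the SDAR read above and not a real kernel function:

	struct perf_sample_data data = {
		.regs = regs,
		.addr = 0,
	};

	if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
		data.addr = read_data_addr();	/* stands in for mfspr(SPRN_SDAR) */

	if (perf_counter_overflow(counter, nmi, &data)) {
		/* overflow path asked us to throttle; the surrounding
		 * code resets the counter */
	}
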
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 240ca5630632..82a23d487f92 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	struct cpu_hw_counters;
 	int bit, cpu, loops;
 	u64 ack, status;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1210,7 +1213,7 @@ again:
 		if (!intel_pmu_save_and_restart(counter))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
@@ -1230,12 +1233,16 @@ again:
 
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	int cpu, idx, handled = 0;
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
+	int cpu, idx, handled = 0;
 	u64 val;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
@@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
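
The x86 handlers initialize the descriptor field by field at the top of the function rather than with a designated initializer; since regs and addr are fixed for the whole interrupt, the two forms are equivalent. Side by side, for illustration:

	/* Designated initializer, as on powerpc ... */
	struct perf_sample_data a = { .regs = regs, .addr = 0 };

	/* ... or declaration plus assignments, as in both handlers here. */
	struct perf_sample_data b;
	b.regs = regs;
	b.addr = 0;
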
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 282d8cc48980..d8c0eb480f9a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -605,8 +605,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 				struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern int perf_counter_overflow(struct perf_counter *counter,
-				 int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+	struct pt_regs	*regs;
+	u64		addr;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+				 struct perf_sample_data *data);
+
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
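
The point of the new struct is visible in the signature: perf_counter_overflow() no longer changes when another piece of per-sample state is needed; only struct perf_sample_data and the code that produces or consumes the new field do. A hypothetical illustration, not part of this patch:

	struct perf_sample_data {
		struct pt_regs	*regs;
		u64		addr;
		u64		period;	/* hypothetical future field */
	};

Call sites that build the struct with a designated initializer, as the powerpc and swcounter paths do, would zero such a new field implicitly; the field-by-field x86 initialization would need an explicit assignment.
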
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ae591a1275a6..4fe85e804f43 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter,
-				int nmi, struct pt_regs *regs, u64 addr)
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+				struct perf_sample_data *data)
 {
 	int ret;
 	u64 sample_type = counter->attr.sample_type;
@@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.size = sizeof(header);
 
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
-	header.misc |= perf_misc_flags(regs);
+	header.misc |= perf_misc_flags(data->regs);
 
 	if (sample_type & PERF_SAMPLE_IP) {
-		ip = perf_instruction_pointer(regs);
+		ip = perf_instruction_pointer(data->regs);
 		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
@@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter,
 	}
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-		callchain = perf_callchain(regs);
+		callchain = perf_callchain(data->regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
@@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		perf_output_put(&handle, time);
 
 	if (sample_type & PERF_SAMPLE_ADDR)
-		perf_output_put(&handle, addr);
+		perf_output_put(&handle, data->addr);
 
 	if (sample_type & PERF_SAMPLE_ID)
 		perf_output_put(&handle, counter->id);
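
On the consumer side, perf_counter_output() now takes all sample state from the descriptor. A condensed sketch of the pattern, following the code above, in which the IP is staged in a local because perf_output_put() copies its argument by address:

	u64 ip;

	if (sample_type & PERF_SAMPLE_IP) {
		ip = perf_instruction_pointer(data->regs);
		perf_output_put(&handle, ip);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(&handle, data->addr);
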
@@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter,
-			  int nmi, struct pt_regs *regs, u64 addr)
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+			  struct perf_sample_data *data)
 {
 	int events = atomic_read(&counter->event_limit);
 	int throttle = counter->pmu->unthrottle != NULL;
@@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter,
 		perf_counter_disable(counter);
 	}
 
-	perf_counter_output(counter, nmi, regs, addr);
+	perf_counter_output(counter, nmi, data);
 	return ret;
 }
 
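
After the change the generic overflow path is a pure forwarder: it decides throttling and event-limit policy, then hands the untouched descriptor to the output routine. Its shape, condensed from the two hunks above with the policy code elided:

	int perf_counter_overflow(struct perf_counter *counter, int nmi,
				  struct perf_sample_data *data)
	{
		int ret = 0;

		/* ... event_limit accounting and interrupt throttling elided ... */

		perf_counter_output(counter, nmi, data);
		return ret;
	}
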
@@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
 	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
 	struct perf_counter *counter;
-	struct pt_regs *regs;
 	u64 period;
 
 	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
 	counter->pmu->read(counter);
 
-	regs = get_irq_regs();
+	data.addr = 0;
+	data.regs = get_irq_regs();
 	/*
 	 * In case we exclude kernel IPs or are somehow not in interrupt
 	 * context, provide the next best thing, the user IP.
 	 */
-	if ((counter->attr.exclude_kernel || !regs) &&
+	if ((counter->attr.exclude_kernel || !data.regs) &&
 			!counter->attr.exclude_user)
-		regs = task_pt_regs(current);
+		data.regs = task_pt_regs(current);
 
-	if (regs) {
-		if (perf_counter_overflow(counter, 0, regs, 0))
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
 			ret = HRTIMER_NORESTART;
 	}
 
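
get_irq_regs() returns NULL when the timer fires outside hard-interrupt context, so the fallback and the final guard both operate on data.regs, the field that will eventually be consumed. The same policy written as a hypothetical helper (sample_regs() does not exist in the tree; it only shows the logic in isolation):

	static struct pt_regs *sample_regs(struct perf_counter *counter)
	{
		struct pt_regs *regs = get_irq_regs();

		/* Excluding kernel IPs, or not in interrupt context:
		 * fall back to the user-side registers if allowed. */
		if ((counter->attr.exclude_kernel || !regs) &&
		    !counter->attr.exclude_user)
			regs = task_pt_regs(current);

		return regs;	/* may still be NULL; the caller must check */
	}
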
@@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
+	struct perf_sample_data data = {
+		.regs = regs,
+		.addr = addr,
+	};
+
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, regs, addr))
+	if (perf_counter_overflow(counter, nmi, &data))
 		/* soft-disable the counter */
 		;
 
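
perf_swcounter_overflow() keeps its old (regs, addr) parameter list and packs the values into a stack descriptor on entry, so its own callers are untouched by this patch and no allocation is added. Sketched usage, with placeholder argument values:

	/* Software-counter callers still pass loose arguments ... */
	perf_swcounter_overflow(counter, 1 /* nmi */, regs, 0 /* addr */);
	/* ... and the wrapper builds the perf_sample_data for them. */
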