-rw-r--r--  arch/powerpc/kernel/perf_counter.c |  12
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c |   8
-rw-r--r--  include/linux/perf_counter.h       |  32
-rw-r--r--  kernel/perf_counter.c              | 104
4 files changed, 78 insertions, 78 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index f96d55f55bd6..c9633321e7a5 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -535,7 +535,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw.irq_period) {
+		if (counter->hw.sample_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -749,12 +749,12 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.irq_period)
+	if (!counter->hw.idx || !counter->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
 	power_pmu_read(counter);
-	left = counter->hw.irq_period;
+	left = counter->hw.sample_period;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -789,7 +789,7 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 	if (counter->hw_event.exclude_user
 	    || counter->hw_event.exclude_kernel
 	    || counter->hw_event.exclude_hv
-	    || counter->hw_event.irq_period)
+	    || counter->hw_event.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
@@ -925,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -958,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.irq_period;
+	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 	u64 addr, mmcra, sdsync;
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 316b0c995f38..ec06aa5e9282 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -290,11 +290,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	hwc->nmi = 1;
 	hw_event->nmi = 1;
 
-	if (!hwc->irq_period)
-		hwc->irq_period = x86_pmu.max_period;
+	if (!hwc->sample_period)
+		hwc->sample_period = x86_pmu.max_period;
 
 	atomic64_set(&hwc->period_left,
-		     min(x86_pmu.max_period, hwc->irq_period));
+		     min(x86_pmu.max_period, hwc->sample_period));
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -462,7 +462,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 			     struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = min(x86_pmu.max_period, hwc->irq_period);
+	s64 period = min(x86_pmu.max_period, hwc->sample_period);
 	int err;
 
 	/*
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4845a214b9e7..1fcd3cc93855 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -94,18 +94,18 @@ enum sw_event_ids {
 #define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)
 
 /*
- * Bits that can be set in hw_event.record_type to request information
+ * Bits that can be set in hw_event.sample_type to request information
  * in the overflow packets.
  */
-enum perf_counter_record_format {
-	PERF_RECORD_IP		= 1U << 0,
-	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_TIME	= 1U << 2,
-	PERF_RECORD_ADDR	= 1U << 3,
-	PERF_RECORD_GROUP	= 1U << 4,
-	PERF_RECORD_CALLCHAIN	= 1U << 5,
-	PERF_RECORD_CONFIG	= 1U << 6,
-	PERF_RECORD_CPU		= 1U << 7,
+enum perf_counter_sample_format {
+	PERF_SAMPLE_IP		= 1U << 0,
+	PERF_SAMPLE_TID		= 1U << 1,
+	PERF_SAMPLE_TIME	= 1U << 2,
+	PERF_SAMPLE_ADDR	= 1U << 3,
+	PERF_SAMPLE_GROUP	= 1U << 4,
+	PERF_SAMPLE_CALLCHAIN	= 1U << 5,
+	PERF_SAMPLE_CONFIG	= 1U << 6,
+	PERF_SAMPLE_CPU		= 1U << 7,
 };
 
 /*
@@ -132,12 +132,12 @@ struct perf_counter_hw_event {
 	__u64			config;
 
 	union {
-		__u64		irq_period;
-		__u64		irq_freq;
+		__u64		sample_period;
+		__u64		sample_freq;
 	};
 
-	__u32			record_type;
-	__u32			read_format;
+	__u64			sample_type;
+	__u64			read_format;
 
 	__u64			disabled : 1, /* off by default */
 				nmi	 : 1, /* NMI sampling */
@@ -262,7 +262,7 @@ enum perf_event_type {
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				time;
-	 *	u64				irq_period;
+	 *	u64				sample_period;
 	 * };
 	 */
 	PERF_EVENT_PERIOD		= 4,
@@ -363,7 +363,7 @@ struct hw_perf_counter {
 		};
 	};
 	atomic64_t		prev_count;
-	u64			irq_period;
+	u64			sample_period;
 	atomic64_t		period_left;
 	u64			interrupts;
 #endif
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 978ecfcc7aaf..5ecd9981c035 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1186,7 +1186,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period);
 static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
-	u64 interrupts, irq_period;
+	u64 interrupts, sample_period;
 	u64 events, period;
 	s64 delta;
 
@@ -1204,23 +1204,23 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 			interrupts = 2*sysctl_perf_counter_limit/HZ;
 		}
 
-		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+		if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
 			continue;
 
-		events = HZ * interrupts * counter->hw.irq_period;
-		period = div64_u64(events, counter->hw_event.irq_freq);
+		events = HZ * interrupts * counter->hw.sample_period;
+		period = div64_u64(events, counter->hw_event.sample_freq);
 
-		delta = (s64)(1 + period - counter->hw.irq_period);
+		delta = (s64)(1 + period - counter->hw.sample_period);
 		delta >>= 1;
 
-		irq_period = counter->hw.irq_period + delta;
+		sample_period = counter->hw.sample_period + delta;
 
-		if (!irq_period)
-			irq_period = 1;
+		if (!sample_period)
+			sample_period = 1;
 
-		perf_log_period(counter, irq_period);
+		perf_log_period(counter, sample_period);
 
-		counter->hw.irq_period = irq_period;
+		counter->hw.sample_period = sample_period;
 	}
 	spin_unlock(&ctx->lock);
 }
@@ -2297,7 +2297,7 @@ static void perf_counter_output(struct perf_counter *counter,
 				int nmi, struct pt_regs *regs, u64 addr)
 {
 	int ret;
-	u64 record_type = counter->hw_event.record_type;
+	u64 sample_type = counter->hw_event.sample_type;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 	u64 ip;
@@ -2321,61 +2321,61 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
 	header.misc |= perf_misc_flags(regs);
 
-	if (record_type & PERF_RECORD_IP) {
+	if (sample_type & PERF_SAMPLE_IP) {
 		ip = perf_instruction_pointer(regs);
-		header.type |= PERF_RECORD_IP;
+		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
 
-	if (record_type & PERF_RECORD_TID) {
+	if (sample_type & PERF_SAMPLE_TID) {
 		/* namespace issues */
 		tid_entry.pid = perf_counter_pid(counter, current);
 		tid_entry.tid = perf_counter_tid(counter, current);
 
-		header.type |= PERF_RECORD_TID;
+		header.type |= PERF_SAMPLE_TID;
 		header.size += sizeof(tid_entry);
 	}
 
-	if (record_type & PERF_RECORD_TIME) {
+	if (sample_type & PERF_SAMPLE_TIME) {
 		/*
 		 * Maybe do better on x86 and provide cpu_clock_nmi()
 		 */
 		time = sched_clock();
 
-		header.type |= PERF_RECORD_TIME;
+		header.type |= PERF_SAMPLE_TIME;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_ADDR) {
-		header.type |= PERF_RECORD_ADDR;
+	if (sample_type & PERF_SAMPLE_ADDR) {
+		header.type |= PERF_SAMPLE_ADDR;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CONFIG) {
-		header.type |= PERF_RECORD_CONFIG;
+	if (sample_type & PERF_SAMPLE_CONFIG) {
+		header.type |= PERF_SAMPLE_CONFIG;
 		header.size += sizeof(u64);
 	}
 
-	if (record_type & PERF_RECORD_CPU) {
-		header.type |= PERF_RECORD_CPU;
+	if (sample_type & PERF_SAMPLE_CPU) {
+		header.type |= PERF_SAMPLE_CPU;
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
-	if (record_type & PERF_RECORD_GROUP) {
-		header.type |= PERF_RECORD_GROUP;
+	if (sample_type & PERF_SAMPLE_GROUP) {
+		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
 			counter->nr_siblings * sizeof(group_entry);
 	}
 
-	if (record_type & PERF_RECORD_CALLCHAIN) {
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
 
-			header.type |= PERF_RECORD_CALLCHAIN;
+			header.type |= PERF_SAMPLE_CALLCHAIN;
 			header.size += callchain_size;
 		}
 	}
@@ -2386,28 +2386,28 @@ static void perf_counter_output(struct perf_counter *counter,
 
 	perf_output_put(&handle, header);
 
-	if (record_type & PERF_RECORD_IP)
+	if (sample_type & PERF_SAMPLE_IP)
 		perf_output_put(&handle, ip);
 
-	if (record_type & PERF_RECORD_TID)
+	if (sample_type & PERF_SAMPLE_TID)
 		perf_output_put(&handle, tid_entry);
 
-	if (record_type & PERF_RECORD_TIME)
+	if (sample_type & PERF_SAMPLE_TIME)
 		perf_output_put(&handle, time);
 
-	if (record_type & PERF_RECORD_ADDR)
+	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, addr);
 
-	if (record_type & PERF_RECORD_CONFIG)
+	if (sample_type & PERF_SAMPLE_CONFIG)
 		perf_output_put(&handle, counter->hw_event.config);
 
-	if (record_type & PERF_RECORD_CPU)
+	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
 	/*
-	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
-	if (record_type & PERF_RECORD_GROUP) {
+	if (sample_type & PERF_SAMPLE_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
 
@@ -2702,7 +2702,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
- * Log irq_period changes so that analyzing tools can re-normalize the
+ * Log sample_period changes so that analyzing tools can re-normalize the
  * event flow.
  */
 
@@ -2725,7 +2725,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 		.period = period,
 	};
 
-	if (counter->hw.irq_period == period)
+	if (counter->hw.sample_period == period)
 		return;
 
 	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
@@ -2834,7 +2834,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->irq_period;
+	s64 period = hwc->sample_period;
 
 	if (unlikely(left <= -period)) {
 		left = period;
@@ -2874,7 +2874,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 		ret = HRTIMER_NORESTART;
 	}
 
-	period = max_t(u64, 10000, counter->hw.irq_period);
+	period = max_t(u64, 10000, counter->hw.sample_period);
 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
 
 	return ret;
@@ -2959,7 +2959,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg && regs)
+	if (counter->hw.sample_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
@@ -3080,8 +3080,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -3092,7 +3092,7 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	cpu_clock_perf_counter_update(counter);
 }
@@ -3132,8 +3132,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
-	if (hwc->irq_period) {
-		u64 period = max_t(u64, 10000, hwc->irq_period);
+	if (hwc->sample_period) {
+		u64 period = max_t(u64, 10000, hwc->sample_period);
 		__hrtimer_start_range_ns(&hwc->hrtimer,
 				ns_to_ktime(period), 0,
 				HRTIMER_MODE_REL, 0);
@@ -3144,7 +3144,7 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
-	if (counter->hw.irq_period)
+	if (counter->hw.sample_period)
 		hrtimer_cancel(&counter->hw.hrtimer);
 	task_clock_perf_counter_update(counter, counter->ctx->time);
 
@@ -3223,7 +3223,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
-	counter->hw.irq_period = counter->hw_event.irq_period;
+	counter->hw.sample_period = counter->hw_event.sample_period;
 
 	return &perf_ops_generic;
 }
@@ -3323,15 +3323,15 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	pmu = NULL;
 
 	hwc = &counter->hw;
-	if (hw_event->freq && hw_event->irq_freq)
-		hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
+	if (hw_event->freq && hw_event->sample_freq)
+		hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
 	else
-		hwc->irq_period = hw_event->irq_period;
+		hwc->sample_period = hw_event->sample_period;
 
 	/*
-	 * we currently do not support PERF_RECORD_GROUP on inherited counters
+	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
 	 */
-	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+	if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
 		goto done;
 
 	if (perf_event_raw(hw_event)) {