Diffstat:
 -rw-r--r--  arch/powerpc/kernel/perf_counter.c |  38
 -rw-r--r--  arch/x86/kernel/cpu/perf_counter.c |  16
 -rw-r--r--  include/linux/perf_counter.h       |  34
 -rw-r--r--  include/linux/syscalls.h           |   4
 -rw-r--r--  kernel/perf_counter.c              | 116
 5 files changed, 104 insertions(+), 104 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index c9633321e7a5..ea54686cb787 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -262,13 +262,13 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
 		}
 		counter = ctrs[i];
 		if (first) {
-			eu = counter->hw_event.exclude_user;
-			ek = counter->hw_event.exclude_kernel;
-			eh = counter->hw_event.exclude_hv;
+			eu = counter->attr.exclude_user;
+			ek = counter->attr.exclude_kernel;
+			eh = counter->attr.exclude_hv;
 			first = 0;
-		} else if (counter->hw_event.exclude_user != eu ||
-			   counter->hw_event.exclude_kernel != ek ||
-			   counter->hw_event.exclude_hv != eh) {
+		} else if (counter->attr.exclude_user != eu ||
+			   counter->attr.exclude_kernel != ek ||
+			   counter->attr.exclude_hv != eh) {
 			return -EAGAIN;
 		}
 	}
@@ -483,16 +483,16 @@ void hw_perf_enable(void)

	/*
	 * Add in MMCR0 freeze bits corresponding to the
-	 * hw_event.exclude_* bits for the first counter.
+	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
-	if (counter->hw_event.exclude_user)
+	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
-	if (counter->hw_event.exclude_kernel)
+	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
-	if (counter->hw_event.exclude_hv)
+	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
@@ -786,10 +786,10 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 	int n;
 	u64 alt[MAX_EVENT_ALTERNATIVES];

-	if (counter->hw_event.exclude_user
-	    || counter->hw_event.exclude_kernel
-	    || counter->hw_event.exclude_hv
-	    || counter->hw_event.sample_period)
+	if (counter->attr.exclude_user
+	    || counter->attr.exclude_kernel
+	    || counter->attr.exclude_hv
+	    || counter->attr.sample_period)
 		return 0;

 	if (ppmu->limited_pmc_event(ev))
@@ -855,13 +855,13 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)

 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	if (!perf_event_raw(&counter->hw_event)) {
-		ev = perf_event_id(&counter->hw_event);
+	if (!perf_event_raw(&counter->attr)) {
+		ev = perf_event_id(&counter->attr);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
 			return ERR_PTR(-EOPNOTSUPP);
 		ev = ppmu->generic_events[ev];
 	} else {
-		ev = perf_event_config(&counter->hw_event);
+		ev = perf_event_config(&counter->attr);
 	}
 	counter->hw.config_base = ev;
 	counter->hw.idx = 0;
@@ -872,7 +872,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 	 * the user set it to.
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
-		counter->hw_event.exclude_hv = 0;
+		counter->attr.exclude_hv = 0;

 	/*
 	 * If this is a per-task counter, then we can use
@@ -990,7 +990,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	if (record) {
 		addr = 0;
-		if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
+		if (counter->attr.record_type & PERF_RECORD_ADDR) {
 			/*
 			 * The user wants a data address recorded.
 			 * If we're not doing instruction sampling,
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 904571bea710..e16e8c13132f 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -247,11 +247,11 @@ static inline int x86_pmu_initialized(void)
 }

 /*
- * Setup the hardware configuration for a given hw_event_type
+ * Setup the hardware configuration for a given attr_type
  */
 static int __hw_perf_counter_init(struct perf_counter *counter)
 {
-	struct perf_counter_hw_event *hw_event = &counter->hw_event;
+	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;

@@ -279,9 +279,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	/*
 	 * Count user and OS events unless requested not to.
 	 */
-	if (!hw_event->exclude_user)
+	if (!attr->exclude_user)
 		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-	if (!hw_event->exclude_kernel)
+	if (!attr->exclude_kernel)
 		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

 	if (!hwc->sample_period)
@@ -292,15 +292,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	/*
 	 * Raw event type provide the config in the event structure
 	 */
-	if (perf_event_raw(hw_event)) {
-		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
+	if (perf_event_raw(attr)) {
+		hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
 	} else {
-		if (perf_event_id(hw_event) >= x86_pmu.max_events)
+		if (perf_event_id(attr) >= x86_pmu.max_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
+		hwc->config |= x86_pmu.event_map(perf_event_id(attr));
 	}

 	counter->destroy = hw_perf_counter_destroy;
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 45bdd3b95d3e..37d5541d74cb 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -22,7 +22,7 @@
  */

 /*
- * hw_event.type
+ * attr.type
  */
 enum perf_event_types {
 	PERF_TYPE_HARDWARE = 0,
@@ -37,10 +37,10 @@ enum perf_event_types {
 };

 /*
- * Generalized performance counter event types, used by the hw_event.event_id
+ * Generalized performance counter event types, used by the attr.event_id
  * parameter of the sys_perf_counter_open() syscall:
  */
-enum hw_event_ids {
+enum attr_ids {
 	/*
 	 * Common hardware events, generalized by the kernel:
 	 */
@@ -94,7 +94,7 @@ enum sw_event_ids {
 #define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT)

 /*
- * Bits that can be set in hw_event.sample_type to request information
+ * Bits that can be set in attr.sample_type to request information
  * in the overflow packets.
  */
 enum perf_counter_sample_format {
@@ -109,7 +109,7 @@ enum perf_counter_sample_format {
 };

 /*
- * Bits that can be set in hw_event.read_format to request that
+ * Bits that can be set in attr.read_format to request that
  * reads on the counter should return the indicated quantities,
  * in increasing order of bit value, after the counter value.
  */
@@ -122,7 +122,7 @@ enum perf_counter_read_format {
 /*
  * Hardware event to monitor via a performance monitoring counter:
  */
-struct perf_counter_hw_event {
+struct perf_counter_attr {
 	/*
 	 * The MSB of the config word signifies if the rest contains cpu
 	 * specific (raw) counter configuration data, if unset, the next
@@ -323,25 +323,25 @@ enum perf_event_type {

 struct task_struct;

-static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_raw(struct perf_counter_attr *attr)
 {
-	return hw_event->config & PERF_COUNTER_RAW_MASK;
+	return attr->config & PERF_COUNTER_RAW_MASK;
 }

-static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_config(struct perf_counter_attr *attr)
 {
-	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
+	return attr->config & PERF_COUNTER_CONFIG_MASK;
 }

-static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_type(struct perf_counter_attr *attr)
 {
-	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
+	return (attr->config & PERF_COUNTER_TYPE_MASK) >>
 		PERF_COUNTER_TYPE_SHIFT;
 }

-static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_id(struct perf_counter_attr *attr)
 {
-	return hw_event->config & PERF_COUNTER_EVENT_MASK;
+	return attr->config & PERF_COUNTER_EVENT_MASK;
 }

 /**
@@ -457,7 +457,7 @@ struct perf_counter {
 	u64 tstamp_running;
 	u64 tstamp_stopped;

-	struct perf_counter_hw_event hw_event;
+	struct perf_counter_attr attr;
 	struct hw_perf_counter hw;

 	struct perf_counter_context *ctx;
@@ -605,8 +605,8 @@ extern int perf_counter_overflow(struct perf_counter *counter,
  */
 static inline int is_software_counter(struct perf_counter *counter)
 {
-	return !perf_event_raw(&counter->hw_event) &&
-		perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
+	return !perf_event_raw(&counter->attr) &&
+		perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
 }

 extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
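
Side note on the helpers touched above: perf_event_raw()/perf_event_type()/perf_event_id() just mask fields out of the single attr.config word. A minimal illustrative sketch of that decoding follows; the DEMO_* masks and shift are placeholders, not the real PERF_COUNTER_*_MASK values (which live elsewhere in this header and are not shown in the hunk):

#include <stdint.h>

/* Placeholder encodings -- stand-ins for PERF_COUNTER_{RAW,TYPE,EVENT}_MASK. */
#define DEMO_RAW_MASK	(1ULL << 63)
#define DEMO_TYPE_SHIFT	56
#define DEMO_TYPE_MASK	(0x7fULL << DEMO_TYPE_SHIFT)
#define DEMO_EVENT_MASK	((1ULL << DEMO_TYPE_SHIFT) - 1)

struct demo_attr {
	uint64_t config;	/* stands in for perf_counter_attr.config */
};

/* Same shape as the inline helpers above, with placeholder masks. */
static inline uint64_t demo_event_raw(const struct demo_attr *attr)
{
	return attr->config & DEMO_RAW_MASK;
}

static inline uint64_t demo_event_type(const struct demo_attr *attr)
{
	return (attr->config & DEMO_TYPE_MASK) >> DEMO_TYPE_SHIFT;
}

static inline uint64_t demo_event_id(const struct demo_attr *attr)
{
	return attr->config & DEMO_EVENT_MASK;
}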
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 79faae950e2e..c6c84ad8bd71 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,7 +55,7 @@ struct compat_timeval;
 struct robust_list_head;
 struct getcpu_cache;
 struct old_linux_dirent;
-struct perf_counter_hw_event;
+struct perf_counter_attr;

 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -758,6 +758,6 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);


 asmlinkage long sys_perf_counter_open(
-		const struct perf_counter_hw_event __user *hw_event_uptr,
+		const struct perf_counter_attr __user *attr_uptr,
 		pid_t pid, int cpu, int group_fd, unsigned long flags);
 #endif
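
The prototype change above is the user-visible part of the rename. A hedged sketch of how a caller might exercise the renamed syscall is shown below; there is no libc wrapper for it, and the wrapper name, __NR_perf_counter_open, and the PERF_COUNT_INSTRUCTIONS generalized event id are assumptions about this era of the ABI rather than anything taken from this patch:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>	/* struct perf_counter_attr, PERF_COUNT_* */

/* Thin wrapper; __NR_perf_counter_open is assumed to exist for this arch. */
static long perf_counter_open(struct perf_counter_attr *attr,
			      pid_t pid, int cpu, int group_fd,
			      unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int open_instruction_counter(void)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.config = PERF_COUNT_INSTRUCTIONS;	/* generalized hw event (assumed id) */
	attr.exclude_kernel = 1;		/* same bit the PMU drivers test above */
	attr.disabled = 1;			/* start stopped, cf. attr->disabled in perf_counter_alloc() */

	/* pid 0 = current task, cpu -1 = any cpu, no group leader, no flags */
	return (int)perf_counter_open(&attr, 0, -1, -1, 0);
}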
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index abe2f3b6c424..317cef78a388 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -260,7 +260,7 @@ counter_sched_out(struct perf_counter *counter,
 	if (!is_software_counter(counter))
 		cpuctx->active_oncpu--;
 	ctx->nr_active--;
-	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
+	if (counter->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 }

@@ -282,7 +282,7 @@ group_sched_out(struct perf_counter *group_counter,
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
 		counter_sched_out(counter, cpuctx, ctx);

-	if (group_counter->hw_event.exclusive)
+	if (group_counter->attr.exclusive)
 		cpuctx->exclusive = 0;
 }

@@ -550,7 +550,7 @@ counter_sched_in(struct perf_counter *counter,
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;

-	if (counter->hw_event.exclusive)
+	if (counter->attr.exclusive)
 		cpuctx->exclusive = 1;

 	return 0;
@@ -642,7 +642,7 @@ static int group_can_go_on(struct perf_counter *counter,
 	 * If this group is exclusive and there are already
 	 * counters on the CPU, it can't go on.
 	 */
-	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
+	if (counter->attr.exclusive && cpuctx->active_oncpu)
 		return 0;
 	/*
 	 * Otherwise, try to add it if all previous groups were able
@@ -725,7 +725,7 @@ static void __perf_install_in_context(void *info)
 		 */
 		if (leader != counter)
 			group_sched_out(leader, cpuctx, ctx);
-		if (leader->hw_event.pinned) {
+		if (leader->attr.pinned) {
 			update_group_times(leader);
 			leader->state = PERF_COUNTER_STATE_ERROR;
 		}
@@ -849,7 +849,7 @@ static void __perf_counter_enable(void *info)
 		 */
 		if (leader != counter)
 			group_sched_out(leader, cpuctx, ctx);
-		if (leader->hw_event.pinned) {
+		if (leader->attr.pinned) {
 			update_group_times(leader);
 			leader->state = PERF_COUNTER_STATE_ERROR;
 		}
@@ -927,7 +927,7 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 	/*
 	 * not supported on inherited counters
 	 */
-	if (counter->hw_event.inherit)
+	if (counter->attr.inherit)
 		return -EINVAL;

 	atomic_add(refresh, &counter->event_limit);
@@ -1094,7 +1094,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 	 */
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		if (counter->state <= PERF_COUNTER_STATE_OFF ||
-		    !counter->hw_event.pinned)
+		    !counter->attr.pinned)
 			continue;
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
@@ -1122,7 +1122,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		 * ignore pinned counters since we did them already.
 		 */
 		if (counter->state <= PERF_COUNTER_STATE_OFF ||
-		    counter->hw_event.pinned)
+		    counter->attr.pinned)
 			continue;

 		/*
@@ -1204,11 +1204,11 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 			interrupts = 2*sysctl_perf_counter_limit/HZ;
 		}

-		if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
+		if (!counter->attr.freq || !counter->attr.sample_freq)
 			continue;

 		events = HZ * interrupts * counter->hw.sample_period;
-		period = div64_u64(events, counter->hw_event.sample_freq);
+		period = div64_u64(events, counter->attr.sample_freq);

 		delta = (s64)(1 + period - counter->hw.sample_period);
 		delta >>= 1;
@@ -1444,11 +1444,11 @@ static void free_counter(struct perf_counter *counter)
 	perf_pending_sync(counter);

 	atomic_dec(&nr_counters);
-	if (counter->hw_event.mmap)
+	if (counter->attr.mmap)
 		atomic_dec(&nr_mmap_tracking);
-	if (counter->hw_event.munmap)
+	if (counter->attr.munmap)
 		atomic_dec(&nr_munmap_tracking);
-	if (counter->hw_event.comm)
+	if (counter->attr.comm)
 		atomic_dec(&nr_comm_tracking);

 	if (counter->destroy)
@@ -1504,13 +1504,13 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	mutex_lock(&counter->child_mutex);
 	values[0] = perf_counter_read(counter);
 	n = 1;
-	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 		values[n++] = counter->total_time_enabled +
 			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
-	if (counter->hw_event.read_format & PERF_FORMAT_ID)
+	if (counter->attr.read_format & PERF_FORMAT_ID)
 		values[n++] = counter->id;
 	mutex_unlock(&counter->child_mutex);

@@ -1611,7 +1611,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 	int ret = 0;
 	u64 value;

-	if (!counter->hw_event.sample_period)
+	if (!counter->attr.sample_period)
 		return -EINVAL;

 	size = copy_from_user(&value, arg, sizeof(value));
@@ -1622,15 +1622,15 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 		return -EINVAL;

 	spin_lock_irq(&ctx->lock);
-	if (counter->hw_event.freq) {
+	if (counter->attr.freq) {
 		if (value > sysctl_perf_counter_limit) {
 			ret = -EINVAL;
 			goto unlock;
 		}

-		counter->hw_event.sample_freq = value;
+		counter->attr.sample_freq = value;
 	} else {
-		counter->hw_event.sample_period = value;
+		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;

 		perf_log_period(counter, value);
@@ -2299,7 +2299,7 @@ static void perf_output_end(struct perf_output_handle *handle)
 	struct perf_counter *counter = handle->counter;
 	struct perf_mmap_data *data = handle->data;

-	int wakeup_events = counter->hw_event.wakeup_events;
+	int wakeup_events = counter->attr.wakeup_events;

 	if (handle->overflow && wakeup_events) {
 		int events = atomic_inc_return(&data->events);
@@ -2339,7 +2339,7 @@ static void perf_counter_output(struct perf_counter *counter,
 				int nmi, struct pt_regs *regs, u64 addr)
 {
 	int ret;
-	u64 sample_type = counter->hw_event.sample_type;
+	u64 sample_type = counter->attr.sample_type;
 	struct perf_output_handle handle;
 	struct perf_event_header header;
 	u64 ip;
@@ -2441,7 +2441,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		perf_output_put(&handle, addr);

 	if (sample_type & PERF_SAMPLE_CONFIG)
-		perf_output_put(&handle, counter->hw_event.config);
+		perf_output_put(&handle, counter->attr.config);

 	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
@@ -2512,7 +2512,7 @@ static void perf_counter_comm_output(struct perf_counter *counter,
 static int perf_counter_comm_match(struct perf_counter *counter,
 				   struct perf_comm_event *comm_event)
 {
-	if (counter->hw_event.comm &&
+	if (counter->attr.comm &&
 	    comm_event->event.header.type == PERF_EVENT_COMM)
 		return 1;

@@ -2623,11 +2623,11 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
 				   struct perf_mmap_event *mmap_event)
 {
-	if (counter->hw_event.mmap &&
+	if (counter->attr.mmap &&
 	    mmap_event->event.header.type == PERF_EVENT_MMAP)
 		return 1;

-	if (counter->hw_event.munmap &&
+	if (counter->attr.munmap &&
 	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
 		return 1;

@@ -2907,8 +2907,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 	 * In case we exclude kernel IPs or are somehow not in interrupt
 	 * context, provide the next best thing, the user IP.
 	 */
-	if ((counter->hw_event.exclude_kernel || !regs) &&
-			!counter->hw_event.exclude_user)
+	if ((counter->attr.exclude_kernel || !regs) &&
+			!counter->attr.exclude_user)
 		regs = task_pt_regs(current);

 	if (regs) {
@@ -2982,14 +2982,14 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	if (!perf_swcounter_is_counting(counter))
 		return 0;

-	if (counter->hw_event.config != event_config)
+	if (counter->attr.config != event_config)
 		return 0;

 	if (regs) {
-		if (counter->hw_event.exclude_user && user_mode(regs))
+		if (counter->attr.exclude_user && user_mode(regs))
 			return 0;

-		if (counter->hw_event.exclude_kernel && !user_mode(regs))
+		if (counter->attr.exclude_kernel && !user_mode(regs))
 			return 0;
 	}

@@ -3252,12 +3252,12 @@ extern void ftrace_profile_disable(int);

 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->hw_event));
+	ftrace_profile_disable(perf_event_id(&counter->attr));
 }

 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->hw_event);
+	int event_id = perf_event_id(&counter->attr);
 	int ret;

 	ret = ftrace_profile_enable(event_id);
@@ -3265,7 +3265,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 		return NULL;

 	counter->destroy = tp_perf_counter_destroy;
-	counter->hw.sample_period = counter->hw_event.sample_period;
+	counter->hw.sample_period = counter->attr.sample_period;

 	return &perf_ops_generic;
 }
@@ -3287,7 +3287,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
 	 */
-	switch (perf_event_id(&counter->hw_event)) {
+	switch (perf_event_id(&counter->attr)) {
 	case PERF_COUNT_CPU_CLOCK:
 		pmu = &perf_ops_cpu_clock;

@@ -3319,7 +3319,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+perf_counter_alloc(struct perf_counter_attr *attr,
 		   int cpu,
 		   struct perf_counter_context *ctx,
 		   struct perf_counter *group_leader,
@@ -3352,36 +3352,36 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	mutex_init(&counter->mmap_mutex);

 	counter->cpu = cpu;
-	counter->hw_event = *hw_event;
+	counter->attr = *attr;
 	counter->group_leader = group_leader;
 	counter->pmu = NULL;
 	counter->ctx = ctx;
 	counter->oncpu = -1;

 	counter->state = PERF_COUNTER_STATE_INACTIVE;
-	if (hw_event->disabled)
+	if (attr->disabled)
 		counter->state = PERF_COUNTER_STATE_OFF;

 	pmu = NULL;

 	hwc = &counter->hw;
-	if (hw_event->freq && hw_event->sample_freq)
-		hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
+	if (attr->freq && attr->sample_freq)
+		hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
 	else
-		hwc->sample_period = hw_event->sample_period;
+		hwc->sample_period = attr->sample_period;

 	/*
 	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
 	 */
-	if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
 		goto done;

-	if (perf_event_raw(hw_event)) {
+	if (perf_event_raw(attr)) {
 		pmu = hw_perf_counter_init(counter);
 		goto done;
 	}

-	switch (perf_event_type(hw_event)) {
+	switch (perf_event_type(attr)) {
 	case PERF_TYPE_HARDWARE:
 		pmu = hw_perf_counter_init(counter);
 		break;
@@ -3409,11 +3409,11 @@ done:
 	counter->pmu = pmu;

 	atomic_inc(&nr_counters);
-	if (counter->hw_event.mmap)
+	if (counter->attr.mmap)
 		atomic_inc(&nr_mmap_tracking);
-	if (counter->hw_event.munmap)
+	if (counter->attr.munmap)
 		atomic_inc(&nr_munmap_tracking);
-	if (counter->hw_event.comm)
+	if (counter->attr.comm)
 		atomic_inc(&nr_comm_tracking);

 	return counter;
@@ -3424,17 +3424,17 @@ static atomic64_t perf_counter_id;
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
- * @hw_event_uptr: event type attributes for monitoring/sampling
+ * @attr_uptr: event type attributes for monitoring/sampling
  * @pid: target pid
  * @cpu: target cpu
  * @group_fd: group leader counter fd
  */
 SYSCALL_DEFINE5(perf_counter_open,
-		const struct perf_counter_hw_event __user *, hw_event_uptr,
+		const struct perf_counter_attr __user *, attr_uptr,
 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
 	struct perf_counter *counter, *group_leader;
-	struct perf_counter_hw_event hw_event;
+	struct perf_counter_attr attr;
 	struct perf_counter_context *ctx;
 	struct file *counter_file = NULL;
 	struct file *group_file = NULL;
@@ -3446,7 +3446,7 @@ SYSCALL_DEFINE5(perf_counter_open,
 	if (flags)
 		return -EINVAL;

-	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+	if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
 		return -EFAULT;

 	/*
@@ -3484,11 +3484,11 @@ SYSCALL_DEFINE5(perf_counter_open,
 		/*
 		 * Only a group leader can be exclusive or pinned
 		 */
-		if (hw_event.exclusive || hw_event.pinned)
+		if (attr.exclusive || attr.pinned)
 			goto err_put_context;
 	}

-	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
+	counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
 				     GFP_KERNEL);
 	ret = PTR_ERR(counter);
 	if (IS_ERR(counter))
@@ -3556,7 +3556,7 @@ inherit_counter(struct perf_counter *parent_counter,
 	if (parent_counter->parent)
 		parent_counter = parent_counter->parent;

-	child_counter = perf_counter_alloc(&parent_counter->hw_event,
+	child_counter = perf_counter_alloc(&parent_counter->attr,
 					   parent_counter->cpu, child_ctx,
 					   group_leader, GFP_KERNEL);
 	if (IS_ERR(child_counter))
@@ -3565,7 +3565,7 @@ inherit_counter(struct perf_counter *parent_counter,

 	/*
 	 * Make the child state follow the state of the parent counter,
-	 * not its hw_event.disabled bit. We hold the parent's mutex,
+	 * not its attr.disabled bit. We hold the parent's mutex,
 	 * so we won't race with perf_counter_{en, dis}able_family.
 	 */
 	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
@@ -3582,7 +3582,7 @@ inherit_counter(struct perf_counter *parent_counter,
 	/*
 	 * inherit into child's child as well:
 	 */
-	child_counter->hw_event.inherit = 1;
+	child_counter->attr.inherit = 1;

 	/*
 	 * Get a reference to the parent filp - we will fput it
@@ -3838,7 +3838,7 @@ int perf_counter_init_task(struct task_struct *child)
 		if (counter != counter->group_leader)
 			continue;

-		if (!counter->hw_event.inherit) {
+		if (!counter->attr.inherit) {
 			inherited_all = 0;
 			continue;
 		}