author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-06-02 13:22:16 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-06-02 15:45:33 -0400
commit     0d48696f87e3618b0d35bd3e4e9d7c188d51e7de (patch)
tree       633d37089c368c0cc2c4f8120014d57df215bb53 /kernel
parent     08247e31ca79b8f02cce47b7e8120797a8726606 (diff)
perf_counter: Rename perf_counter_hw_event => perf_counter_attr
The structure isn't hw only and when I read event, I think about those
things that fall out the other end. Rename the thing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
Cc: Stephane Eranian <eranian@googlemail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/perf_counter.c  116
1 files changed, 58 insertions, 58 deletions
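For reference, the only user-visible change in this patch is the type passed to sys_perf_counter_open(): struct perf_counter_hw_event becomes struct perf_counter_attr, and the counter->hw_event member becomes counter->attr. The sketch below is illustrative only, not part of the patch; it assumes a 2009-era <linux/perf_counter.h> and a __NR_perf_counter_open syscall number for the target architecture, and it elides event selection (attr.config):

/* Illustrative sketch only -- not part of this patch. */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int open_counter(pid_t pid, int cpu)
{
        struct perf_counter_attr attr;  /* was: struct perf_counter_hw_event */

        memset(&attr, 0, sizeof(attr));
        attr.disabled      = 1;         /* field names are unchanged ...          */
        attr.sample_period = 100000;    /* ... only the struct/member prefix is   */

        /* &attr replaces the old hw_event pointer as the first argument */
        return syscall(__NR_perf_counter_open, &attr, pid, cpu,
                       -1 /* group_fd */, 0 /* flags */);
}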
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index abe2f3b6c424..317cef78a388 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -260,7 +260,7 @@ counter_sched_out(struct perf_counter *counter,
         if (!is_software_counter(counter))
                 cpuctx->active_oncpu--;
         ctx->nr_active--;
-        if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
+        if (counter->attr.exclusive || !cpuctx->active_oncpu)
                 cpuctx->exclusive = 0;
 }
 
@@ -282,7 +282,7 @@ group_sched_out(struct perf_counter *group_counter,
         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                 counter_sched_out(counter, cpuctx, ctx);
 
-        if (group_counter->hw_event.exclusive)
+        if (group_counter->attr.exclusive)
                 cpuctx->exclusive = 0;
 }
 
@@ -550,7 +550,7 @@ counter_sched_in(struct perf_counter *counter,
         cpuctx->active_oncpu++;
         ctx->nr_active++;
 
-        if (counter->hw_event.exclusive)
+        if (counter->attr.exclusive)
                 cpuctx->exclusive = 1;
 
         return 0;
@@ -642,7 +642,7 @@ static int group_can_go_on(struct perf_counter *counter,
         * If this group is exclusive and there are already
         * counters on the CPU, it can't go on.
         */
-        if (counter->hw_event.exclusive && cpuctx->active_oncpu)
+        if (counter->attr.exclusive && cpuctx->active_oncpu)
                 return 0;
         /*
         * Otherwise, try to add it if all previous groups were able
@@ -725,7 +725,7 @@ static void __perf_install_in_context(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
@@ -849,7 +849,7 @@ static void __perf_counter_enable(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
@@ -927,7 +927,7 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
         /*
         * not supported on inherited counters
         */
-        if (counter->hw_event.inherit)
+        if (counter->attr.inherit)
                 return -EINVAL;
 
         atomic_add(refresh, &counter->event_limit);
@@ -1094,7 +1094,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
         */
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   !counter->hw_event.pinned)
+                   !counter->attr.pinned)
                        continue;
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
@@ -1122,7 +1122,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 * ignore pinned counters since we did them already.
                 */
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   counter->hw_event.pinned)
+                   counter->attr.pinned)
                        continue;
 
                /*
@@ -1204,11 +1204,11 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
                        interrupts = 2*sysctl_perf_counter_limit/HZ;
                }
 
-               if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
+               if (!counter->attr.freq || !counter->attr.sample_freq)
                        continue;
 
                events = HZ * interrupts * counter->hw.sample_period;
-               period = div64_u64(events, counter->hw_event.sample_freq);
+               period = div64_u64(events, counter->attr.sample_freq);
 
                delta = (s64)(1 + period - counter->hw.sample_period);
                delta >>= 1;
@@ -1444,11 +1444,11 @@ static void free_counter(struct perf_counter *counter)
         perf_pending_sync(counter);
 
         atomic_dec(&nr_counters);
-        if (counter->hw_event.mmap)
+        if (counter->attr.mmap)
                 atomic_dec(&nr_mmap_tracking);
-        if (counter->hw_event.munmap)
+        if (counter->attr.munmap)
                 atomic_dec(&nr_munmap_tracking);
-        if (counter->hw_event.comm)
+        if (counter->attr.comm)
                 atomic_dec(&nr_comm_tracking);
 
         if (counter->destroy)
@@ -1504,13 +1504,13 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
         mutex_lock(&counter->child_mutex);
         values[0] = perf_counter_read(counter);
         n = 1;
-        if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+        if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                 values[n++] = counter->total_time_enabled +
                         atomic64_read(&counter->child_total_time_enabled);
-        if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+        if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                 values[n++] = counter->total_time_running +
                         atomic64_read(&counter->child_total_time_running);
-        if (counter->hw_event.read_format & PERF_FORMAT_ID)
+        if (counter->attr.read_format & PERF_FORMAT_ID)
                 values[n++] = counter->id;
         mutex_unlock(&counter->child_mutex);
 
@@ -1611,7 +1611,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
         int ret = 0;
         u64 value;
 
-        if (!counter->hw_event.sample_period)
+        if (!counter->attr.sample_period)
                 return -EINVAL;
 
         size = copy_from_user(&value, arg, sizeof(value));
@@ -1622,15 +1622,15 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
                 return -EINVAL;
 
         spin_lock_irq(&ctx->lock);
-        if (counter->hw_event.freq) {
+        if (counter->attr.freq) {
                 if (value > sysctl_perf_counter_limit) {
                         ret = -EINVAL;
                         goto unlock;
                 }
 
-                counter->hw_event.sample_freq = value;
+                counter->attr.sample_freq = value;
         } else {
-                counter->hw_event.sample_period = value;
+                counter->attr.sample_period = value;
                 counter->hw.sample_period = value;
 
                 perf_log_period(counter, value);
@@ -2299,7 +2299,7 @@ static void perf_output_end(struct perf_output_handle *handle)
         struct perf_counter *counter = handle->counter;
         struct perf_mmap_data *data = handle->data;
 
-        int wakeup_events = counter->hw_event.wakeup_events;
+        int wakeup_events = counter->attr.wakeup_events;
 
         if (handle->overflow && wakeup_events) {
                 int events = atomic_inc_return(&data->events);
@@ -2339,7 +2339,7 @@ static void perf_counter_output(struct perf_counter *counter,
                                 int nmi, struct pt_regs *regs, u64 addr)
 {
         int ret;
-        u64 sample_type = counter->hw_event.sample_type;
+        u64 sample_type = counter->attr.sample_type;
         struct perf_output_handle handle;
         struct perf_event_header header;
         u64 ip;
@@ -2441,7 +2441,7 @@ static void perf_counter_output(struct perf_counter *counter,
                 perf_output_put(&handle, addr);
 
         if (sample_type & PERF_SAMPLE_CONFIG)
-                perf_output_put(&handle, counter->hw_event.config);
+                perf_output_put(&handle, counter->attr.config);
 
         if (sample_type & PERF_SAMPLE_CPU)
                 perf_output_put(&handle, cpu_entry);
@@ -2512,7 +2512,7 @@ static void perf_counter_comm_output(struct perf_counter *counter,
 static int perf_counter_comm_match(struct perf_counter *counter,
                                    struct perf_comm_event *comm_event)
 {
-        if (counter->hw_event.comm &&
+        if (counter->attr.comm &&
             comm_event->event.header.type == PERF_EVENT_COMM)
                 return 1;
 
@@ -2623,11 +2623,11 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
                                    struct perf_mmap_event *mmap_event)
 {
-        if (counter->hw_event.mmap &&
+        if (counter->attr.mmap &&
             mmap_event->event.header.type == PERF_EVENT_MMAP)
                 return 1;
 
-        if (counter->hw_event.munmap &&
+        if (counter->attr.munmap &&
             mmap_event->event.header.type == PERF_EVENT_MUNMAP)
                 return 1;
 
@@ -2907,8 +2907,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
         * In case we exclude kernel IPs or are somehow not in interrupt
         * context, provide the next best thing, the user IP.
         */
-        if ((counter->hw_event.exclude_kernel || !regs) &&
-                        !counter->hw_event.exclude_user)
+        if ((counter->attr.exclude_kernel || !regs) &&
+                        !counter->attr.exclude_user)
                 regs = task_pt_regs(current);
 
         if (regs) {
@@ -2982,14 +2982,14 @@ static int perf_swcounter_match(struct perf_counter *counter,
         if (!perf_swcounter_is_counting(counter))
                 return 0;
 
-        if (counter->hw_event.config != event_config)
+        if (counter->attr.config != event_config)
                 return 0;
 
         if (regs) {
-                if (counter->hw_event.exclude_user && user_mode(regs))
+                if (counter->attr.exclude_user && user_mode(regs))
                         return 0;
 
-                if (counter->hw_event.exclude_kernel && !user_mode(regs))
+                if (counter->attr.exclude_kernel && !user_mode(regs))
                         return 0;
         }
 
@@ -3252,12 +3252,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-        ftrace_profile_disable(perf_event_id(&counter->hw_event));
+        ftrace_profile_disable(perf_event_id(&counter->attr));
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-        int event_id = perf_event_id(&counter->hw_event);
+        int event_id = perf_event_id(&counter->attr);
         int ret;
 
         ret = ftrace_profile_enable(event_id);
@@ -3265,7 +3265,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
                 return NULL;
 
         counter->destroy = tp_perf_counter_destroy;
-        counter->hw.sample_period = counter->hw_event.sample_period;
+        counter->hw.sample_period = counter->attr.sample_period;
 
         return &perf_ops_generic;
 }
@@ -3287,7 +3287,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
         * to be kernel events, and page faults are never hypervisor
         * events.
         */
-        switch (perf_event_id(&counter->hw_event)) {
+        switch (perf_event_id(&counter->attr)) {
         case PERF_COUNT_CPU_CLOCK:
                 pmu = &perf_ops_cpu_clock;
 
@@ -3319,7 +3319,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+perf_counter_alloc(struct perf_counter_attr *attr,
                    int cpu,
                    struct perf_counter_context *ctx,
                    struct perf_counter *group_leader,
@@ -3352,36 +3352,36 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
         mutex_init(&counter->mmap_mutex);
 
         counter->cpu = cpu;
-        counter->hw_event = *hw_event;
+        counter->attr = *attr;
         counter->group_leader = group_leader;
         counter->pmu = NULL;
         counter->ctx = ctx;
         counter->oncpu = -1;
 
         counter->state = PERF_COUNTER_STATE_INACTIVE;
-        if (hw_event->disabled)
+        if (attr->disabled)
                 counter->state = PERF_COUNTER_STATE_OFF;
 
         pmu = NULL;
 
         hwc = &counter->hw;
-        if (hw_event->freq && hw_event->sample_freq)
-                hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
+        if (attr->freq && attr->sample_freq)
+                hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
         else
-                hwc->sample_period = hw_event->sample_period;
+                hwc->sample_period = attr->sample_period;
 
         /*
         * we currently do not support PERF_SAMPLE_GROUP on inherited counters
         */
-        if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
+        if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                 goto done;
 
-        if (perf_event_raw(hw_event)) {
+        if (perf_event_raw(attr)) {
                 pmu = hw_perf_counter_init(counter);
                 goto done;
         }
 
-        switch (perf_event_type(hw_event)) {
+        switch (perf_event_type(attr)) {
         case PERF_TYPE_HARDWARE:
                 pmu = hw_perf_counter_init(counter);
                 break;
@@ -3409,11 +3409,11 @@ done:
         counter->pmu = pmu;
 
         atomic_inc(&nr_counters);
-        if (counter->hw_event.mmap)
+        if (counter->attr.mmap)
                 atomic_inc(&nr_mmap_tracking);
-        if (counter->hw_event.munmap)
+        if (counter->attr.munmap)
                 atomic_inc(&nr_munmap_tracking);
-        if (counter->hw_event.comm)
+        if (counter->attr.comm)
                 atomic_inc(&nr_comm_tracking);
 
         return counter;
@@ -3424,17 +3424,17 @@ static atomic64_t perf_counter_id;
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
- * @hw_event_uptr: event type attributes for monitoring/sampling
+ * @attr_uptr: event type attributes for monitoring/sampling
  * @pid: target pid
  * @cpu: target cpu
  * @group_fd: group leader counter fd
  */
 SYSCALL_DEFINE5(perf_counter_open,
-                const struct perf_counter_hw_event __user *, hw_event_uptr,
+                const struct perf_counter_attr __user *, attr_uptr,
                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
         struct perf_counter *counter, *group_leader;
-        struct perf_counter_hw_event hw_event;
+        struct perf_counter_attr attr;
         struct perf_counter_context *ctx;
         struct file *counter_file = NULL;
         struct file *group_file = NULL;
@@ -3446,7 +3446,7 @@ SYSCALL_DEFINE5(perf_counter_open,
         if (flags)
                 return -EINVAL;
 
-        if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+        if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
                 return -EFAULT;
 
         /*
@@ -3484,11 +3484,11 @@ SYSCALL_DEFINE5(perf_counter_open,
                /*
                 * Only a group leader can be exclusive or pinned
                 */
-               if (hw_event.exclusive || hw_event.pinned)
+               if (attr.exclusive || attr.pinned)
                        goto err_put_context;
         }
 
-        counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
+        counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
                                      GFP_KERNEL);
         ret = PTR_ERR(counter);
         if (IS_ERR(counter))
@@ -3556,7 +3556,7 @@ inherit_counter(struct perf_counter *parent_counter,
         if (parent_counter->parent)
                 parent_counter = parent_counter->parent;
 
-        child_counter = perf_counter_alloc(&parent_counter->hw_event,
+        child_counter = perf_counter_alloc(&parent_counter->attr,
                                            parent_counter->cpu, child_ctx,
                                            group_leader, GFP_KERNEL);
         if (IS_ERR(child_counter))
@@ -3565,7 +3565,7 @@ inherit_counter(struct perf_counter *parent_counter,
 
         /*
         * Make the child state follow the state of the parent counter,
-        * not its hw_event.disabled bit. We hold the parent's mutex,
+        * not its attr.disabled bit. We hold the parent's mutex,
         * so we won't race with perf_counter_{en, dis}able_family.
         */
         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
@@ -3582,7 +3582,7 @@ inherit_counter(struct perf_counter *parent_counter,
         /*
         * inherit into child's child as well:
         */
-        child_counter->hw_event.inherit = 1;
+        child_counter->attr.inherit = 1;
 
         /*
         * Get a reference to the parent filp - we will fput it
@@ -3838,7 +3838,7 @@ int perf_counter_init_task(struct task_struct *child)
                if (counter != counter->group_leader)
                        continue;
 
-               if (!counter->hw_event.inherit) {
+               if (!counter->attr.inherit) {
                        inherited_all = 0;
                        continue;
                }