Diffstat (limited to 'kernel/perf_event.c')
 -rw-r--r--  kernel/perf_event.c | 152
 1 file changed, 72 insertions(+), 80 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a661e7991865..2f3fbf84215a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -15,6 +15,7 @@
 #include <linux/smp.h>
 #include <linux/file.h>
 #include <linux/poll.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/dcache.h>
 #include <linux/percpu.h>
@@ -56,21 +57,6 @@ static atomic_t nr_task_events __read_mostly;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-static inline bool perf_paranoid_tracepoint_raw(void)
-{
-        return sysctl_perf_event_paranoid > -1;
-}
-
-static inline bool perf_paranoid_cpu(void)
-{
-        return sysctl_perf_event_paranoid > 0;
-}
-
-static inline bool perf_paranoid_kernel(void)
-{
-        return sysctl_perf_event_paranoid > 1;
-}
-
 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
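The perf_paranoid_*() helpers are removed here rather than dropped outright; they appear to move to include/linux/perf_event.h so code outside this file (for instance the trace-event glue) can apply the same permission checks. A sketch of the header versions, assuming a straight move with unchanged thresholds:

    /* Sketch, assuming an unchanged move into include/linux/perf_event.h.
     * Paranoia levels: -1 permits raw tracepoint data, 0 gates CPU-wide
     * events, 1 gates kernel-level measurement for unprivileged users. */
    extern int sysctl_perf_event_paranoid;

    static inline bool perf_paranoid_tracepoint_raw(void)
    {
            return sysctl_perf_event_paranoid > -1;
    }

    static inline bool perf_paranoid_cpu(void)
    {
            return sysctl_perf_event_paranoid > 0;
    }

    static inline bool perf_paranoid_kernel(void)
    {
            return sysctl_perf_event_paranoid > 1;
    }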
@@ -96,10 +82,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)               { barrier(); }
 void __weak hw_perf_enable(void)                { barrier(); }
 
-void __weak hw_perf_event_setup(int cpu)        { barrier(); }
-void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
-void __weak hw_perf_event_setup_offline(int cpu){ barrier(); }
-
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
                struct perf_cpu_context *cpuctx,
@@ -112,25 +94,15 @@ void __weak perf_event_print_debug(void) { }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
 
-void __perf_disable(void)
-{
-        __get_cpu_var(perf_disable_count)++;
-}
-
-bool __perf_enable(void)
-{
-        return !--__get_cpu_var(perf_disable_count);
-}
-
 void perf_disable(void)
 {
-        __perf_disable();
-        hw_perf_disable();
+        if (!__get_cpu_var(perf_disable_count)++)
+                hw_perf_disable();
 }
 
 void perf_enable(void)
 {
-        if (__perf_enable())
+        if (!--__get_cpu_var(perf_disable_count))
                 hw_perf_enable();
 }
 
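This hunk folds __perf_disable()/__perf_enable() into their only callers. The net behaviour is a per-CPU recursion count: only the outermost perf_disable() touches the hardware, and only the matching outermost perf_enable() re-enables it. A minimal stand-alone model of that counting (ordinary user-space C for illustration, not kernel code; the names merely mirror the kernel's):

    #include <stdio.h>

    static int perf_disable_count;          /* per-CPU in the kernel */

    static void hw_perf_disable(void) { puts("PMU stopped"); }
    static void hw_perf_enable(void)  { puts("PMU running"); }

    static void perf_disable(void)
    {
            if (!perf_disable_count++)      /* 0 -> 1: outermost call */
                    hw_perf_disable();
    }

    static void perf_enable(void)
    {
            if (!--perf_disable_count)      /* 1 -> 0: last matching call */
                    hw_perf_enable();
    }

    int main(void)
    {
            perf_disable();                 /* prints "PMU stopped" */
            perf_disable();                 /* nested: no hardware access */
            perf_enable();                  /* nested: no hardware access */
            perf_enable();                  /* prints "PMU running" */
            return 0;
    }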
@@ -1193,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
         struct perf_event_context *ctx = task->perf_event_ctxp;
         struct perf_event_context *next_ctx;
         struct perf_event_context *parent;
-        struct pt_regs *regs;
         int do_switch = 1;
 
-        regs = task_pt_regs(task);
-        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
         if (likely(!ctx || !cpuctx->task_ctx))
                 return;
@@ -1553,12 +1523,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                  */
                 if (interrupts == MAX_INTERRUPTS) {
                         perf_log_throttle(event, 1);
+                        perf_disable();
                         event->pmu->unthrottle(event);
+                        perf_enable();
                 }
 
                 if (!event->attr.freq || !event->attr.sample_freq)
                         continue;
 
+                perf_disable();
                 event->pmu->read(event);
                 now = atomic64_read(&event->count);
                 delta = now - hwc->freq_count_stamp;
@@ -1566,6 +1539,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                 if (delta > 0)
                         perf_adjust_period(event, TICK_NSEC, delta);
+                perf_enable();
         }
         raw_spin_unlock(&ctx->lock);
 }
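These two hunks widen the perf_disable() bracket inside perf_ctx_adjust_freq(): both unthrottling and the read/period-adjust sequence reprogram the PMU, so they now run with the hardware stopped. The resulting shape of the per-event loop body, condensed for illustration (not a verbatim quote of the function):

    if (interrupts == MAX_INTERRUPTS) {
            perf_log_throttle(event, 1);
            perf_disable();
            event->pmu->unthrottle(event);  /* PMU mutation: hw stopped */
            perf_enable();
    }

    if (!event->attr.freq || !event->attr.sample_freq)
            continue;

    perf_disable();
    event->pmu->read(event);                /* sample the count... */
    /* ...derive delta and retune the period under the same bracket */
    perf_enable();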
@@ -1575,9 +1549,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-        if (!ctx->nr_events)
-                return;
-
         raw_spin_lock(&ctx->lock);
 
         /* Rotate the first entry last of non-pinned groups */
@@ -1590,19 +1561,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
         struct perf_cpu_context *cpuctx;
         struct perf_event_context *ctx;
+        int rotate = 0;
 
         if (!atomic_read(&nr_events))
                 return;
 
         cpuctx = &__get_cpu_var(perf_cpu_context);
-        ctx = curr->perf_event_ctxp;
+        if (cpuctx->ctx.nr_events &&
+            cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                rotate = 1;
 
-        perf_disable();
+        ctx = curr->perf_event_ctxp;
+        if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+                rotate = 1;
 
         perf_ctx_adjust_freq(&cpuctx->ctx);
         if (ctx)
                 perf_ctx_adjust_freq(ctx);
 
+        if (!rotate)
+                return;
+
+        perf_disable();
         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1614,7 +1594,6 @@ void perf_event_task_tick(struct task_struct *curr)
         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
         perf_enable();
 }
 
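The new rotate flag short-circuits the tick: round-robin rotation only matters when a context holds more events than it has active on the PMU (nr_events != nr_active), i.e. when events are being multiplexed; the nr_events check previously inside rotate_ctx() moves out here. A hypothetical helper making the predicate explicit (ctx_needs_rotation() is not in the patch, it just restates the two tests above):

    /* Hypothetical helper; restates the rotate tests in this hunk. */
    static int ctx_needs_rotation(struct perf_event_context *ctx)
    {
            /* Nothing to rotate if the context is empty or if every
             * event already has a hardware slot (no multiplexing). */
            return ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active;
    }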
@@ -2610,7 +2589,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
         if (user_locked > user_lock_limit)
                 extra = user_locked - user_lock_limit;
 
-        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+        lock_limit = rlimit(RLIMIT_MEMLOCK);
         lock_limit >>= PAGE_SHIFT;
         locked = vma->vm_mm->locked_vm + extra;
 
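rlimit() replaces the open-coded rlim_cur dereference. Assuming it matches the helper added to include/linux/sched.h in this same kernel series, it is a thin accessor over current's signal struct:

    /* Presumed definition (include/linux/sched.h, same kernel series): */
    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                            unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }

    static inline unsigned long rlimit(unsigned int limit)
    {
            return task_rlimit(current, limit);
    }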
@@ -2806,6 +2785,12 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
         return NULL;
 }
 
+__weak
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+}
+
+
 /*
  * Output
  */
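perf_arch_fetch_caller_regs() is a new __weak hook: the empty stub is what non-implementing architectures link against, and an architecture overrides it simply by defining a non-weak symbol with the same signature. A hypothetical x86-flavoured override (field names are illustrative and not taken from this patch):

    /* Hypothetical arch override; a real one must fill in enough of
     * pt_regs for the callchain/sample code to walk from 'ip'.
     * 'skip' says how many caller frames to discard first. */
    void perf_arch_fetch_caller_regs(struct pt_regs *regs,
                                     unsigned long ip, int skip)
    {
            memset(regs, 0, sizeof(*regs));  /* no stale register state */
            regs->ip = ip;                   /* report the requested caller */
    }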
@@ -3391,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
                                      struct perf_task_event *task_event)
 {
         struct perf_output_handle handle;
-        int size;
         struct task_struct *task = task_event->task;
-        int ret;
+        unsigned long flags;
+        int size, ret;
+
+        /*
+         * If this CPU attempts to acquire an rq lock held by a CPU spinning
+         * in perf_output_lock() from interrupt context, it's game over.
+         */
+        local_irq_save(flags);
 
         size  = task_event->event_id.header.size;
         ret = perf_output_begin(&handle, event, size, 0, 0);
 
-        if (ret)
+        if (ret) {
+                local_irq_restore(flags);
                 return;
+        }
 
         task_event->event_id.pid = perf_event_pid(event, task);
         task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3410,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
         perf_output_put(&handle, task_event->event_id);
 
         perf_output_end(&handle);
+        local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
@@ -4123,8 +4117,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
         if (rctx < 0)
                 return;
 
-        data.addr = addr;
-        data.raw  = NULL;
+        perf_sample_data_init(&data, addr);
 
         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 
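This and the later hunks replace by-hand initialization of perf_sample_data with perf_sample_data_init(). Centralizing it matters because 'raw' must always be cleared before the data is consumed; a caller that sets only 'addr' would leak stack garbage into the raw-sample path. The helper is not shown in this diff; inferred from the call sites, it presumably looks like:

    /* Inferred helper; consistent with every conversion in this patch: */
    static inline void perf_sample_data_init(struct perf_sample_data *data,
                                             u64 addr)
    {
            data->addr = addr;
            data->raw  = NULL;  /* must never be left uninitialized */
    }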
@@ -4169,11 +4162,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
         struct perf_event *event;
         u64 period;
 
         event = container_of(hrtimer, struct perf_event, hw.hrtimer);
         event->pmu->read(event);
 
-        data.addr = 0;
-        data.raw  = NULL;
+        perf_sample_data_init(&data, 0);
         data.period = event->hw.last_period;
         regs = get_irq_regs();
         /*
@@ -4335,26 +4327,20 @@ static const struct pmu perf_ops_task_clock = {
 #ifdef CONFIG_EVENT_TRACING
 
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-                   int entry_size)
+                   int entry_size, struct pt_regs *regs)
 {
+        struct perf_sample_data data;
         struct perf_raw_record raw = {
                 .size = entry_size,
                 .data = record,
         };
 
-        struct perf_sample_data data = {
-                .addr = addr,
-                .raw = &raw,
-        };
-
-        struct pt_regs *regs = get_irq_regs();
-
-        if (!regs)
-                regs = task_pt_regs(current);
+        perf_sample_data_init(&data, addr);
+        data.raw = &raw;
 
         /* Trace events already protected against recursion */
         do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
                          &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
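perf_tp_event() used to guess at registers via get_irq_regs()/task_pt_regs(); now every caller must supply a pt_regs snapshot, which pairs with the __weak perf_arch_fetch_caller_regs() hook added earlier in this patch. A hypothetical call site in the tracepoint glue (the snapshot step is assumed, not shown in this diff):

    /* Hypothetical caller after this change: snapshot registers at the
     * probe site, then hand them to perf_tp_event(). */
    struct pt_regs regs;

    perf_arch_fetch_caller_regs(&regs, _THIS_IP_, 1);
    perf_tp_event(event_id, addr, count, record, entry_size, &regs);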
@@ -4370,7 +4356,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-        ftrace_profile_disable(event->attr.config);
+        perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4384,7 +4370,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
             !capable(CAP_SYS_ADMIN))
                 return ERR_PTR(-EPERM);
 
-        if (ftrace_profile_enable(event->attr.config))
+        if (perf_trace_enable(event->attr.config))
                 return NULL;
 
         event->destroy = tp_perf_event_destroy;
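ftrace_profile_enable()/ftrace_profile_disable() become perf_trace_enable()/perf_trace_disable(); this looks like a pure rename of the trace-event glue to perf naming. Presumed declarations, assuming the prototypes changed in name only:

    /* Presumed prototypes after the rename (name change only): */
    extern int  perf_trace_enable(int event_id);
    extern void perf_trace_disable(int event_id);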
@@ -4463,8 +4449,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
         struct perf_sample_data sample;
         struct pt_regs *regs = data;
 
-        sample.raw = NULL;
-        sample.addr = bp->attr.bp_addr;
+        perf_sample_data_init(&sample, bp->attr.bp_addr);
 
         if (!perf_exclude_event(bp, regs))
                 perf_swevent_add(bp, 1, 1, &sample, regs);
@@ -5392,18 +5377,26 @@ int perf_event_init_task(struct task_struct *child)
         return ret;
 }
 
+static void __init perf_event_init_all_cpus(void)
+{
+        int cpu;
+        struct perf_cpu_context *cpuctx;
+
+        for_each_possible_cpu(cpu) {
+                cpuctx = &per_cpu(perf_cpu_context, cpu);
+                __perf_event_init_context(&cpuctx->ctx, NULL);
+        }
+}
+
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
         struct perf_cpu_context *cpuctx;
 
         cpuctx = &per_cpu(perf_cpu_context, cpu);
-        __perf_event_init_context(&cpuctx->ctx, NULL);
 
         spin_lock(&perf_resource_lock);
         cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
         spin_unlock(&perf_resource_lock);
-
-        hw_perf_event_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5443,20 +5436,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
                 perf_event_init_cpu(cpu);
                 break;
 
-        case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-                hw_perf_event_setup_online(cpu);
-                break;
-
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 perf_event_exit_cpu(cpu);
                 break;
 
-        case CPU_DEAD:
-                hw_perf_event_setup_offline(cpu);
-                break;
-
         default:
                 break;
         }
@@ -5474,6 +5458,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
 
 void __init perf_event_init(void)
 {
+        perf_event_init_all_cpus();
         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                         (void *)(long)smp_processor_id());
         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
@@ -5481,13 +5466,16 @@ void __init perf_event_init(void)
         register_cpu_notifier(&perf_cpu_nb);
 }
 
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
+                                        struct sysdev_class_attribute *attr,
+                                        char *buf)
 {
         return sprintf(buf, "%d\n", perf_reserved_percpu);
 }
 
 static ssize_t
 perf_set_reserve_percpu(struct sysdev_class *class,
+                        struct sysdev_class_attribute *attr,
                         const char *buf,
                         size_t count)
 {
@@ -5516,13 +5504,17 @@ perf_set_reserve_percpu(struct sysdev_class *class,
         return count;
 }
 
-static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
+static ssize_t perf_show_overcommit(struct sysdev_class *class,
+                                    struct sysdev_class_attribute *attr,
+                                    char *buf)
 {
         return sprintf(buf, "%d\n", perf_overcommit);
 }
 
 static ssize_t
-perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
+perf_set_overcommit(struct sysdev_class *class,
+                    struct sysdev_class_attribute *attr,
+                    const char *buf, size_t count)
 {
         unsigned long val;
         int err;
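The extra struct sysdev_class_attribute *attr parameter in these last two hunks tracks a sysdev-core interface change; the handler bodies ignore it. For context, such show/store pairs are typically bound with SYSDEV_CLASS_ATTR, roughly as below (the attribute definitions sit outside this diff, so treat this as a sketch):

    /* Sketch of the bindings these handlers plug into: */
    static SYSDEV_CLASS_ATTR(reserve_percpu, 0644,
                             perf_show_reserve_percpu,
                             perf_set_reserve_percpu);

    static SYSDEV_CLASS_ATTR(overcommit, 0644,
                             perf_show_overcommit,
                             perf_set_overcommit);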