author    Peter Zijlstra <peterz@infradead.org>    2016-01-11 09:00:50 -0500
committer Ingo Molnar <mingo@kernel.org>           2016-01-21 12:54:24 -0500
commit    fae3fde65138b6071b1b0e0b567d4058a8b6a88c (patch)
tree      a99a98df4999f5a07dae0333c5c0545dd980d0a0
parent    32132a3d0d5d6f127388be3e3fd7759f798c2eb4 (diff)
perf: Collapse and fix event_function_call() users
There is one common bug left in all the event_function_call() users: between
loading ctx->task and getting to the remote_function(), ctx->task can already
have been changed.

Therefore we need to double check and retry if ctx->task != current.

Insert another trampoline specific to event_function_call() that checks for
this and further validates state. This also allows getting rid of the
active/inactive functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/perf_event.h        2
-rw-r--r--  kernel/events/core.c            365
-rw-r--r--  kernel/events/hw_breakpoint.c     2
3 files changed, 168 insertions(+), 201 deletions(-)
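For readers following the control flow, the fix described in the commit message boils down to a double-check-and-retry pattern around the cross-CPU call: the IPI trampoline re-checks that it landed on the task it was aimed at, and the caller retries (or mutates the context locally under ctx->lock) when it did not. The sketch below is a minimal, simplified illustration of that pattern only; the types, current_task(), task_function_call(), lock_ctx() and unlock_ctx() are stand-ins for this sketch, not the kernel's actual definitions (those are in the hunks that follow).

#include <errno.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel structures involved. */
struct task;
struct ctx {
	struct task *task;      /* NULL for a CPU-scoped context */
	bool         is_active; /* context currently scheduled in */
};
struct event {
	struct ctx *ctx;
};

typedef void (*event_f)(struct event *, struct ctx *, void *);

struct event_function_struct {
	struct event *event;
	event_f       func;
	void         *data;
};

/* Stand-ins: the task running on this CPU, a remote call helper, locking. */
extern struct task *current_task(void);
extern int task_function_call(struct task *task, int (*func)(void *), void *info);
extern void lock_ctx(struct ctx *ctx);
extern void unlock_ctx(struct ctx *ctx);

/*
 * Trampoline executed on the remote CPU. The IPI was sent without
 * holding ctx->lock, so ctx->task may have changed in the meantime;
 * detect that and ask the caller to retry.
 */
static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct ctx *ctx = efs->event->ctx;

	if (ctx->task && ctx->task != current_task())
		return -EAGAIN;          /* raced with a context switch */

	lock_ctx(ctx);
	efs->func(efs->event, ctx, efs->data);
	unlock_ctx(ctx);
	return 0;
}

static void event_function_call(struct event *event, event_f func, void *data)
{
	struct ctx *ctx = event->ctx;
	struct event_function_struct efs = {
		.event = event, .func = func, .data = data,
	};

	for (;;) {
		if (!task_function_call(ctx->task, event_function, &efs))
			return;          /* ran remotely without racing */

		lock_ctx(ctx);
		if (!ctx->is_active) {
			/* Context is not scheduled in: mutate it locally. */
			func(event, ctx, data);
			unlock_ctx(ctx);
			return;
		}
		unlock_ctx(ctx);
		/* It became active again on some CPU; retry the IPI. */
	}
}

The kernel version additionally handles CPU-scoped events (ctx->task == NULL) via cpu_function_call() and revalidates cpuctx->task_ctx and ctx->is_active under ctx->lock, as the diff below shows.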
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a48f16a..6612732d8fd0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1044,7 +1044,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
 extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 66c9ad4f8707..6620432491f6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -126,6 +126,28 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
 	return data.ret;
 }
 
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+			  struct perf_event_context *ctx)
+{
+	raw_spin_lock(&cpuctx->ctx.lock);
+	if (ctx)
+		raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+			    struct perf_event_context *ctx)
+{
+	if (ctx)
+		raw_spin_unlock(&ctx->lock);
+	raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
 /*
  * On task ctx scheduling...
  *
@@ -158,21 +180,96 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
  * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
  */
 
-static void event_function_call(struct perf_event *event,
-				int (*active)(void *),
-				void (*inactive)(void *),
-				void *data)
+typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
+			struct perf_event_context *, void *);
+
+struct event_function_struct {
+	struct perf_event *event;
+	event_f func;
+	void *data;
+};
+
+static int event_function(void *info)
+{
+	struct event_function_struct *efs = info;
+	struct perf_event *event = efs->event;
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct perf_event_context *task_ctx = cpuctx->task_ctx;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	/*
+	 * Since we do the IPI call without holding ctx->lock things can have
+	 * changed, double check we hit the task we set out to hit.
+	 *
+	 * If ctx->task == current, we know things must remain valid because
+	 * we have IRQs disabled so we cannot schedule.
+	 */
+	if (ctx->task) {
+		if (ctx->task != current)
+			return -EAGAIN;
+
+		WARN_ON_ONCE(task_ctx != ctx);
+	} else {
+		WARN_ON_ONCE(&cpuctx->ctx != ctx);
+	}
+
+	perf_ctx_lock(cpuctx, task_ctx);
+	/*
+	 * Now that we hold locks, double check state. Paranoia pays.
+	 */
+	if (task_ctx) {
+		WARN_ON_ONCE(task_ctx->task != current);
+		/*
+		 * We only use event_function_call() on established contexts,
+		 * and event_function() is only ever called when active (or
+		 * rather, we'll have bailed in task_function_call() or the
+		 * above ctx->task != current test), therefore we must have
+		 * ctx->is_active here.
+		 */
+		WARN_ON_ONCE(!ctx->is_active);
+		/*
+		 * And since we have ctx->is_active, cpuctx->task_ctx must
+		 * match.
+		 */
+		WARN_ON_ONCE(cpuctx->task_ctx != task_ctx);
+	}
+	efs->func(event, cpuctx, ctx, efs->data);
+	perf_ctx_unlock(cpuctx, task_ctx);
+
+	return 0;
+}
+
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+	struct event_function_struct efs = {
+		.event = event,
+		.func = func,
+		.data = data,
+	};
+
+	int ret = event_function(&efs);
+	WARN_ON_ONCE(ret);
+}
+
+static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
+	struct event_function_struct efs = {
+		.event = event,
+		.func = func,
+		.data = data,
+	};
 
 	if (!task) {
-		cpu_function_call(event->cpu, active, data);
+		cpu_function_call(event->cpu, event_function, &efs);
 		return;
 	}
 
 again:
-	if (!task_function_call(task, active, data))
+	if (!task_function_call(task, event_function, &efs))
 		return;
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -185,7 +282,7 @@ again:
 		raw_spin_unlock_irq(&ctx->lock);
 		goto again;
 	}
-	inactive(data);
+	func(event, NULL, ctx, data);
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -400,28 +497,6 @@ static inline u64 perf_event_clock(struct perf_event *event)
 	return event->clock();
 }
 
-static inline struct perf_cpu_context *
-__get_cpu_context(struct perf_event_context *ctx)
-{
-	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
-}
-
-static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
-			  struct perf_event_context *ctx)
-{
-	raw_spin_lock(&cpuctx->ctx.lock);
-	if (ctx)
-		raw_spin_lock(&ctx->lock);
-}
-
-static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
-			    struct perf_event_context *ctx)
-{
-	if (ctx)
-		raw_spin_unlock(&ctx->lock);
-	raw_spin_unlock(&cpuctx->ctx.lock);
-}
-
 #ifdef CONFIG_CGROUP_PERF
 
 static inline bool
@@ -1684,38 +1759,22 @@ group_sched_out(struct perf_event *group_event,
 		cpuctx->exclusive = 0;
 }
 
-struct remove_event {
-	struct perf_event *event;
-	bool detach_group;
-};
-
-static void ___perf_remove_from_context(void *info)
-{
-	struct remove_event *re = info;
-	struct perf_event *event = re->event;
-	struct perf_event_context *ctx = event->ctx;
-
-	if (re->detach_group)
-		perf_group_detach(event);
-	list_del_event(event, ctx);
-}
-
 /*
  * Cross CPU call to remove a performance event
  *
  * We disable the event on the hardware level first. After that we
  * remove it from the context list.
  */
-static int __perf_remove_from_context(void *info)
+static void
+__perf_remove_from_context(struct perf_event *event,
+			   struct perf_cpu_context *cpuctx,
+			   struct perf_event_context *ctx,
+			   void *info)
 {
-	struct remove_event *re = info;
-	struct perf_event *event = re->event;
-	struct perf_event_context *ctx = event->ctx;
-	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	bool detach_group = (unsigned long)info;
 
-	raw_spin_lock(&ctx->lock);
 	event_sched_out(event, cpuctx, ctx);
-	if (re->detach_group)
+	if (detach_group)
 		perf_group_detach(event);
 	list_del_event(event, ctx);
 
@@ -1726,17 +1785,11 @@ static int __perf_remove_from_context(void *info)
 			cpuctx->task_ctx = NULL;
 		}
 	}
-	raw_spin_unlock(&ctx->lock);
-
-	return 0;
 }
 
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
- * CPU events are removed with a smp call. For task events we only
- * call when the task is on a CPU.
- *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
  * remains valid. This is OK when called from perf_release since
@@ -1746,71 +1799,31 @@ static int __perf_remove_from_context(void *info)
  */
 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
-	struct perf_event_context *ctx = event->ctx;
-	struct remove_event re = {
-		.event = event,
-		.detach_group = detach_group,
-	};
-
-	lockdep_assert_held(&ctx->mutex);
+	lockdep_assert_held(&event->ctx->mutex);
 
 	event_function_call(event, __perf_remove_from_context,
-			    ___perf_remove_from_context, &re);
+			    (void *)(unsigned long)detach_group);
 }
 
 /*
  * Cross CPU call to disable a performance event
  */
-int __perf_event_disable(void *info)
-{
-	struct perf_event *event = info;
-	struct perf_event_context *ctx = event->ctx;
-	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-
-	/*
-	 * If this is a per-task event, need to check whether this
-	 * event's task is the current task on this cpu.
-	 *
-	 * Can trigger due to concurrent perf_event_context_sched_out()
-	 * flipping contexts around.
-	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
-		return -EINVAL;
-
-	raw_spin_lock(&ctx->lock);
-
-	/*
-	 * If the event is on, turn it off.
-	 * If it is in error state, leave it in error state.
-	 */
-	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
-		update_context_time(ctx);
-		update_cgrp_time_from_event(event);
-		update_group_times(event);
-		if (event == event->group_leader)
-			group_sched_out(event, cpuctx, ctx);
-		else
-			event_sched_out(event, cpuctx, ctx);
-		event->state = PERF_EVENT_STATE_OFF;
-	}
-
-	raw_spin_unlock(&ctx->lock);
-
-	return 0;
-}
-
-void ___perf_event_disable(void *info)
+static void __perf_event_disable(struct perf_event *event,
+				 struct perf_cpu_context *cpuctx,
+				 struct perf_event_context *ctx,
+				 void *info)
 {
-	struct perf_event *event = info;
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
+		return;
 
-	/*
-	 * Since we have the lock this context can't be scheduled
-	 * in, so we can change the state safely.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE) {
-		update_group_times(event);
-		event->state = PERF_EVENT_STATE_OFF;
-	}
+	update_context_time(ctx);
+	update_cgrp_time_from_event(event);
+	update_group_times(event);
+	if (event == event->group_leader)
+		group_sched_out(event, cpuctx, ctx);
+	else
+		event_sched_out(event, cpuctx, ctx);
+	event->state = PERF_EVENT_STATE_OFF;
 }
 
 /*
@@ -1837,8 +1850,12 @@ static void _perf_event_disable(struct perf_event *event)
 	}
 	raw_spin_unlock_irq(&ctx->lock);
 
-	event_function_call(event, __perf_event_disable,
-			    ___perf_event_disable, event);
+	event_function_call(event, __perf_event_disable, NULL);
+}
+
+void perf_event_disable_local(struct perf_event *event)
+{
+	event_function_local(event, __perf_event_disable, NULL);
 }
 
 /*
@@ -2202,44 +2219,29 @@ static void __perf_event_mark_enabled(struct perf_event *event)
 /*
  * Cross CPU call to enable a performance event
  */
-static int __perf_event_enable(void *info)
+static void __perf_event_enable(struct perf_event *event,
+				struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx,
+				void *info)
 {
-	struct perf_event *event = info;
-	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *leader = event->group_leader;
-	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-	struct perf_event_context *task_ctx = cpuctx->task_ctx;
-
-	/*
-	 * There's a time window between 'ctx->is_active' check
-	 * in perf_event_enable function and this place having:
-	 * - IRQs on
-	 * - ctx->lock unlocked
-	 *
-	 * where the task could be killed and 'ctx' deactivated
-	 * by perf_event_exit_task.
-	 */
-	if (!ctx->is_active)
-		return -EINVAL;
-
-	perf_ctx_lock(cpuctx, task_ctx);
-	WARN_ON_ONCE(&cpuctx->ctx != ctx && task_ctx != ctx);
-	update_context_time(ctx);
+	struct perf_event_context *task_ctx;
 
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
-		goto unlock;
-
-	/*
-	 * set current task's cgroup time reference point
-	 */
-	perf_cgroup_set_timestamp(current, ctx);
+		return;
 
+	update_context_time(ctx);
 	__perf_event_mark_enabled(event);
 
+	if (!ctx->is_active)
+		return;
+
 	if (!event_filter_match(event)) {
-		if (is_cgroup_event(event))
+		if (is_cgroup_event(event)) {
+			perf_cgroup_set_timestamp(current, ctx); // XXX ?
 			perf_cgroup_defer_enabled(event);
-		goto unlock;
+		}
+		return;
 	}
 
 /*
@@ -2247,19 +2249,13 @@ static int __perf_event_enable(void *info)
 	 * then don't put it on unless the group is on.
 	 */
 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
-		goto unlock;
+		return;
 
-	ctx_resched(cpuctx, task_ctx);
+	task_ctx = cpuctx->task_ctx;
+	if (ctx->task)
+		WARN_ON_ONCE(task_ctx != ctx);
 
-unlock:
-	perf_ctx_unlock(cpuctx, task_ctx);
-
-	return 0;
-}
-
-void ___perf_event_enable(void *info)
-{
-	__perf_event_mark_enabled((struct perf_event *)info);
+	ctx_resched(cpuctx, task_ctx);
 }
 
 /*
@@ -2292,8 +2288,7 @@ static void _perf_event_enable(struct perf_event *event)
 	event->state = PERF_EVENT_STATE_OFF;
 	raw_spin_unlock_irq(&ctx->lock);
 
-	event_function_call(event, __perf_event_enable,
-			    ___perf_event_enable, event);
+	event_function_call(event, __perf_event_enable, NULL);
 }
 
 /*
@@ -4095,36 +4090,14 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-struct period_event {
-	struct perf_event *event;
-	u64 value;
-};
-
-static void ___perf_event_period(void *info)
-{
-	struct period_event *pe = info;
-	struct perf_event *event = pe->event;
-	u64 value = pe->value;
-
-	if (event->attr.freq) {
-		event->attr.sample_freq = value;
-	} else {
-		event->attr.sample_period = value;
-		event->hw.sample_period = value;
-	}
-
-	local64_set(&event->hw.period_left, 0);
-}
-
-static int __perf_event_period(void *info)
+static void __perf_event_period(struct perf_event *event,
+				struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx,
+				void *info)
 {
-	struct period_event *pe = info;
-	struct perf_event *event = pe->event;
-	struct perf_event_context *ctx = event->ctx;
-	u64 value = pe->value;
+	u64 value = *((u64 *)info);
 	bool active;
 
-	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
 		event->attr.sample_freq = value;
 	} else {
@@ -4144,14 +4117,10 @@ static int __perf_event_period(void *info)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
-	raw_spin_unlock(&ctx->lock);
-
-	return 0;
 }
 
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
-	struct period_event pe = { .event = event, };
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -4166,10 +4135,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
-	pe.value = value;
-
-	event_function_call(event, __perf_event_period,
-			    ___perf_event_period, &pe);
+	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
 }
@@ -4941,7 +4907,7 @@ static void perf_pending_event(struct irq_work *entry)
 
 	if (event->pending_disable) {
 		event->pending_disable = 0;
-		__perf_event_disable(event);
+		perf_event_disable_local(event);
 	}
 
 	if (event->pending_wakeup) {
@@ -9239,13 +9205,14 @@ static void perf_event_init_cpu(int cpu)
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
 static void __perf_event_exit_context(void *__info)
 {
-	struct remove_event re = { .detach_group = true };
 	struct perf_event_context *ctx = __info;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct perf_event *event;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(&re);
-	rcu_read_unlock();
+	raw_spin_lock(&ctx->lock);
+	list_for_each_entry(event, &ctx->event_list, event_entry)
+		__perf_remove_from_context(event, cpuctx, ctx, (void *)(unsigned long)true);
+	raw_spin_unlock(&ctx->lock);
 }
 
 static void perf_event_exit_cpu_context(int cpu)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..3f8cb1e14588 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -444,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
 	 * current task.
 	 */
 	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
-		__perf_event_disable(bp);
+		perf_event_disable_local(bp);
 	else
 		perf_event_disable(bp);
 