author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
commit	72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree	3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /arch/x86/kernel/cpu/perf_event.c
parent	23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent	55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	27
1 file changed, 11 insertions, 16 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d146596..04921017abe0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
 				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled = handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 
 	return NOTIFY_STOP;
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
 	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
 
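
The conversion above is mechanical: each place that took the address of this CPU's instance via &__get_cpu_var(var) and then dereferenced a field becomes a single __this_cpu_read()/__this_cpu_write() (or __this_cpu_or/__this_cpu_and/__this_cpu_sub for read-modify-write), which on x86 typically compiles to one %gs-segment-prefixed instruction instead of an address calculation followed by a memory access. A minimal sketch of the pattern, using a hypothetical per-cpu struct demo_stats (not from the kernel tree) and the 2.6.38-era <linux/percpu.h> API:

#include <linux/percpu.h>

struct demo_stats {
	unsigned int	enabled;
	unsigned long	count;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

/* Old style: compute this CPU's address once, then dereference fields. */
static void demo_count_old(void)
{
	struct demo_stats *s = &__get_cpu_var(demo_stats);

	if (s->enabled)
		s->count += 1;
}

/* New style: per-field this_cpu ops, no explicit per-cpu address calculation. */
static void demo_count_new(void)
{
	if (__this_cpu_read(demo_stats.enabled))
		__this_cpu_add(demo_stats.count, 1);
}

As in the hunks above, the double-underscore forms assume the caller already cannot be migrated (NMI context, or preemption disabled by the surrounding code); the non-underscore this_cpu_* variants would be the safe choice otherwise.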