author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-07 20:02:58 -0500
commit	72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree	3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /arch/x86/kernel
parent	23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent	55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
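The recurring pattern in this series: __get_cpu_var() yields an lvalue for this CPU's copy of a per-CPU variable (an explicit address calculation plus a dereference), whereas the this_cpu operations let x86 fold the whole access into a single %gs-segment-relative instruction. A minimal before/after sketch of the conversion, assuming a hypothetical per-CPU counter demo_count (not a variable from this merge):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_count);	/* hypothetical example */

static void demo_old_style(void)
{
	/* Old: compute this CPU's address, then store/load through it. */
	__get_cpu_var(demo_count) = 0;
	__get_cpu_var(demo_count)++;
}

static void demo_new_style(void)
{
	/* New: each access is one segment-prefixed instruction on x86. */
	__this_cpu_write(demo_count, 0);
	__this_cpu_inc(demo_count);

	if (__this_cpu_read(demo_count) > 10)
		__this_cpu_write(demo_count, 0);
}

The double-underscore forms assume the caller already runs pinned to one CPU (preemption disabled), matching the __get_cpu_var() code they replace; the non-underscored this_cpu_* variants are usable without disabling preemption.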
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/apic/apic.c               |  2
 arch/x86/kernel/apic/io_apic.c            |  4
 arch/x86/kernel/apic/x2apic_uv_x.c        |  8
 arch/x86/kernel/cpu/amd.c                 |  2
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c |  4
 arch/x86/kernel/cpu/intel_cacheinfo.c     |  4
 arch/x86/kernel/cpu/mcheck/mce.c          | 20
 arch/x86/kernel/cpu/mcheck/mce_intel.c    |  2
 arch/x86/kernel/cpu/perf_event.c          | 27
 arch/x86/kernel/cpu/perf_event_intel.c    |  4
 arch/x86/kernel/ftrace.c                  |  6
 arch/x86/kernel/hw_breakpoint.c           | 12
 arch/x86/kernel/irq.c                     |  6
 arch/x86/kernel/irq_32.c                  |  4
 arch/x86/kernel/kprobes.c                 | 14
 arch/x86/kernel/process.c                 |  4
 arch/x86/kernel/smpboot.c                 | 14
 arch/x86/kernel/tsc.c                     |  2
 18 files changed, 67 insertions(+), 72 deletions(-)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ce65d449b750..79e6baa8aa0a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
-	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+	if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
 		/* Make LAPIC timer preferrable over percpu HPET */
 		lapic_clockevent.rating = 150;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 52735a710c30..697dc34b7b87 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		unsigned int irr;
 		struct irq_desc *desc;
 		struct irq_cfg *cfg;
-		irq = __get_cpu_var(vector_irq)[vector];
+		irq = __this_cpu_read(vector_irq[vector]);
 
 		if (irq == -1)
 			continue;
@@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__get_cpu_var(vector_irq)[vector] = -1;
+		__this_cpu_write(vector_irq[vector], -1);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2a3f2a7db243..ecca5f41ad2c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	else if (!strcmp(oem_table_id, "UVX"))
 		uv_system_type = UV_X2APIC;
 	else if (!strcmp(oem_table_id, "UVH")) {
-		__get_cpu_var(x2apic_extra_bits) =
-			pnodeid << uvh_apicid.s.pnode_shift;
+		__this_cpu_write(x2apic_extra_bits,
+			pnodeid << uvh_apicid.s.pnode_shift);
 		uv_system_type = UV_NON_UNIQUE_APIC;
 		uv_set_apicid_hibit();
 		return 1;
@@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
 	unsigned int id;
 
 	WARN_ON(preemptible() && num_online_cpus() > 1);
-	id = x | __get_cpu_var(x2apic_extra_bits);
+	id = x | __this_cpu_read(x2apic_extra_bits);
 
 	return id;
 }
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
 
 bool cpu_has_amd_erratum(const int *erratum)
 {
-	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
 	int osvw_id = *erratum++;
 	u32 range;
 	u32 ms;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
 
 	*rc = -ENODEV;
 
-	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
 		return;
 
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 static void query_values_on_cpu(void *_err)
 {
 	int *err = _err;
-	struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+	struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 
 	*err = query_current_values_with_pending_wait(data);
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9ecf81f9b90f..7283e98deaae 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		line_size = l2.line_size;
 		lines_per_tag = l2.lines_per_tag;
 		/* cpu_data has errata corrections for K7 applied */
-		size_in_kb = current_cpu_data.x86_cache_size;
+		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
 		break;
 	case 3:
 		if (!l3.val)
@@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	eax->split.num_threads_sharing = 0;
-	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
 
 
 	if (assoc == 0xffff)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 
 static int msr_to_offset(u32 msr)
 {
-	unsigned bank = __get_cpu_var(injectm.bank);
+	unsigned bank = __this_cpu_read(injectm.bank);
 
 	if (msr == rip_msr)
 		return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
 
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset < 0)
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(&current_cpu_data)) {
+	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
 	}
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
 static int mce_resume(struct sys_device *dev)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(&current_cpu_data);
+	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
 
 	return 0;
 }
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
 static void mce_cpu_restart(void *data)
 {
 	del_timer_sync(&__get_cpu_var(mce_timer));
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (all)
 		del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
 	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d146596..04921017abe0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
 				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled = handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 
 	return NOTIFY_STOP;
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
 	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
 
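The x86_pmu_start_txn()/x86_pmu_cancel_txn() hunks above also exercise the read-modify-write forms (__this_cpu_or, __this_cpu_and, __this_cpu_sub) in place of arithmetic through a cached &__get_cpu_var() pointer. A standalone sketch of that shape, with a hypothetical per-CPU struct (names and flag bit are illustrative, not the perf code):

#include <linux/percpu.h>

#define DEMO_TXN	0x1		/* hypothetical transaction flag */

struct demo_events {			/* hypothetical per-CPU state */
	unsigned int group_flag;
	int n_added;
	int n_txn;
};
static DEFINE_PER_CPU(struct demo_events, demo_events);

static void demo_start_txn(void)
{
	__this_cpu_or(demo_events.group_flag, DEMO_TXN);
	__this_cpu_write(demo_events.n_txn, 0);
}

static void demo_cancel_txn(void)
{
	__this_cpu_and(demo_events.group_flag, ~DEMO_TXN);
	/* Back out whatever the aborted transaction accumulated. */
	__this_cpu_sub(demo_events.n_added, __this_cpu_read(demo_events.n_txn));
}

Dropping the cached pointer is what enables the single-instruction encoding: each access names the per-CPU variable directly, so the compiler can emit a %gs-relative operand instead of a register-indirect one.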
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 24e390e40f2e..008835c1d79c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-		if (!__get_cpu_var(cpu_hw_events).enabled)
+		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
 
 		intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 
 static void intel_pmu_reset(void)
 {
-	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
 	unsigned long flags;
 	int idx;
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 298448656b60..382eb2936d4d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -170,9 +170,9 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-	__get_cpu_var(save_modifying_code) = modifying_code;
+	__this_cpu_write(save_modifying_code, modifying_code);
 
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -186,7 +186,7 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	/* Finish all executions before clearing nmi_running */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c594254507..02f07634d265 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		return -EBUSY;
 
 	set_debugreg(info->address, i);
-	__get_cpu_var(cpu_debugreg[i]) = info->address;
+	__this_cpu_write(cpu_debugreg[i], info->address);
 
 	dr7 = &__get_cpu_var(cpu_dr7);
 	*dr7 |= encode_dr7(i, info->len, info->type);
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 
 void hw_breakpoint_restore(void)
 {
-	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
-	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
-	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
-	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
+	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
+	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
+	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
 	set_debugreg(current->thread.debugreg6, 6);
-	set_debugreg(__get_cpu_var(cpu_dr7), 7);
+	set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec0175f986..3a43caa3beb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	exit_idle();
 	irq_enter();
 
-	irq = __get_cpu_var(vector_irq)[vector];
+	irq = __this_cpu_read(vector_irq[vector]);
 
 	if (!handle_irq(irq, regs)) {
 		ack_APIC_irq();
@@ -350,12 +350,12 @@ void fixup_irqs(void)
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
-		if (__get_cpu_var(vector_irq)[vector] < 0)
+		if (__this_cpu_read(vector_irq[vector]) < 0)
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr & (1 << (vector % 32))) {
-			irq = __get_cpu_var(vector_irq)[vector];
+			irq = __this_cpu_read(vector_irq[vector]);
 
 			data = irq_get_irq_data(irq);
 			raw_spin_lock(&desc->lock);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f207751..48ff6dcffa02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	u32 *isp, arg1, arg2;
 
 	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __get_cpu_var(hardirq_ctx);
+	irqctx = __this_cpu_read(hardirq_ctx);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void)
 
 	if (local_softirq_pending()) {
 		curctx = current_thread_info();
-		irqctx = __get_cpu_var(softirq_ctx);
+		irqctx = __this_cpu_read(softirq_ctx);
 		irqctx->tinfo.task = curctx->task;
 		irqctx->tinfo.previous_esp = current_stack_pointer;
 
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5940282bd2f9..d91c477b3f62 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
 	if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		preempt_enable_no_resched();
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
 			return 1;
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
 		regs->orig_ax = ~0UL;
 
-		__get_cpu_var(current_kprobe) = &op->kp;
+		__this_cpu_write(current_kprobe, &op->kp);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
-		__get_cpu_var(current_kprobe) = NULL;
+		__this_cpu_write(current_kprobe, NULL);
 	}
 	preempt_enable_no_resched();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c852041bfc3d..09c08a1c706f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
 	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -462,7 +462,7 @@ static void mwait_idle(void)
 	if (!need_resched()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		trace_cpu_idle(1, smp_processor_id());
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ee886fe10ef4..c7149c96d079 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 
 	cpumask_set_cpu(cpu, c->llc_shared_map);
 
-	if (current_cpu_data.x86_max_cores == 1) {
+	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
@@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 	preempt_disable();
 	smp_cpu_index_default();
-	current_cpu_data = boot_cpu_data;
+	memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
 	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
@@ -1383,7 +1383,7 @@ void play_dead_common(void)
 
 	mb();
 	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
@@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void)
 	int i;
 	void *mwait_ptr;
 
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
 		return;
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
-	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
 		return;
 
 	eax = CPUID_MWAIT_LEAF;
@@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void)
 
 static inline void hlt_play_dead(void)
 {
-	if (current_cpu_data.x86 >= 4)
+	if (__this_cpu_read(cpu_info.x86) >= 4)
 		wbinvd();
 
 	while (1) {
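The current_cpu_data conversions in this file show both replacement forms used throughout the merge: __this_cpu_read(cpu_info.field) for a single field, and __this_cpu_ptr(&cpu_info) where an API such as cpu_has() wants a struct cpuinfo_x86 pointer. A hedged sketch combining the two (the helper itself is illustrative and not from this merge; cpu_info, cpu_has() and the feature bit are the real kernel objects used above):

#include <linux/types.h>
#include <linux/percpu.h>
#include <asm/processor.h>

/* Illustrative helper, not from this merge: roughly, "will this CPU
 * take the hlt_play_dead() path and want a cache flush first?" */
static bool demo_cpu_wants_wbinvd_before_halt(void)
{
	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);	/* pointer form */

	if (__this_cpu_read(cpu_info.x86) < 4)			/* field form */
		return false;

	return !cpu_has(c, X86_FEATURE_MWAIT);
}

Both forms read the per-CPU data of the executing CPU, so like the code they replace they assume the caller stays on that CPU (preemption disabled, or a CPU-bound context such as play_dead).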
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 356a0d455cf9..03d2ea82f35a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void)
 
 	local_irq_save(flags);
 
-	__get_cpu_var(cyc2ns_offset) = 0;
+	__this_cpu_write(cyc2ns_offset, 0);
 	offset = cyc2ns_suspend - sched_clock();
 
 	for_each_possible_cpu(cpu)