aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-12-18 10:30:05 -0500
committerTejun Heo <tj@kernel.org>2010-12-30 06:22:03 -0500
commit7b543a5334ff4ea2e3ad3b777fc23cdb8072a988 (patch)
tree8fbdf5275411190f78f3bdee3c40e87285bf8e30
parent0a3aee0da4402aa19b66e458038533c896fb80c6 (diff)
x86: Replace uses of current_cpu_data with this_cpu ops
Replace all uses of current_cpu_data with this_cpu operations on the per cpu structure cpu_info. The scalar accesses are replaced with the matching this_cpu ops which results in smaller and more efficient code. In the long run, it might be a good idea to remove cpu_data() macro too and use per_cpu macro directly. tj: updated description Cc: Yinghai Lu <yinghai@kernel.org> Cc: Ingo Molnar <mingo@elte.hu> Acked-by: H. Peter Anvin <hpa@zytor.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--arch/x86/include/asm/processor.h3
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c2
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c14
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c2
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/smpboot.c12
-rw-r--r--arch/x86/oprofile/op_model_ppro.c8
-rw-r--r--drivers/staging/lirc/lirc_serial.c4
11 files changed, 28 insertions, 29 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cae9c3cb95cf..c6efecf85a6a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS];
141#ifdef CONFIG_SMP 141#ifdef CONFIG_SMP
142DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 142DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
143#define cpu_data(cpu) per_cpu(cpu_info, cpu) 143#define cpu_data(cpu) per_cpu(cpu_info, cpu)
144#define current_cpu_data __get_cpu_var(cpu_info)
145#else 144#else
145#define cpu_info boot_cpu_data
146#define cpu_data(cpu) boot_cpu_data 146#define cpu_data(cpu) boot_cpu_data
147#define current_cpu_data boot_cpu_data
148#endif 147#endif
149 148
150extern const struct seq_operations cpuinfo_op; 149extern const struct seq_operations cpuinfo_op;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3f838d537392..8accfe3b34d7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
516{ 516{
517 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 517 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
518 518
519 if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) { 519 if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
520 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; 520 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
521 /* Make LAPIC timer preferrable over percpu HPET */ 521 /* Make LAPIC timer preferrable over percpu HPET */
522 lapic_clockevent.rating = 150; 522 lapic_clockevent.rating = 150;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
668 668
669bool cpu_has_amd_erratum(const int *erratum) 669bool cpu_has_amd_erratum(const int *erratum)
670{ 670{
671 struct cpuinfo_x86 *cpu = &current_cpu_data; 671 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
672 int osvw_id = *erratum++; 672 int osvw_id = *erratum++;
673 u32 range; 673 u32 range;
674 u32 ms; 674 u32 ms;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 42a36046823e..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
521 521
522 *rc = -ENODEV; 522 *rc = -ENODEV;
523 523
524 if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) 524 if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
525 return; 525 return;
526 526
527 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); 527 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 17ad03366211..453c616e923d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -266,7 +266,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
266 line_size = l2.line_size; 266 line_size = l2.line_size;
267 lines_per_tag = l2.lines_per_tag; 267 lines_per_tag = l2.lines_per_tag;
268 /* cpu_data has errata corrections for K7 applied */ 268 /* cpu_data has errata corrections for K7 applied */
269 size_in_kb = current_cpu_data.x86_cache_size; 269 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
270 break; 270 break;
271 case 3: 271 case 3:
272 if (!l3.val) 272 if (!l3.val)
@@ -288,7 +288,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
288 eax->split.type = types[leaf]; 288 eax->split.type = types[leaf];
289 eax->split.level = levels[leaf]; 289 eax->split.level = levels[leaf];
290 eax->split.num_threads_sharing = 0; 290 eax->split.num_threads_sharing = 0;
291 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; 291 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
292 292
293 293
294 if (assoc == 0xffff) 294 if (assoc == 0xffff)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0c746af6c5eb..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
1159 1159
1160 WARN_ON(smp_processor_id() != data); 1160 WARN_ON(smp_processor_id() != data);
1161 1161
1162 if (mce_available(&current_cpu_data)) { 1162 if (mce_available(__this_cpu_ptr(&cpu_info))) {
1163 machine_check_poll(MCP_TIMESTAMP, 1163 machine_check_poll(MCP_TIMESTAMP,
1164 &__get_cpu_var(mce_poll_banks)); 1164 &__get_cpu_var(mce_poll_banks));
1165 } 1165 }
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
1767static int mce_resume(struct sys_device *dev) 1767static int mce_resume(struct sys_device *dev)
1768{ 1768{
1769 __mcheck_cpu_init_generic(); 1769 __mcheck_cpu_init_generic();
1770 __mcheck_cpu_init_vendor(&current_cpu_data); 1770 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
1771 1771
1772 return 0; 1772 return 0;
1773} 1773}
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
1775static void mce_cpu_restart(void *data) 1775static void mce_cpu_restart(void *data)
1776{ 1776{
1777 del_timer_sync(&__get_cpu_var(mce_timer)); 1777 del_timer_sync(&__get_cpu_var(mce_timer));
1778 if (!mce_available(&current_cpu_data)) 1778 if (!mce_available(__this_cpu_ptr(&cpu_info)))
1779 return; 1779 return;
1780 __mcheck_cpu_init_generic(); 1780 __mcheck_cpu_init_generic();
1781 __mcheck_cpu_init_timer(); 1781 __mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@ static void mce_restart(void)
1790/* Toggle features for corrected errors */ 1790/* Toggle features for corrected errors */
1791static void mce_disable_ce(void *all) 1791static void mce_disable_ce(void *all)
1792{ 1792{
1793 if (!mce_available(&current_cpu_data)) 1793 if (!mce_available(__this_cpu_ptr(&cpu_info)))
1794 return; 1794 return;
1795 if (all) 1795 if (all)
1796 del_timer_sync(&__get_cpu_var(mce_timer)); 1796 del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
1799 1799
1800static void mce_enable_ce(void *all) 1800static void mce_enable_ce(void *all)
1801{ 1801{
1802 if (!mce_available(&current_cpu_data)) 1802 if (!mce_available(__this_cpu_ptr(&cpu_info)))
1803 return; 1803 return;
1804 cmci_reenable(); 1804 cmci_reenable();
1805 cmci_recheck(); 1805 cmci_recheck();
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
2022 unsigned long action = *(unsigned long *)h; 2022 unsigned long action = *(unsigned long *)h;
2023 int i; 2023 int i;
2024 2024
2025 if (!mce_available(&current_cpu_data)) 2025 if (!mce_available(__this_cpu_ptr(&cpu_info)))
2026 return; 2026 return;
2027 2027
2028 if (!(action & CPU_TASKS_FROZEN)) 2028 if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
2040 unsigned long action = *(unsigned long *)h; 2040 unsigned long action = *(unsigned long *)h;
2041 int i; 2041 int i;
2042 2042
2043 if (!mce_available(&current_cpu_data)) 2043 if (!mce_available(__this_cpu_ptr(&cpu_info)))
2044 return; 2044 return;
2045 2045
2046 if (!(action & CPU_TASKS_FROZEN)) 2046 if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void)
130 unsigned long flags; 130 unsigned long flags;
131 int banks; 131 int banks;
132 132
133 if (!mce_available(&current_cpu_data) || !cmci_supported(&banks)) 133 if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
134 return; 134 return;
135 local_irq_save(flags); 135 local_irq_save(flags);
136 machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); 136 machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 57d1868a86aa..dae1c0766d9a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -445,7 +445,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
445{ 445{
446 trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); 446 trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
447 if (!need_resched()) { 447 if (!need_resched()) {
448 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 448 if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
449 clflush((void *)&current_thread_info()->flags); 449 clflush((void *)&current_thread_info()->flags);
450 450
451 __monitor((void *)&current_thread_info()->flags, 0, 0); 451 __monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -460,7 +460,7 @@ static void mwait_idle(void)
460{ 460{
461 if (!need_resched()) { 461 if (!need_resched()) {
462 trace_power_start(POWER_CSTATE, 1, smp_processor_id()); 462 trace_power_start(POWER_CSTATE, 1, smp_processor_id());
463 if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) 463 if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
464 clflush((void *)&current_thread_info()->flags); 464 clflush((void *)&current_thread_info()->flags);
465 465
466 __monitor((void *)&current_thread_info()->flags, 0, 0); 466 __monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ff4e5a113a5b..0720071086d1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -430,7 +430,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
430 430
431 cpumask_set_cpu(cpu, c->llc_shared_map); 431 cpumask_set_cpu(cpu, c->llc_shared_map);
432 432
433 if (current_cpu_data.x86_max_cores == 1) { 433 if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
434 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); 434 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
435 c->booted_cores = 1; 435 c->booted_cores = 1;
436 return; 436 return;
@@ -1094,7 +1094,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1094 1094
1095 preempt_disable(); 1095 preempt_disable();
1096 smp_cpu_index_default(); 1096 smp_cpu_index_default();
1097 current_cpu_data = boot_cpu_data; 1097 memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
1098 cpumask_copy(cpu_callin_mask, cpumask_of(0)); 1098 cpumask_copy(cpu_callin_mask, cpumask_of(0));
1099 mb(); 1099 mb();
1100 /* 1100 /*
@@ -1397,11 +1397,11 @@ static inline void mwait_play_dead(void)
1397 int i; 1397 int i;
1398 void *mwait_ptr; 1398 void *mwait_ptr;
1399 1399
1400 if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT)) 1400 if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
1401 return; 1401 return;
1402 if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH)) 1402 if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
1403 return; 1403 return;
1404 if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) 1404 if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1405 return; 1405 return;
1406 1406
1407 eax = CPUID_MWAIT_LEAF; 1407 eax = CPUID_MWAIT_LEAF;
@@ -1452,7 +1452,7 @@ static inline void mwait_play_dead(void)
1452 1452
1453static inline void hlt_play_dead(void) 1453static inline void hlt_play_dead(void)
1454{ 1454{
1455 if (current_cpu_data.x86 >= 4) 1455 if (__this_cpu_read(cpu_info.x86) >= 4)
1456 wbinvd(); 1456 wbinvd();
1457 1457
1458 while (1) { 1458 while (1) {
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d769cda54082..94b745045e45 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
95 * counter width: 95 * counter width:
96 */ 96 */
97 if (!(eax.split.version_id == 0 && 97 if (!(eax.split.version_id == 0 &&
98 current_cpu_data.x86 == 6 && 98 __this_cpu_read(cpu_info.x86) == 6 &&
99 current_cpu_data.x86_model == 15)) { 99 __this_cpu_read(cpu_info.x86_model) == 15)) {
100 100
101 if (counter_width < eax.split.bit_width) 101 if (counter_width < eax.split.bit_width)
102 counter_width = eax.split.bit_width; 102 counter_width = eax.split.bit_width;
@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void)
235 eax.full = cpuid_eax(0xa); 235 eax.full = cpuid_eax(0xa);
236 236
237 /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ 237 /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
238 if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && 238 if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
239 current_cpu_data.x86_model == 15) { 239 __this_cpu_read(cpu_info.x86_model) == 15) {
240 eax.split.version_id = 2; 240 eax.split.version_id = 2;
241 eax.split.num_counters = 2; 241 eax.split.num_counters = 2;
242 eax.split.bit_width = 40; 242 eax.split.bit_width = 40;
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 971844bbee28..9bcf149c4260 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
377 duty_cycle = new_duty_cycle; 377 duty_cycle = new_duty_cycle;
378 freq = new_freq; 378 freq = new_freq;
379 379
380 loops_per_sec = current_cpu_data.loops_per_jiffy; 380 loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
381 loops_per_sec *= HZ; 381 loops_per_sec *= HZ;
382 382
383 /* How many clocks in a microsecond?, avoiding long long divide */ 383 /* How many clocks in a microsecond?, avoiding long long divide */
@@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
398 dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " 398 dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
399 "clk/jiffy=%ld, pulse=%ld, space=%ld, " 399 "clk/jiffy=%ld, pulse=%ld, space=%ld, "
400 "conv_us_to_clocks=%ld\n", 400 "conv_us_to_clocks=%ld\n",
401 freq, duty_cycle, current_cpu_data.loops_per_jiffy, 401 freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
402 pulse_width, space_width, conv_us_to_clocks); 402 pulse_width, space_width, conv_us_to_clocks);
403 return 0; 403 return 0;
404} 404}