Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Kconfig                           |   3
-rw-r--r--  arch/ia64/include/asm/meminit.h             |   2
-rw-r--r--  arch/ia64/include/asm/pgtable.h             |   3
-rw-r--r--  arch/ia64/include/asm/processor.h           |   6
-rw-r--r--  arch/ia64/kernel/acpi.c                     |  33
-rw-r--r--  arch/ia64/kernel/head.S                     |   4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c               |   2
-rw-r--r--  arch/ia64/kernel/mca_asm.S                  |   2
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S          |   2
-rw-r--r--  arch/ia64/kernel/setup.c                    |  27
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S              |  11
-rw-r--r--  arch/ia64/mm/contig.c                       |  99
-rw-r--r--  arch/ia64/mm/discontig.c                    | 129
-rw-r--r--  arch/ia64/mm/init.c                         |   4
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c           |   8
-rw-r--r--  arch/ia64/xen/irq_xen.c                     | 131
-rw-r--r--  arch/ia64/xen/time.c                        |  22
-rw-r--r--  arch/m68k/include/asm/pgtable_mm.h          |   4
-rw-r--r--  arch/m68k/sun3/mmu_emu.c                    |   8
-rw-r--r--  arch/mn10300/kernel/kprobes.c               |  61
-rw-r--r--  arch/powerpc/include/asm/smp.h              |   2
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c        |   4
-rw-r--r--  arch/powerpc/kernel/setup-common.c          |   4
-rw-r--r--  arch/powerpc/kernel/smp.c                   |   2
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c     |  14
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c        |   4
-rw-r--r--  arch/sparc/kernel/nmi.c                     |   8
-rw-r--r--  arch/x86/include/asm/percpu.h               | 104
-rw-r--r--  arch/x86/kernel/apic/nmi.c                  |   8
-rw-r--r--  arch/x86/kernel/cpu/common.c                |   8
-rw-r--r--  arch/x86/kernel/cpu/cpu_debug.c             |  30
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  |  28
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |  54
-rw-r--r--  arch/x86/kernel/ds.c                        |   4
-rw-r--r--  arch/x86/kvm/svm.c                          |  64
-rw-r--r--  arch/x86/xen/smp.c                          |  41
-rw-r--r--  arch/x86/xen/time.c                         |  24
37 files changed, 592 insertions(+), 372 deletions(-)
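The thread running through most of these diffs is making percpu symbols unique: once the per_cpu__ name prefix is dropped in favor of the dynamic percpu allocator, percpu variables share a namespace with ordinary C identifiers, so a percpu variable whose name matches a local variable in the same file can shadow or collide with it inside accessors such as per_cpu() and __get_cpu_var(). A minimal sketch of the collision being avoided (the names here are illustrative, not taken from the patch):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, counter);	/* percpu symbol named "counter" */

	static int read_counter(void)
	{
		int counter = 0;		/* local of the same name... */

		/* ...without the per_cpu__ prefix, the accessor below
		 * resolves against the local instead of the percpu
		 * symbol; hence renames like iic -> cpu_iic,
		 * dtl -> cpu_dtl, cpu_info -> ia64_cpu_info below */
		counter += __get_cpu_var(counter);
		return counter;
	}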
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-  extern unsigned long vmalloc_end;
+  extern unsigned long VMALLOC_END;
   extern struct page *vmem_map;
   extern int find_largest_hole(u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-  extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info)
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-		    hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
-	smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
 			node_cpuid[i++].phys_id =
 			    smp_boot_data.cpu_phys_id[cpu];
 	}
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
 #endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
 	// purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
-#endif
+#  endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();
 
 	/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 }
 
 /*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
-/*
  * Do the following calculations:
  *
  * 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
 	 * depends on the data returned by identify_cpu().  We break the dependency by
 	 * accessing cpu_data() through the canonical per-CPU address.
 	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif
 
+#ifdef	CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef	CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-		__cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);
 
-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
 	map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmalloc_end -= map_size;
-	vmem_map = (struct page *) vmalloc_end;
+	VMALLOC_END -= map_size;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 
 	/*
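The new setup_per_cpu_areas() above only describes a layout that already exists: one group, one PERCPU_PAGE_SIZE unit per possible CPU, with whatever the static and module-reserved parts do not use handed to the dynamic allocator as dyn_size. Once pcpu_setup_first_chunk() succeeds, ordinary dynamic percpu allocations are served from that leftover space; a minimal consumer sketch (illustrative only, not part of this patch):

	#include <linux/percpu.h>

	static unsigned long *hits;	/* dynamic percpu allocation */

	static int __init hits_init(void)
	{
		int cpu;

		/* carved out of the dyn_size registered with the
		 * first chunk above */
		hits = alloc_percpu(unsigned long);
		if (!hits)
			return -ENOMEM;

		cpu = get_cpu();	/* pin, then bump this CPU's slot */
		(*per_cpu_ptr(hits, cpu))++;
		put_cpu();
		return 0;
	}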
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..19c4b2195dce 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units		= 1;
+		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map		= &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..b9609c69343a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 				stat->deadlocks,
-				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
 				stat->shub_ptc_flushes_not_my_mm,
 				stat->deadlocks2,
 				stat->shub_ipi_flushes,
-				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE	15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 	if (xen_slab_ready) {
 		switch (vec) {
 		case IA64_TIMER_VECTOR:
-			snprintf(per_cpu(timer_name, cpu),
-				 sizeof(per_cpu(timer_name, cpu)),
+			snprintf(per_cpu(xen_timer_name, cpu),
+				 sizeof(per_cpu(xen_timer_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
 				action->handler, action->flags,
-				per_cpu(timer_name, cpu), action->dev_id);
-			per_cpu(timer_irq, cpu) = irq;
+				per_cpu(xen_timer_name, cpu), action->dev_id);
+			per_cpu(xen_timer_irq, cpu) = irq;
 			break;
 		case IA64_IPI_RESCHEDULE:
-			snprintf(per_cpu(resched_name, cpu),
-				 sizeof(per_cpu(resched_name, cpu)),
+			snprintf(per_cpu(xen_resched_name, cpu),
+				 sizeof(per_cpu(xen_resched_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(resched_name, cpu), action->dev_id);
-			per_cpu(resched_irq, cpu) = irq;
+				per_cpu(xen_resched_name, cpu), action->dev_id);
+			per_cpu(xen_resched_irq, cpu) = irq;
 			break;
 		case IA64_IPI_VECTOR:
-			snprintf(per_cpu(ipi_name, cpu),
-				 sizeof(per_cpu(ipi_name, cpu)),
+			snprintf(per_cpu(xen_ipi_name, cpu),
+				 sizeof(per_cpu(xen_ipi_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(ipi_name, cpu), action->dev_id);
-			per_cpu(ipi_irq, cpu) = irq;
+				per_cpu(xen_ipi_name, cpu), action->dev_id);
+			per_cpu(xen_ipi_irq, cpu) = irq;
 			break;
 		case IA64_CMC_VECTOR:
-			snprintf(per_cpu(cmc_name, cpu),
-				 sizeof(per_cpu(cmc_name, cpu)),
+			snprintf(per_cpu(xen_cmc_name, cpu),
+				 sizeof(per_cpu(xen_cmc_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
 						      action->handler,
 						      action->flags,
-						      per_cpu(cmc_name, cpu),
+						      per_cpu(xen_cmc_name, cpu),
 						      action->dev_id);
-			per_cpu(cmc_irq, cpu) = irq;
+			per_cpu(xen_cmc_irq, cpu) = irq;
 			break;
 		case IA64_CMCP_VECTOR:
-			snprintf(per_cpu(cmcp_name, cpu),
-				 sizeof(per_cpu(cmcp_name, cpu)),
+			snprintf(per_cpu(xen_cmcp_name, cpu),
+				 sizeof(per_cpu(xen_cmcp_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cmcp_name, cpu),
+						     per_cpu(xen_cmcp_name, cpu),
 						     action->dev_id);
-			per_cpu(cmcp_irq, cpu) = irq;
+			per_cpu(xen_cmcp_irq, cpu) = irq;
 			break;
 		case IA64_CPEP_VECTOR:
-			snprintf(per_cpu(cpep_name, cpu),
-				 sizeof(per_cpu(cpep_name, cpu)),
+			snprintf(per_cpu(xen_cpep_name, cpu),
+				 sizeof(per_cpu(xen_cpep_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cpep_name, cpu),
+						     per_cpu(xen_cpep_name, cpu),
 						     action->dev_id);
-			per_cpu(cpep_irq, cpu) = irq;
+			per_cpu(xen_cpep_irq, cpu) = irq;
 			break;
 		case IA64_CPE_VECTOR:
 		case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
 	if (action == CPU_DEAD) {
 		/* Unregister evtchn.  */
-		if (per_cpu(cpep_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-			per_cpu(cpep_irq, cpu) = -1;
+		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+					       NULL);
+			per_cpu(xen_cpep_irq, cpu) = -1;
 		}
-		if (per_cpu(cmcp_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-			per_cpu(cmcp_irq, cpu) = -1;
+		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+					       NULL);
+			per_cpu(xen_cmcp_irq, cpu) = -1;
 		}
-		if (per_cpu(cmc_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-			per_cpu(cmc_irq, cpu) = -1;
+		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+			per_cpu(xen_cmc_irq, cpu) = -1;
 		}
-		if (per_cpu(ipi_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-			per_cpu(ipi_irq, cpu) = -1;
+		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+			per_cpu(xen_ipi_irq, cpu) = -1;
 		}
-		if (per_cpu(resched_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
-						NULL);
-			per_cpu(resched_irq, cpu) = -1;
+		if (per_cpu(xen_resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+					       NULL);
+			per_cpu(xen_resched_irq, cpu) = -1;
 		}
-		if (per_cpu(timer_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-			per_cpu(timer_irq, cpu) = -1;
+		if (per_cpu(xen_timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+					       NULL);
+			per_cpu(xen_timer_irq, cpu) = -1;
 		}
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
-	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
 	int rc;
 
 	memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
 		&area);
 	WARN_ON(rc && rc != -ENOSYS);
 
-	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
 					    + runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
 	 * This function just checks and reject this effect.
 	 */
 	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-			   per_cpu(processed_blocked_time, cpu)))
+			   per_cpu(xen_blocked_time, cpu)))
 		blocked = 0;
 
 	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
 			   runstate.time[RUNSTATE_offline],
-			   per_cpu(processed_stolen_time, cpu)))
+			   per_cpu(xen_stolen_time, cpu)))
 		stolen = 0;
 
 	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
 		} else {
 			local_cpu_data->itm_next = delta_itm + new_itm;
 		}
-		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
 	return delta_itm;
 }
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fe60e1abaee8..aca0e28581c7 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -83,9 +83,9 @@
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
 #else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
 #define VMALLOC_START 0x0f800000
-#define VMALLOC_END vmalloc_end
+#define VMALLOC_END m68k_vmalloc_end
 #endif /* CONFIG_SUN3 */
 
 /* zero page used for uninitialized stuff */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 3cd19390aae5..94f81ecfe3f8 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -45,8 +45,8 @@
 ** Globals
 */
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
 
 unsigned long pmeg_vaddr[PMEGS_NUM];
 unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
 #endif
 		// the lowest mapping here is the end of our
 		// vmalloc region
-		if(!vmalloc_end)
-			vmalloc_end = seg;
+		if (!m68k_vmalloc_end)
+			m68k_vmalloc_end = seg;
 
 		// mark the segmap alloc'd, and reserve any
 		// of the first 0xbff pages the hardware is
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb2..67e6389d625a 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 #define KPROBE_HIT_ACTIVE	0x00000001
 #define KPROBE_HIT_SS		0x00000002
 
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
 static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long nextpc;
 
-	current_kprobe_orig_pc = regs->pc;
-	memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
-	regs->pc = (unsigned long) current_kprobe_ss_buf;
+	cur_kprobe_orig_pc = regs->pc;
+	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+	regs->pc = (unsigned long) cur_kprobe_ss_buf;
 
-	nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
-	if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
-		current_kprobe_next_pc =
-			current_kprobe_orig_pc + (nextpc - regs->pc);
+	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
 	else
-		current_kprobe_next_pc = nextpc;
+		cur_kprobe_next_pc = nextpc;
 
 	/* branching instructions need special handling */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
 		nextpc = singlestep_branch_setup(regs);
 
-	current_kprobe_bp_addr = nextpc;
+	cur_kprobe_bp_addr = nextpc;
 
 	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
-	mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
-				    sizeof(current_kprobe_ss_buf));
+	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+				    sizeof(cur_kprobe_ss_buf));
 	mn10300_icache_inv();
 }
 
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 			disarm_kprobe(p, regs);
 			ret = 1;
 		} else {
-			p = current_kprobe;
+			p = cur_kprobe;
 			if (p->break_handler && p->break_handler(p, regs))
 				goto ss_probe;
 		}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
+	cur_kprobe = p;
 	if (p->pre_handler(p, regs)) {
 		/* handler has already set things up, so skip ss setup */
 		return 1;
@@ -491,8 +490,8 @@ no_kprobe:
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	/* we may need to fixup regs/stack after singlestepping a call insn */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
-		regs->pc = current_kprobe_orig_pc;
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+		regs->pc = cur_kprobe_orig_pc;
 		switch (p->ainsn.insn[0]) {
 		case 0xcd:	/* CALL (d16,PC) */
 			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 		}
 	}
 
-	regs->pc = current_kprobe_next_pc;
-	current_kprobe_bp_addr = 0;
+	regs->pc = cur_kprobe_next_pc;
+	cur_kprobe_bp_addr = 0;
 }
 
 static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	if (!kprobe_running())
 		return 0;
 
-	if (current_kprobe->post_handler)
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if (cur_kprobe->post_handler)
+		cur_kprobe->post_handler(cur_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	resume_execution(cur_kprobe, regs);
 	reset_current_kprobe();
 	preempt_enable_no_resched();
 	return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 static inline
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (current_kprobe->fault_handler &&
-	    current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	if (cur_kprobe->fault_handler &&
+	    cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
 		return 1;
 
 	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
+		resume_execution(cur_kprobe, regs);
 		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 
 	switch (val) {
 	case DIE_BREAKPOINT:
-		if (current_kprobe_bp_addr != args->regs->pc) {
+		if (cur_kprobe_bp_addr != args->regs->pc) {
 			if (kprobe_handler(args->regs))
 				return NOTIFY_STOP;
 		} else {
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d9ea8d39c342..1d3b270d3083 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -37,7 +37,7 @@ extern void cpu_die(void);
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
 
-DECLARE_PER_CPU(unsigned int, pvr);
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 936f04dbfc6f..a3c11cac3d71 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
  * Since we can't get PMU interrupts inside a PMU interrupt handler,
  * we don't need separate irq and nmi entries here.
  */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
 
 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+	struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
 
 	entry->nr = 0;
 
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 845c72ab7357..03dd6a248198 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
 #endif /* CONFIG_TAU */
 
 #ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned int, pvr);
+DEFINE_PER_CPU(unsigned int, cpu_pvr);
 #endif
 
 static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 #ifdef CONFIG_SMP
-	pvr = per_cpu(pvr, cpu_id);
+	pvr = per_cpu(cpu_pvr, cpu_id);
 #else
 	pvr = mfspr(SPRN_PVR);
 #endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 97196eefef3e..a521fb8a40ee 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
 
 static void __devinit smp_store_cpu_info(int id)
 {
-	per_cpu(pvr, id) = mfspr(SPRN_PVR);
+	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
 static void __init smp_create_idle(unsigned int cpu)
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index f9dbf76a763f..7267effc8078 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -54,7 +54,7 @@ struct iic {
 	struct device_node *node;
 };
 
-static DEFINE_PER_CPU(struct iic, iic);
+static DEFINE_PER_CPU(struct iic, cpu_iic);
 #define IIC_NODE_COUNT	2
 static struct irq_host *iic_host;
 
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)
 
 static void iic_eoi(unsigned int irq)
 {
-	struct iic *iic = &__get_cpu_var(iic);
+	struct iic *iic = &__get_cpu_var(cpu_iic);
 	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
 	BUG_ON(iic->eoi_ptr < 0);
 }
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
 	struct iic *iic;
 	unsigned int virq;
 
-	iic = &__get_cpu_var(iic);
+	iic = &__get_cpu_var(cpu_iic);
 	*(unsigned long *) &pending =
 		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
 {
-	return per_cpu(iic, cpu).target_id;
+	return per_cpu(cpu_iic, cpu).target_id;
 }
 
 EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
 
 void iic_cause_IPI(int cpu, int mesg)
 {
-	out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
+	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
 }
 
 struct irq_host *iic_get_irq_host(int node)
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
 	/* XXX FIXME: should locate the linux CPU number from the HW cpu
 	 * number properly. We are lucky for now
 	 */
-	struct iic *iic = &per_cpu(iic, hw_cpu);
+	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
 
 	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
 	BUG_ON(iic->regs == NULL);
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 937a544a236d..c5f3116b6ca5 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -54,7 +54,7 @@ struct dtl {
 	int			buf_entries;
 	u64			last_idx;
 };
-static DEFINE_PER_CPU(struct dtl, dtl);
+static DEFINE_PER_CPU(struct dtl, cpu_dtl);
 
 /*
  * Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
 
 	/* set up the per-cpu log structures */
 	for_each_possible_cpu(i) {
-		struct dtl *dtl = &per_cpu(dtl, i);
+		struct dtl *dtl = &per_cpu(cpu_dtl, i);
 		dtl->cpu = i;
 
 		rc = dtl_setup_file(dtl);
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b129611590a4..f30f4a1ead23 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		touched = 1;
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
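Besides the rename, the sparc hunk swaps local_t for a plain long: the counter is only touched from the NMI handler, where preemption is already off, so the double-underscore this_cpu operations (which assume the caller has dealt with preemption) are sufficient and cheaper than local_t atomics. A sketch of the pattern, in the per_cpu_var() spelling this era of the API uses (illustrative, not the patch's exact logic):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(long, alert_counter);

	static void watchdog_tick(int nmi_hz)
	{
		/* NMI context: preemption is implicitly disabled, so the
		 * __this_cpu_* forms need no extra protection here */
		__this_cpu_inc(per_cpu_var(alert_counter));
		if (__this_cpu_read(per_cpu_var(alert_counter)) >= 30 * nmi_hz)
			__this_cpu_write(per_cpu_var(alert_counter), 0);
	}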
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb7..0c44196b78ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)		\
 do {						\
-	typedef typeof(var) T__;		\
+	typedef typeof(var) pto_T__;		\
 	if (0) {				\
-		T__ tmp__;			\
-		tmp__ = (val);			\
+		pto_T__ pto_tmp__;		\
+		pto_tmp__ = (val);		\
 	}					\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "qi" ((T__)(val)));	\
+		    : "qi" ((pto_T__)(val)));	\
 		break;				\
 	case 2:					\
 		asm(op "w %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 4:					\
 		asm(op "l %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)(val)));	\
+		    : "ri" ((pto_T__)(val)));	\
 		break;				\
 	case 8:					\
 		asm(op "q %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "re" ((T__)(val)));	\
+		    : "re" ((pto_T__)(val)));	\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
@@ -106,31 +106,31 @@ do { \
 
 #define percpu_from_op(op, var, constraint)	\
 ({						\
-	typeof(var) ret__;			\
+	typeof(var) pfo_ret__;			\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b "__percpu_arg(1)",%0"	\
-		    : "=q" (ret__)		\
+		    : "=q" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 2:					\
 		asm(op "w "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 4:					\
 		asm(op "l "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	case 8:					\
 		asm(op "q "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=r" (pfo_ret__)		\
 		    : constraint);		\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
-	ret__;					\
+	pfo_ret__;				\
 })
 
 /*
@@ -153,6 +153,84 @@ do { \
 #define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#endif
+
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)	\
 ({						\
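The block of _1/_2/_4 (and, on 64-bit, _8) macros added above backs the generic this_cpu_*() family: the generic percpu header dispatches on sizeof() of the per-cpu variable and calls the matching size-suffixed op, which on x86 expands through percpu_to_op()/percpu_from_op() into a single segment-prefixed instruction. Because a single instruction cannot be interrupted halfway, one implementation can serve the plain, __-prefixed, and irqsafe_ variants alike. A hypothetical use:

    /* sketch; "nr_events" is a made-up per-cpu counter */
    static DEFINE_PER_CPU(unsigned int, nr_events);

    static void count_event(void)
    {
    	/*
    	 * sizeof(unsigned int) == 4, so this dispatches to the _4 op:
    	 * a single "addl" against %gs-relative memory, with no
    	 * preempt_disable()/preempt_enable() pair needed around it.
    	 */
    	this_cpu_add(per_cpu_var(nr_events), 1);
    }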
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 
 	/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c8..20399b7b0c3f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-	struct orig_ist *orig_ist;
+	struct orig_ist *oist;
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
-	orig_ist = &per_cpu(orig_ist, cpu);
+	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!orig_ist->ist[0]) {
+	if (!oist->ist[0]) {
 		char *estacks = per_cpu(exception_stacks, cpu);
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
-			orig_ist->ist[v] = t->x86_tss.ist[v] =
+			oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
 	}
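Here the collision is resolved the other way around: the per-cpu symbol orig_ist keeps its name and the function-local pointer becomes oist. A sketch of the same shape:

    /* sketch mirroring the cpu_init() hunk above */
    static DEFINE_PER_CPU(struct orig_ist, orig_ist);

    static void setup_ist(int cpu)
    {
    	struct orig_ist *oist = &per_cpu(orig_ist, cpu);

    	oist->ist[0] = 0;	/* "oist" no longer shadows "orig_ist" */
    }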
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
 	/* Already intialized */
 	if (file == CPU_INDEX_BIT)
-		if (per_cpu(cpu_arr[type].init, cpu))
+		if (per_cpu(cpud_arr[type].init, cpu))
 			return 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 	priv->reg = reg;
 	priv->file = file;
 	mutex_lock(&cpu_debug_lock);
-	per_cpu(priv_arr[type], cpu) = priv;
-	per_cpu(cpu_priv_count, cpu)++;
+	per_cpu(cpud_priv_arr[type], cpu) = priv;
+	per_cpu(cpud_priv_count, cpu)++;
 	mutex_unlock(&cpu_debug_lock);
 
 	if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 					dentry, (void *)priv, &cpu_fops);
 	else {
 		debugfs_create_file(cpu_base[type].name, S_IRUGO,
-				    per_cpu(cpu_arr[type].dentry, cpu),
+				    per_cpu(cpud_arr[type].dentry, cpu),
 				    (void *)priv, &cpu_fops);
 		mutex_lock(&cpu_debug_lock);
-		per_cpu(cpu_arr[type].init, cpu) = 1;
+		per_cpu(cpud_arr[type].init, cpu) = 1;
 		mutex_unlock(&cpu_debug_lock);
 	}
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
 		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
 			continue;
 		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+		per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
 		if (type < CPU_TSS_BIT)
 			err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
 		err = cpu_init_allreg(cpu, cpu_dentry);
 
 		pr_info("cpu%d(%d) debug files %d\n",
-			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+			cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+		if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
 			pr_err("Register files count %d exceeds limit %d\n",
-			       per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+			       per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+			per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
 			err = -ENFILE;
 		}
 		if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
 	debugfs_remove_recursive(cpu_debugfs_dir);
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-			kfree(per_cpu(priv_arr[i], cpu));
+		for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+			kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index d2e7c77c1ea4..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
 	unsigned int cpu_feature;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
 	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
-	per_cpu(old_perf, cpu) = perf;
+	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+	per_cpu(acfreq_old_perf, cpu) = perf;
 
 	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
 	unsigned int freq;
 	unsigned int cached_freq;
 
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int target_freq, unsigned int relation)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
 	struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_verify\n");
 
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return -ENOMEM;
 
 	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(drv_data, cpu) = data;
+	per_cpu(acfreq_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
 	acpi_processor_unregister_performance(perf, cpu);
 err_free:
 	kfree(data);
-	per_cpu(drv_data, cpu) = NULL;
+	per_cpu(acfreq_data, cpu) = NULL;
 
 	return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
-		per_cpu(drv_data, policy->cpu) = NULL;
+		per_cpu(acfreq_data, policy->cpu) = NULL;
 		acpi_processor_unregister_performance(data->acpi_data,
 						      policy->cpu);
 		kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
 	dprintk("acpi_cpufreq_resume\n");
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b5b340..0c06bca2a1dc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
 		struct cpuinfo_x86 *d;
 		for_each_online_cpu(i) {
-			if (!per_cpu(cpuid4_info, i))
+			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
 			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 			    c->apicid >> index_msb) {
 				cpumask_set_cpu(i,
 					to_cpumask(this_leaf->shared_cpu_map));
-				if (i != cpu && per_cpu(cpuid4_info, i)) {
+				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
 					sibling_leaf =
 						CPUID4_INFO_IDX(i, index);
 					cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(per_cpu(cpuid4_info, cpu));
-	per_cpu(cpuid4_info, cpu) = NULL;
+	kfree(per_cpu(ici_cpuid4_info, cpu));
+	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	per_cpu(cpuid4_info, cpu) = kzalloc(
+	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
 		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
+	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
-		kfree(per_cpu(cpuid4_info, cpu));
-		per_cpu(cpuid4_info, cpu) = NULL;
+		kfree(per_cpu(ici_cpuid4_info, cpu));
+		per_cpu(ici_cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -643,8 +643,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)	\
 static ssize_t show_##file_name			\
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-	kfree(per_cpu(cache_kobject, cpu));
-	kfree(per_cpu(index_kobject, cpu));
-	per_cpu(cache_kobject, cpu) = NULL;
-	per_cpu(index_kobject, cpu) = NULL;
+	kfree(per_cpu(ici_cache_kobject, cpu));
+	kfree(per_cpu(ici_index_kobject, cpu));
+	per_cpu(ici_cache_kobject, cpu) = NULL;
+	per_cpu(ici_index_kobject, cpu) = NULL;
 	free_cache_attributes(cpu);
 }
 
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		return err;
 
 	/* Allocate all required memory */
-	per_cpu(cache_kobject, cpu) =
+	per_cpu(ici_cache_kobject, cpu) =
 		kzalloc(sizeof(struct kobject), GFP_KERNEL);
-	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
 		goto err_out;
 
-	per_cpu(index_kobject, cpu) = kzalloc(
+	per_cpu(ici_index_kobject, cpu) = kzalloc(
 		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
 		goto err_out;
 
 	return 0;
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (unlikely(retval < 0))
 		return retval;
 
-	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
 				      &ktype_percpu_entry,
 				      &sys_dev->kobj, "%s", "cache");
 	if (retval < 0) {
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
 					      &ktype_cache,
-					      per_cpu(cache_kobject, cpu),
+					      per_cpu(ici_cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
 			for (j = 0; j < i; j++)
 				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-			kobject_put(per_cpu(cache_kobject, cpu));
+			kobject_put(per_cpu(ici_cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			return retval;
 		}
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	}
 	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
 	return 0;
 }
 
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	if (per_cpu(cpuid4_info, cpu) == NULL)
+	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
 		return;
 	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-	kobject_put(per_cpu(cache_kobject, cpu));
+	kobject_put(per_cpu(ici_cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
 	int cpu;
 };
 
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
 
 
 static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
 {
 	struct ds_context **p_context =
-		(task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+		(task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
 	struct ds_context *context = NULL;
 	struct ds_context *new_context = NULL;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37ec038..1d9b33843c80 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
 static int svm_hardware_enable(void *garbage)
 {
 
-	struct svm_cpu_data *svm_data;
+	struct svm_cpu_data *sd;
 	uint64_t efer;
 	struct descriptor_table gdt_descr;
 	struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
 		       me);
 		return -EINVAL;
 	}
-	svm_data = per_cpu(svm_data, me);
+	sd = per_cpu(svm_data, me);
 
-	if (!svm_data) {
+	if (!sd) {
 		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
 		       me);
 		return -EINVAL;
 	}
 
-	svm_data->asid_generation = 1;
-	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
-	svm_data->next_asid = svm_data->max_asid + 1;
+	sd->asid_generation = 1;
+	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+	sd->next_asid = sd->max_asid + 1;
 
 	kvm_get_gdt(&gdt_descr);
 	gdt = (struct desc_struct *)gdt_descr.base;
-	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
 	wrmsrl(MSR_EFER, efer | EFER_SVME);
 
-	wrmsrl(MSR_VM_HSAVE_PA,
-	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
 	return 0;
 }
 
 static void svm_cpu_uninit(int cpu)
 {
-	struct svm_cpu_data *svm_data
-		= per_cpu(svm_data, raw_smp_processor_id());
+	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
 
-	if (!svm_data)
+	if (!sd)
 		return;
 
 	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-	__free_page(svm_data->save_area);
-	kfree(svm_data);
+	__free_page(sd->save_area);
+	kfree(sd);
 }
 
 static int svm_cpu_init(int cpu)
 {
-	struct svm_cpu_data *svm_data;
+	struct svm_cpu_data *sd;
 	int r;
 
-	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
-	if (!svm_data)
+	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+	if (!sd)
 		return -ENOMEM;
-	svm_data->cpu = cpu;
-	svm_data->save_area = alloc_page(GFP_KERNEL);
+	sd->cpu = cpu;
+	sd->save_area = alloc_page(GFP_KERNEL);
 	r = -ENOMEM;
-	if (!svm_data->save_area)
+	if (!sd->save_area)
 		goto err_1;
 
-	per_cpu(svm_data, cpu) = svm_data;
+	per_cpu(svm_data, cpu) = sd;
 
 	return 0;
 
 err_1:
-	kfree(svm_data);
+	kfree(sd);
 	return r;
 
 }
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
 #endif
 }
 
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
-	if (svm_data->next_asid > svm_data->max_asid) {
-		++svm_data->asid_generation;
-		svm_data->next_asid = 1;
+	if (sd->next_asid > sd->max_asid) {
+		++sd->asid_generation;
+		sd->next_asid = 1;
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 
-	svm->asid_generation = svm_data->asid_generation;
-	svm->vmcb->control.asid = svm_data->next_asid++;
+	svm->asid_generation = sd->asid_generation;
+	svm->vmcb->control.asid = sd->next_asid++;
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 {
 	int cpu = raw_smp_processor_id();
 
-	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
-	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
 	load_TR_desc();
 }
 
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
 {
 	int cpu = raw_smp_processor_id();
 
-	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	/* FIXME: handle wraparound of asid_generation */
-	if (svm->asid_generation != svm_data->asid_generation)
-		new_asid(svm, svm_data);
+	if (svm->asid_generation != sd->asid_generation)
+		new_asid(svm, sd);
 }
 
 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
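svm.c takes the local-rename route: the per-cpu symbol svm_data keeps its name and every local struct svm_cpu_data pointer becomes sd, which also lets the two-line wrmsrl() call collapse to one. The new_asid() hunk is worth a second look on its own: it is a per-CPU ASID allocator with a generation counter. An abstracted sketch of that logic, under simplified, made-up types:

    /* sketch of the allocation scheme in new_asid(); names are invented */
    struct asid_pool {
    	u32 next_asid;
    	u32 max_asid;
    	u32 generation;
    };

    static u32 take_asid(struct asid_pool *p)
    {
    	if (p->next_asid > p->max_asid) {	/* pool exhausted */
    		p->generation++;		/* marks all old ASIDs stale */
    		p->next_asid = 1;		/* ASID 0 stays reserved */
    	}
    	return p->next_asid++;
    }

A guest whose cached generation no longer matches the pool's gets a fresh ASID plus a TLB flush, which is exactly the condition pre_svm_run() checks.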
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5fc..563d20504988 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
+	per_cpu(xen_resched_irq, cpu) = rc;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
+	per_cpu(xen_callfunc_irq, cpu) = rc;
 
 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
 				     debug_name, NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(debug_irq, cpu) = rc;
+	per_cpu(xen_debug_irq, cpu) = rc;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
 				    NULL);
 	if (rc < 0)
 		goto fail;
-	per_cpu(callfuncsingle_irq, cpu) = rc;
+	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(debug_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	if (per_cpu(xen_resched_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+	if (per_cpu(xen_debug_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+				       NULL);
 
 	return rc;
 }
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
 		current->state = TASK_UNINTERRUPTIBLE;
 		schedule_timeout(HZ/10);
 	}
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 
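The xen_ prefix keeps these per-cpu IRQ numbers clear of generic names such as resched_irq. Note the symmetry the code maintains: every binding stores its IRQ in a per-cpu slot so both the failure path of xen_smp_intr_init() and xen_cpu_die() can unbind it, and xen_debug_irq starts at -1 so an unbound slot is distinguishable. A condensed sketch of that pattern for a single, hypothetical binding:

    /* sketch; "xen_example_irq" and its handlers' wiring are made up */
    static DEFINE_PER_CPU(int, xen_example_irq) = -1;	/* -1 == unbound */

    static int example_bind(unsigned int cpu)
    {
    	int rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
    					 IRQF_DISABLED, "example", NULL);
    	if (rc < 0)
    		return rc;	/* slot stays -1; teardown will skip it */
    	per_cpu(xen_example_irq, cpu) = rc;
    	return 0;
    }

    static void example_unbind(unsigned int cpu)
    {
    	if (per_cpu(xen_example_irq, cpu) >= 0)
    		unbind_from_irqhandler(per_cpu(xen_example_irq, cpu), NULL);
    }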
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d8..0d3f07cd1b5f 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
 #define NS_PER_TICK	(1000000000LL / HZ)
 
 /* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 
 /* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
 /* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return an consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 /* return true when a vcpu could run but has no real cpu to run on */
 bool xen_vcpu_stolen(int vcpu)
 {
-	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
 }
 
 void xen_setup_runstate_info(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
 
-	area.addr.v = &per_cpu(runstate, cpu);
+	area.addr.v = &per_cpu(xen_runstate, cpu);
 
 	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
 			       cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
 
 	WARN_ON(state.state != RUNSTATE_running);
 
-	snap = &__get_cpu_var(runstate_snapshot);
+	snap = &__get_cpu_var(xen_runstate_snapshot);
 
 	/* work out how much time the VCPU has not been runn*ing* */
 	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
 
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(residual_stolen);
+	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
 
 	if (stolen < 0)
 		stolen = 0;
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(residual_stolen) = stolen;
+	__get_cpu_var(xen_residual_stolen) = stolen;
 	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(residual_blocked);
+	blocked += __get_cpu_var(xen_residual_blocked);
 
 	if (blocked < 0)
 		blocked = 0;
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(residual_blocked) = blocked;
+	__get_cpu_var(xen_residual_blocked) = blocked;
 	account_idle_ticks(ticks);
 }
 
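do_stolen_accounting() converts nanosecond runstate deltas into whole ticks and carries the sub-tick remainder in the per-cpu xen_residual_* slots, so no time is lost between invocations. The carry pattern in isolation, with a hypothetical residual variable:

    /* sketch; "example_residual" and "account_stolen_ns" are made-up names */
    static DEFINE_PER_CPU(u64, example_residual);

    static void account_stolen_ns(s64 delta)
    {
    	u64 ns, ticks;

    	if (delta < 0)
    		delta = 0;	/* runstate deltas can jitter negative */
    	ns = delta + __get_cpu_var(example_residual);
    	ticks = iter_div_u64_rem(ns, NS_PER_TICK, &ns);
    	__get_cpu_var(example_residual) = ns;	/* remainder < NS_PER_TICK */
    	account_steal_ticks(ticks);
    }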