-rw-r--r--  arch/x86/Kconfig                            30
-rw-r--r--  arch/x86/include/asm/cpufeature.h           13
-rw-r--r--  arch/x86/include/asm/mmzone_64.h            23
-rw-r--r--  arch/x86/include/asm/percpu.h               27
-rw-r--r--  arch/x86/kernel/apic/apic.c                  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c     7
-rw-r--r--  arch/x86/kernel/process.c                    4
-rw-r--r--  arch/x86/kernel/smpboot.c                    4
-rw-r--r--  arch/x86/mm/numa_64.c                      131
-rw-r--r--  drivers/acpi/processor_throttling.c         32
10 files changed, 70 insertions(+), 203 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cc6c53a95bfd..8db4fbf30b59 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1223,6 +1223,10 @@ config HAVE_ARCH_BOOTMEM
 	def_bool y
 	depends on X86_32 && NUMA
 
+config HAVE_ARCH_ALLOC_REMAP
+	def_bool y
+	depends on X86_32 && NUMA
+
 config ARCH_HAVE_MEMORY_PRESENT
 	def_bool y
 	depends on X86_32 && DISCONTIGMEM
@@ -1231,13 +1235,9 @@ config NEED_NODE_MEMMAP_SIZE
 	def_bool y
 	depends on X86_32 && (DISCONTIGMEM || SPARSEMEM)
 
-config HAVE_ARCH_ALLOC_REMAP
-	def_bool y
-	depends on X86_32 && NUMA
-
 config ARCH_FLATMEM_ENABLE
 	def_bool y
-	depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
+	depends on X86_32 && !NUMA
 
 config ARCH_DISCONTIGMEM_ENABLE
 	def_bool y
@@ -1247,20 +1247,16 @@ config ARCH_DISCONTIGMEM_DEFAULT
 	def_bool y
 	depends on NUMA && X86_32
 
-config ARCH_PROC_KCORE_TEXT
-	def_bool y
-	depends on X86_64 && PROC_KCORE
-
-config ARCH_SPARSEMEM_DEFAULT
-	def_bool y
-	depends on X86_64
-
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
 	select SPARSEMEM_STATIC if X86_32
 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+	depends on X86_64
+
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 	depends on ARCH_SPARSEMEM_ENABLE
@@ -1269,6 +1265,10 @@ config ARCH_MEMORY_PROBE
 	def_bool X86_64
 	depends on MEMORY_HOTPLUG
 
+config ARCH_PROC_KCORE_TEXT
+	def_bool y
+	depends on X86_64 && PROC_KCORE
+
 config ILLEGAL_POINTER_VALUE
 	hex
 	default 0 if X86_32
@@ -1703,10 +1703,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 	depends on MEMORY_HOTPLUG
 
-config HAVE_ARCH_EARLY_PFN_TO_NID
-	def_bool X86_64
-	depends on NUMA
-
 config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
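
[Note: apart from dropping HAVE_ARCH_EARLY_PFN_TO_NID (the custom __early_pfn_to_nid() is deleted from numa_64.c below, so the generic early_pfn_to_nid() applies on x86-64) and trimming the ARCH_FLATMEM_ENABLE dependency, these Kconfig hunks only regroup existing options.]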
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 91f3e087cf21..50c0d30e676d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -207,8 +207,7 @@ extern const char * const x86_power_flags[32];
 #define test_cpu_cap(c, bit)						\
 	 test_bit(bit, (unsigned long *)((c)->x86_capability))
 
-#define cpu_has(c, bit)							\
-	(__builtin_constant_p(bit) &&					\
+#define REQUIRED_MASK_BIT_SET(bit)					\
 	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
 	   (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
 	   (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
@@ -218,10 +217,16 @@ extern const char * const x86_power_flags[32];
 	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
 	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
 	   (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
-	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )	\
-	  ? 1 :								\
+	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
+
+#define cpu_has(c, bit)							\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
 	 test_cpu_cap(c, bit))
 
+#define this_cpu_has(bit)						\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
+	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
 #define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
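
[Note: factoring out REQUIRED_MASK_BIT_SET() lets both cpu_has() and the new this_cpu_has() collapse to the constant 1 at compile time whenever the queried feature is guaranteed by the kernel configuration. A minimal standalone sketch of that dispatch technique, with made-up mask and capability values rather than the kernel's:]

    #include <stdio.h>

    #define REQUIRED_MASK0 0x9	/* pretend bits 0 and 3 are build-time guaranteed */

    /* same shape as the kernel macro, reduced to one 32-bit word */
    #define REQUIRED_MASK_BIT_SET(bit) \
    	((((bit) >> 5) == 0) && (1UL << ((bit) & 31) & REQUIRED_MASK0))

    static unsigned long runtime_caps = 0x2;	/* stand-in for x86_capability[] */

    static int test_cap_runtime(int bit)
    {
    	return (runtime_caps >> bit) & 1;
    }

    /* a constant bit already in the mask folds to 1; no runtime load is emitted */
    #define has_cap(bit)						\
    	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit)	\
    	 ? 1 : test_cap_runtime(bit))

    int main(void)
    {
    	printf("bit 3 -> %d (compile-time constant)\n", has_cap(3));
    	printf("bit 1 -> %d (falls back to runtime test)\n", has_cap(1));
    	return 0;
    }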
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index 288b96f815a6..b3f88d7867c7 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -4,36 +4,13 @@
 #ifndef _ASM_X86_MMZONE_64_H
 #define _ASM_X86_MMZONE_64_H
 
-
 #ifdef CONFIG_NUMA
 
 #include <linux/mmdebug.h>
-
 #include <asm/smp.h>
 
-/* Simple perfect hash to map physical addresses to node numbers */
-struct memnode {
-	int shift;
-	unsigned int mapsize;
-	s16 *map;
-	s16 embedded_map[64 - 8];
-} ____cacheline_aligned; /* total size = 128 bytes */
-extern struct memnode memnode;
-#define memnode_shift memnode.shift
-#define memnodemap memnode.map
-#define memnodemapsize memnode.mapsize
-
 extern struct pglist_data *node_data[];
 
-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
-{
-	unsigned nid;
-	VIRTUAL_BUG_ON(!memnodemap);
-	nid = memnodemap[addr >> memnode_shift];
-	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
-	return nid;
-}
-
 #define NODE_DATA(nid)		(node_data[nid])
 
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
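
[Note: what goes away here is an O(1) physical-address-to-node lookup: one shift, one table load. A toy standalone equivalent with purely illustrative values (the in-kernel replacement resolves node ids from the early node-range data instead, as the numa_64.c hunks below show):]

    #include <stdio.h>

    static short memnodemap[] = { 0, 0, 1, 1 };	/* one entry per 1G granule */
    static int memnode_shift = 30;

    static int phys_to_nid(unsigned long addr)
    {
    	return memnodemap[addr >> memnode_shift];
    }

    int main(void)
    {
    	printf("0x20000000 -> node %d\n", phys_to_nid(0x20000000UL));	/* 0 */
    	printf("0xc0000000 -> node %d\n", phys_to_nid(0xc0000000UL));	/* 1 */
    	return 0;
    }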
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index d475b4398d8b..76042d981596 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -542,6 +542,33 @@ do { \
 	old__;								\
 })
 
+static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+			const unsigned long __percpu *addr)
+{
+	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+
+	return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
+}
+
+static inline int x86_this_cpu_variable_test_bit(int nr,
+			const unsigned long __percpu *addr)
+{
+	int oldbit;
+
+	asm volatile("bt "__percpu_arg(2)",%1\n\t"
+			"sbb %0,%0"
+			: "=r" (oldbit)
+			: "m" (*(unsigned long *)addr), "Ir" (nr));
+
+	return oldbit;
+}
+
+#define x86_this_cpu_test_bit(nr, addr)			\
+	(__builtin_constant_p((nr))			\
+	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
+	 : x86_this_cpu_variable_test_bit((nr), (addr)))
+
+
 #include <asm-generic/percpu.h>
 
 /* We can use this directly for local CPU (faster). */
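
[Note: x86_this_cpu_variable_test_bit() above relies on the BT/SBB idiom: BT copies the addressed bit into the carry flag, and SBB of a register with itself then materialises CF as 0 or -1. The kernel version addresses the per-cpu segment via __percpu_arg(); here is a standalone sketch of just the idiom, for a plain word in memory (x86 only, GCC/Clang inline asm):]

    #include <stdio.h>

    static inline int variable_test_bit(int nr, const unsigned long *addr)
    {
    	int oldbit;

    	/* BT sets CF to the selected bit; SBB %0,%0 yields 0 or -1 from CF */
    	asm volatile("bt %2,%1\n\t"
    		     "sbb %0,%0"
    		     : "=r" (oldbit)
    		     : "m" (*addr), "Ir" (nr));
    	return oldbit;
    }

    int main(void)
    {
    	unsigned long word = 1UL << 5;

    	printf("bit 5: %d\n", variable_test_bit(5, &word) != 0);	/* 1 */
    	printf("bit 4: %d\n", variable_test_bit(4, &word) != 0);	/* 0 */
    	return 0;
    }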
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index fabf01eff771..2bc503bf9e99 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -505,7 +505,7 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
-	if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
+	if (this_cpu_has(X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
 		/* Make LAPIC timer preferrable over percpu HPET */
 		lapic_clockevent.rating = 150;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6f8c5e9da97f..6b0f4cde7a22 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -355,7 +355,6 @@ static void notify_thresholds(__u64 msr_val)
 static void intel_thermal_interrupt(void)
 {
 	__u64 msr_val;
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
@@ -367,19 +366,19 @@ static void intel_thermal_interrupt(void)
 			       CORE_LEVEL) != 0)
 		mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
 
-	if (cpu_has(c, X86_FEATURE_PLN))
+	if (this_cpu_has(X86_FEATURE_PLN))
 		if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
 					POWER_LIMIT_EVENT,
 					CORE_LEVEL) != 0)
 			mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
 
-	if (cpu_has(c, X86_FEATURE_PTS)) {
+	if (this_cpu_has(X86_FEATURE_PTS)) {
 		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
 		if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
 					THERMAL_THROTTLING_EVENT,
 					PACKAGE_LEVEL) != 0)
 			mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
-		if (cpu_has(c, X86_FEATURE_PLN))
+		if (this_cpu_has(X86_FEATURE_PLN))
 			if (therm_throt_process(msr_val &
 					PACKAGE_THERM_STATUS_POWER_LIMIT,
 					POWER_LIMIT_EVENT,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d46cbe46b7ab..88a90a977f8e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -449,7 +449,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
 	if (!need_resched()) {
-		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -465,7 +465,7 @@ static void mwait_idle(void)
 	if (!need_resched()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		trace_cpu_idle(1, smp_processor_id());
-		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c2871d3c71b6..a3c430bdfb60 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1332,9 +1332,9 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))
 		return;
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
+	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
 	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
 		return;
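
[Note: the first guard in mwait_play_dead() is not a De Morgan-equivalent rewrite: !(A && B) is (!A || !B), not (!A && B). As converted, a CPU that has MWAIT but fails mwait_usable() no longer bails out early. Whether that behaviour change is intended is not visible from this hunk; a quick truth table makes the difference explicit:]

    #include <stdio.h>

    int main(void)
    {
    	/* A = cpu has MWAIT, B = mwait_usable() */
    	for (int a = 0; a <= 1; a++)
    		for (int b = 0; b <= 1; b++)
    			printf("A=%d B=%d  old !(A&&B)=%d  new !A&&B=%d\n",
    			       a, b, !(a && b), !a && b);
    	return 0;
    }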
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index e8c00cc72033..13f5b068e8c2 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -28,125 +28,10 @@ EXPORT_SYMBOL(node_data);
 
 nodemask_t numa_nodes_parsed __initdata;
 
-struct memnode memnode;
-
-static unsigned long __initdata nodemap_addr;
-static unsigned long __initdata nodemap_size;
-
 static struct numa_meminfo numa_meminfo __initdata;
-
 static int numa_distance_cnt;
 static u8 *numa_distance;
 
-/*
- * Given a shift value, try to populate memnodemap[]
- * Returns :
- * 1 if OK
- * 0 if memnodmap[] too small (of shift too small)
- * -1 if node overlap or lost ram (shift too big)
- */
-static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
-{
-	unsigned long addr, end;
-	int i, res = -1;
-
-	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
-	for (i = 0; i < mi->nr_blks; i++) {
-		addr = mi->blk[i].start;
-		end = mi->blk[i].end;
-		if (addr >= end)
-			continue;
-		if ((end >> shift) >= memnodemapsize)
-			return 0;
-		do {
-			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
-				return -1;
-			memnodemap[addr >> shift] = mi->blk[i].nid;
-			addr += (1UL << shift);
-		} while (addr < end);
-		res = 1;
-	}
-	return res;
-}
-
-static int __init allocate_cachealigned_memnodemap(void)
-{
-	unsigned long addr;
-
-	memnodemap = memnode.embedded_map;
-	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
-		return 0;
-
-	addr = 0x8000;
-	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
-	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
-				      nodemap_size, L1_CACHE_BYTES);
-	if (nodemap_addr == MEMBLOCK_ERROR) {
-		printk(KERN_ERR
-		       "NUMA: Unable to allocate Memory to Node hash map\n");
-		nodemap_addr = nodemap_size = 0;
-		return -1;
-	}
-	memnodemap = phys_to_virt(nodemap_addr);
-	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
-
-	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
-	       nodemap_addr, nodemap_addr + nodemap_size);
-	return 0;
-}
-
-/*
- * The LSB of all start and end addresses in the node map is the value of the
- * maximum possible shift.
- */
-static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
-{
-	int i, nodes_used = 0;
-	unsigned long start, end;
-	unsigned long bitfield = 0, memtop = 0;
-
-	for (i = 0; i < mi->nr_blks; i++) {
-		start = mi->blk[i].start;
-		end = mi->blk[i].end;
-		if (start >= end)
-			continue;
-		bitfield |= start;
-		nodes_used++;
-		if (end > memtop)
-			memtop = end;
-	}
-	if (nodes_used <= 1)
-		i = 63;
-	else
-		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
-	memnodemapsize = (memtop >> i)+1;
-	return i;
-}
-
-static int __init compute_hash_shift(const struct numa_meminfo *mi)
-{
-	int shift;
-
-	shift = extract_lsb_from_nodes(mi);
-	if (allocate_cachealigned_memnodemap())
-		return -1;
-	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
-		shift);
-
-	if (populate_memnodemap(mi, shift) != 1) {
-		printk(KERN_INFO "Your memory is not aligned you need to "
-		       "rebuild your kernel with a bigger NODEMAPSIZE "
-		       "shift=%d\n", shift);
-		return -1;
-	}
-	return shift;
-}
-
-int __meminit __early_pfn_to_nid(unsigned long pfn)
-{
-	return phys_to_nid(pfn << PAGE_SHIFT);
-}
-
 static void * __init early_node_mem(int nodeid, unsigned long start,
 				    unsigned long end, unsigned long size,
 				    unsigned long align)
@@ -270,7 +155,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
 	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
 		nodedata_phys + pgdat_size - 1);
-	nid = phys_to_nid(nodedata_phys);
+	nid = early_pfn_to_nid(nodedata_phys >> PAGE_SHIFT);
 	if (nid != nodeid)
 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);
 
@@ -527,12 +412,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
-	memnode_shift = compute_hash_shift(mi);
-	if (memnode_shift < 0) {
-		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
-		return -EINVAL;
-	}
-
 	for (i = 0; i < mi->nr_blks; i++)
 		memblock_x86_register_active_regions(mi->blk[i].nid,
 					mi->blk[i].start >> PAGE_SHIFT,
@@ -626,17 +505,13 @@ static int __init numa_init(int (*init_func)(void))
 
 void __init initmem_init(void)
 {
-	int ret;
-
 	if (!numa_off) {
 #ifdef CONFIG_ACPI_NUMA
-		ret = numa_init(x86_acpi_numa_init);
-		if (!ret)
+		if (!numa_init(x86_acpi_numa_init))
 			return;
 #endif
 #ifdef CONFIG_AMD_NUMA
-		ret = numa_init(amd_numa_init);
-		if (!ret)
+		if (!numa_init(amd_numa_init))
 			return;
 #endif
 	}
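
[Note: the deleted code implemented a "perfect hash" from physical address to node id: extract_lsb_from_nodes() ORs together all node start addresses and takes the lowest set bit as the shift, so every node boundary is a multiple of 1 << shift and (addr >> shift) can index a flat s16 table. A standalone sketch of just that shift/map-size computation, with invented example ranges:]

    #include <stdio.h>
    #include <strings.h>	/* ffsl() */

    struct blk { unsigned long start, end; int nid; };

    int main(void)
    {
    	struct blk mi[] = {
    		{ 0x000000000UL, 0x080000000UL, 0 },	/* node 0: 0..2G  */
    		{ 0x080000000UL, 0x100000000UL, 1 },	/* node 1: 2G..4G */
    	};
    	unsigned long bitfield = 0, memtop = 0;
    	int i, shift;

    	for (i = 0; i < 2; i++) {
    		bitfield |= mi[i].start;	/* a start of 0 contributes no bits */
    		if (mi[i].end > memtop)
    			memtop = mi[i].end;
    	}
    	shift = bitfield ? ffsl(bitfield) - 1 : 63;	/* single node: 63 */

    	/* here: shift=31, i.e. 2G granules, and a 3-entry node-id table */
    	printf("shift=%d mapsize=%lu\n", shift, (memtop >> shift) + 1);
    	return 0;
    }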
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index fa84e9744330..6f2c6d914cbc 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -710,20 +710,14 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 }
 
 #ifdef CONFIG_X86
-static int acpi_throttling_rdmsr(struct acpi_processor *pr,
-			u64 *value)
+static int acpi_throttling_rdmsr(u64 *value)
 {
-	struct cpuinfo_x86 *c;
 	u64 msr_high, msr_low;
-	unsigned int cpu;
 	u64 msr = 0;
 	int ret = -1;
 
-	cpu = pr->id;
-	c = &cpu_data(cpu);
-
-	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
-	    !cpu_has(c, X86_FEATURE_ACPI)) {
+	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
+	    !this_cpu_has(X86_FEATURE_ACPI)) {
 		printk(KERN_ERR PREFIX
 		       "HARDWARE addr space,NOT supported yet\n");
 	} else {
@@ -738,18 +732,13 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr,
 	return ret;
 }
 
-static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
+static int acpi_throttling_wrmsr(u64 value)
 {
-	struct cpuinfo_x86 *c;
-	unsigned int cpu;
 	int ret = -1;
 	u64 msr;
 
-	cpu = pr->id;
-	c = &cpu_data(cpu);
-
-	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
-	    !cpu_has(c, X86_FEATURE_ACPI)) {
+	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
+	    !this_cpu_has(X86_FEATURE_ACPI)) {
 		printk(KERN_ERR PREFIX
 		       "HARDWARE addr space,NOT supported yet\n");
 	} else {
@@ -761,15 +750,14 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
 	return ret;
 }
 #else
-static int acpi_throttling_rdmsr(struct acpi_processor *pr,
-			u64 *value)
+static int acpi_throttling_rdmsr(u64 *value)
 {
 	printk(KERN_ERR PREFIX
 		"HARDWARE addr space,NOT supported yet\n");
 	return -1;
 }
 
-static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value)
+static int acpi_throttling_wrmsr(u64 value)
 {
 	printk(KERN_ERR PREFIX
 		"HARDWARE addr space,NOT supported yet\n");
@@ -801,7 +789,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
 		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		ret = acpi_throttling_rdmsr(pr, value);
+		ret = acpi_throttling_rdmsr(value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
@@ -834,7 +822,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr,
 		ret = 0;
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		ret = acpi_throttling_wrmsr(pr, value);
+		ret = acpi_throttling_wrmsr(value);
 		break;
 	default:
 		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
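
[Note: with pr->id gone from the MSR helpers, acpi_throttling_rdmsr()/acpi_throttling_wrmsr() now interrogate whichever CPU they execute on via this_cpu_read()/this_cpu_has(). That presumes the callers have already bound themselves to the target processor before reaching the FIXED_HARDWARE path; this appears to hold for the existing throttling call sites, but it is an assumption about the callers, not something these hunks show.]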