Diffstat (limited to 'arch/x86')
30 files changed, 433 insertions(+), 203 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b5e31b..211ca3f7fd16 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -185,17 +185,16 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				int num_nodes)
-{
-}
 #endif
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d655d1..64dc82ee19f0 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
 
 #include <linux/pci.h>
 
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
 extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b234406f..4729b2b63117 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@ enum fixed_addresses {
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
-	__end_of_permanent_fixed_addresses,
-
 #ifdef CONFIG_X86_MRST
 	FIX_LNW_VRTC,
 #endif
+	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdfa50f9..91d915a65259 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb2528464..ca242d35e873 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@ enum die_val {
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..72a8b52e7dfd 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
 
 static inline void reassert_nmi(void)
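For a sense of how these constants compose, here is a minimal sketch of the decode-and-acknowledge pattern that the traps.c hunk further down adopts; handle_io_nmi_reason() is a hypothetical name, not part of the patch:

	/* Hypothetical helper -- mirrors the pattern in default_do_nmi() below. */
	static void handle_io_nmi_reason(struct pt_regs *regs)
	{
		unsigned char reason = get_nmi_reason();  /* inb(NMI_REASON_PORT) */

		if (!(reason & NMI_REASON_MASK))
			return;				  /* no SERR/IOCHK source */
		if (reason & NMI_REASON_SERR)
			/* writing the clear bit acknowledges and disables SERR */
			outb((reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR,
			     NMI_REASON_PORT);
		else if (reason & NMI_REASON_IOCHK)
			outb((reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK,
			     NMI_REASON_PORT);
	}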
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b953510..c76f5b92b840 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070e7c26..5ae87285a502 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,7 +38,7 @@ extern void __cpuinit numa_add_cpu(int cpu);
 extern void __cpuinit numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 #endif /* CONFIG_NUMA_EMU */
 #else
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff18a6a..e2f6a99f14ab 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,9 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
 
+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
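For reference, ARCH_P4_CNTRVAL_MASK works out to (1ULL << 40) - 1 = 0xFFFFFFFFFF, the full width of the 40-bit P4 performance counters; the p4_pmu hunk further down derives .max_period = (1ULL << 39) - 1 from the same constant, i.e. half the counter range.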
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5e0065..0a99f7198bc3 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@ struct pci_device_id amd_nb_misc_ids[] = {
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+	{ 0x00, 0x18, 0x20 },
+	{ 0xff, 0x00, 0x20 },
+	{ 0xfe, 0x00, 0x20 },
+	{ }
+};
+
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
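Note that the table ends with an empty { } sentinel, so callers can iterate until dev_limit reads zero instead of needing ARRAY_SIZE(). A minimal sketch of the intended loop (the aperture_64.c hunks below follow this pattern):

	int i;

	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
		u8 bus       = amd_nb_bus_dev_ranges[i].bus;
		u8 dev_base  = amd_nb_bus_dev_ranges[i].dev_base;
		u8 dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;

		/* ... probe PCI devices dev_base..dev_limit-1 on this bus ... */
	}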
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83e1659..5955a7800a96 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@ int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
 
-struct bus_dev_range {
-	int bus;
-	int dev_base;
-	int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
-	{ 0x00, 0x18, 0x20},
-	{ 0xff, 0x00, 0x20},
-	{ 0xfe, 0x00, 0x20}
-};
-
 static struct resource gart_resource = {
 	.name	= "GART",
 	.flags	= IORESOURCE_MEM,
@@ -294,13 +282,13 @@ void __init early_gart_iommu_check(void)
 	search_agp_bridge(&agp_aper_order, &valid_agp);
 
 	fix = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@ void __init early_gart_iommu_check(void)
 		return;
 
 	/* disable them all at first */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@ int __init gart_iommu_hole_init(void)
 
 	fix = 0;
 	node = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 		u32 ctl;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@ out:
 	}
 
 	/* Fix up the north bridges */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus, dev_base, dev_limit;
 
 		/*
@@ -514,9 +502,9 @@ out:
 		 */
 		u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a51345ba449e..06c196d7e59c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -684,7 +684,7 @@ static int __init calibrate_APIC_clock(void)
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 				       lapic_clockevent.shift);
 	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 	lapic_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &lapic_clockevent);
 
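Rough numbers to show what the wider constant buys (illustrative only; the real rate comes out of the calibration above): with the LAPIC timer ticking at, say, 12.5 MHz, a 0x7FFFFF (~8.4M tick) ceiling caps max_delta_ns at roughly 0.67 s, while 0x7FFFFFFF (~2.1G ticks) raises it to about 170 s. The timer's count register is 32 bits wide, so the larger bound still fits the hardware.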
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e1ae06..79fd43ca6f96 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 
 	default:
@@ -96,7 +95,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 static __read_mostly struct notifier_block backtrace_notifier = {
 	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
 	.next                   = NULL,
-	.priority               = 1
+	.priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ecca5f41ad2c..bd16b58b8850 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
+	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
 /*
@@ -641,7 +641,7 @@ void __cpuinit uv_cpu_init(void)
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
-	if (reason != DIE_NMI_IPI)
+	if (reason != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	if (in_crash_kexec)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7bfedb..a77971979564 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
 #include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@ static int mce_raise_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NOTIFY_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@ static int mce_raise_notify(struct notifier_block *self,
 
 static struct notifier_block mce_raise_nb = {
 	.notifier_call = mce_raise_notify,
-	.priority = 1000,
+	.priority = NMI_LOCAL_NORMAL_PRIOR,
 };
 
 /* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 04921017abe0..9d977a2ea693 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1267,7 +1267,6 @@ perf_event_nmi_handler(struct notifier_block *self,
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
@@ -1317,7 +1316,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
-	.priority		= 1
+	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b93e694..e56b9bfbabd1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -753,19 +753,21 @@ out:
 
 static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 {
-	int overflow = 0;
-	u32 low, high;
+	u64 v;
 
-	rdmsr(hwc->config_base + hwc->idx, low, high);
-
-	/* we need to check high bit for unflagged overflows */
-	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
-		overflow = 1;
-		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			((u64)low) & ~P4_CCCR_OVF);
+	/* an official way for overflow indication */
+	rdmsrl(hwc->config_base + hwc->idx, v);
+	if (v & P4_CCCR_OVF) {
+		wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+		return 1;
 	}
 
-	return overflow;
+	/* it might be unflagged overflow */
+	rdmsrl(hwc->event_base + hwc->idx, v);
+	if (!(v & ARCH_P4_CNTRVAL_MASK))
+		return 1;
+
+	return 0;
 }
 
 static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1154,9 @@ static __initconst const struct x86_pmu p4_pmu = {
 	 */
 	.num_counters		= ARCH_P4_MAX_CCCR,
 	.apic			= 1,
-	.cntval_bits		= 40,
-	.cntval_mask		= (1ULL << 40) - 1,
-	.max_period		= (1ULL << 39) - 1,
+	.cntval_bits		= ARCH_P4_CNTRVAL_BITS,
+	.cntval_mask		= ARCH_P4_CNTRVAL_MASK,
+	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
 	.hw_config		= p4_hw_config,
 	.schedule_events	= p4_pmu_schedule_events,
 	/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c998cbd4..d6fb146c0d8b 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
  */
 void dump_stack(void)
 {
-	unsigned long bp = 0;
 	unsigned long stack;
 
-#ifdef CONFIG_FRAME_POINTER
-	if (!bp)
-		get_bp(bp);
-#endif
-
 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417e8697..d3b895f375d3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@ ENDPROC(native_usergs_sysret64)
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
-	movq_cfi rdi, RDI+16-ARGOFFSET
-	movq_cfi rsi, RSI+16-ARGOFFSET
-	movq_cfi rdx, RDX+16-ARGOFFSET
-	movq_cfi rcx, RCX+16-ARGOFFSET
-	movq_cfi rax, RAX+16-ARGOFFSET
-	movq_cfi r8,  R8+16-ARGOFFSET
-	movq_cfi r9,  R9+16-ARGOFFSET
-	movq_cfi r10, R10+16-ARGOFFSET
-	movq_cfi r11, R11+16-ARGOFFSET
-
-	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	/*
+	 * start from rbp in pt_regs and jump over
+	 * return address.
+	 */
+	movq_cfi rdi, RDI+8-RBP
+	movq_cfi rsi, RSI+8-RBP
+	movq_cfi rdx, RDX+8-RBP
+	movq_cfi rcx, RCX+8-RBP
+	movq_cfi rax, RAX+8-RBP
+	movq_cfi r8,  R8+8-RBP
+	movq_cfi r9,  R9+8-RBP
+	movq_cfi r10, R10+8-RBP
+	movq_cfi r11, R11+8-RBP
+
+	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
 	movq_cfi rbp, 8		/* push %rbp */
 	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
 	testl $3, CS(%rdi)
@@ -782,8 +786,9 @@ END(interrupt)
 
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	subq $ORIG_RAX-ARGOFFSET+8, %rsp
-	CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
+	/* reserve pt_regs for scratch regs and rbp */
+	subq $ORIG_RAX-RBP, %rsp
+	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
 	call save_args
 	PARTIAL_FRAME 0
 	call \func
@@ -808,9 +813,14 @@ ret_from_intr:
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
 	leaveq
+
 	CFI_RESTORE		rbp
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
+
+	/* we did not save rbx, restore only from ARGOFFSET */
+	addq $8, %rsp
+	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
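To make the offset arithmetic concrete (assuming the usual calling.h layout of this era: RBP at 32, ARGOFFSET == R11 at 48, ORIG_RAX at 120): the interrupt macro used to reserve ORIG_RAX-ARGOFFSET+8 = 80 bytes and store, e.g., rdi at RDI+16-ARGOFFSET; it now reserves ORIG_RAX-RBP = 88 bytes, eight more, so rbp gets a real pt_regs slot and the same stores become RDI+8-RBP. On the exit path, the new `addq $8, %rsp` steps back over that slot, matching the diff's own comment that rbx was never saved.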
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b654dec6..a4130005028a 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
@@ -525,10 +526,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 		}
 		return NOTIFY_DONE;
 
-	case DIE_NMI_IPI:
-		/* Just ignore, we will handle the roundup on DIE_NMI. */
-		return NOTIFY_DONE;
-
 	case DIE_NMIUNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@ static struct notifier_block kgdb_notifier = {
 	/*
 	 * Lowest-prio notifier priority, we want to be notified last:
 	 */
-	.priority	= -INT_MAX,
+	.priority	= NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8d4815..fc7aae1e2bc7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
@@ -747,7 +748,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 {
 	int cpu;
 
-	if (val != DIE_NMI_IPI)
+	if (val != DIE_NMI)
 		return NOTIFY_OK;
 
 	cpu = raw_smp_processor_id();
@@ -778,6 +779,8 @@ static void smp_send_nmi_allbutself(void)
 
 static struct notifier_block crash_nmi_nb = {
 	.notifier_call = crash_nmi_callback,
+	/* we want to be the first one called */
+	.priority = NMI_LOCAL_HIGH_PRIOR+1,
 };
 
 /* Halt all other CPUs, calling the specified function on each of them
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c7149c96d079..763df77343dd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
  */
 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
-void cpu_hotplug_driver_lock()
+void cpu_hotplug_driver_lock(void)
 {
 	mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
-void cpu_hotplug_driver_unlock()
+void cpu_hotplug_driver_unlock(void)
 {
 	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca5694d..b9b67166f9de 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@ EXPORT_SYMBOL_GPL(used_vectors);
 static int ignore_nmis;
 
 int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed
+ * simultaneously; may only be used in the NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -310,15 +315,15 @@
 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
 static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-		reason, smp_processor_id());
-
-	printk(KERN_EMERG
-		"You have some hardware problem, likely on the PCI bus.\n");
+	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
+	/*
+	 * On some machines, PCI SERR line is used to report memory
+	 * errors. EDAC makes use of it.
+	 */
 #if defined(CONFIG_EDAC)
 	if (edac_handler_set()) {
 		edac_atomic_assert_error();
@@ -329,11 +334,11 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 
-	/* Clear and disable the memory parity error line. */
-	reason = (reason & 0xf) | 4;
-	outb(reason, 0x61);
+	/* Clear and disable the PCI SERR error line. */
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -341,15 +346,17 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 {
 	unsigned long i;
 
-	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+	pr_emerg(
+	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 	show_registers(regs);
 
 	if (panic_on_io_nmi)
 		panic("NMI IOCK error: Not continuing");
 
 	/* Re-enable the IOCK line, wait for a few seconds */
-	reason = (reason & 0xf) | 8;
-	outb(reason, 0x61);
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 
 	i = 20000;
 	while (--i) {
@@ -357,8 +364,8 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 		udelay(100);
 	}
 
-	reason &= ~8;
-	outb(reason, 0x61);
+	reason &= ~NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -377,57 +384,50 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 		return;
 	}
 #endif
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-		reason, smp_processor_id());
+	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
+	pr_emerg("Do you have a strange power saving mode enabled?\n");
 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 }
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
-	int cpu;
 
-	cpu = smp_processor_id();
-
-	/* Only the BSP gets external NMIs from the system. */
-	if (!cpu)
-		reason = get_nmi_reason();
+	/*
+	 * CPU-specific NMI must be processed before non-CPU-specific
+	 * NMI, otherwise we may lose it, because the CPU-specific
+	 * NMI cannot be detected/processed on other CPUs.
+	 */
+	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+		return;
 
-	if (!(reason & 0xc0)) {
-		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-								== NOTIFY_STOP)
-			return;
+	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+	raw_spin_lock(&nmi_reason_lock);
+	reason = get_nmi_reason();
 
-#ifdef CONFIG_X86_LOCAL_APIC
-		if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-							== NOTIFY_STOP)
-			return;
+	if (reason & NMI_REASON_MASK) {
+		if (reason & NMI_REASON_SERR)
+			pci_serr_error(reason, regs);
+		else if (reason & NMI_REASON_IOCHK)
+			io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+		/*
+		 * Reassert NMI in case it became active
+		 * meanwhile as it's edge-triggered:
+		 */
+		reassert_nmi();
 #endif
-		unknown_nmi_error(reason, regs);
-
+		raw_spin_unlock(&nmi_reason_lock);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-		return;
+	raw_spin_unlock(&nmi_reason_lock);
 
-	/* AK: following checks seem to be broken on modern chipsets. FIXME */
-	if (reason & 0x80)
-		mem_parity_error(reason, regs);
-	if (reason & 0x40)
-		io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-	/*
-	 * Reassert NMI in case it became active meanwhile
-	 * as it's edge-triggered:
-	 */
-	reassert_nmi();
-#endif
+	unknown_nmi_error(reason, regs);
 }
 
 dotraplinkage notrace __kprobes void
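A concrete trace through the new pci_serr_error() path: if get_nmi_reason() returns 0x80 (SERR asserted), the clear sequence computes (0x80 & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR = 0x00 | 0x04 = 0x04, and outb(0x04, 0x61) acknowledges the error while leaving the IOCHK clear bit untouched. io_check_error() does the same dance with 0x08, then writes the bit back to zero after the delay loop to re-enable the line.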
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 03d2ea82f35a..823f79a17ad1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -965,7 +965,7 @@ out:
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!cpu_has_tsc || tsc_disabled > 0)
+	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
 	if (tsc_clocksource_reliable)
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 08a0069b87a5..f21962c435ed 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
 #include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
+static unsigned char __initdata nodeids[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
 
 static __init int find_northbridge(void)
@@ -68,19 +69,6 @@ static __init void early_get_boot_cpu_id(void)
 #endif
 }
 
-int __init amd_get_nodes(struct bootnode *physnodes)
-{
-	int i;
-	int ret = 0;
-
-	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
-	}
-	return ret;
-}
-
 int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long start = PFN_PHYS(start_pfn);
@@ -113,7 +101,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
-		nodeid = limit & 7;
+		nodeids[i] = nodeid = limit & 7;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				pr_info("Skipping disabled node %d\n", i);
@@ -193,6 +181,76 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 	return 0;
 }
 
+#ifdef CONFIG_NUMA_EMU
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
+void __init amd_get_nodes(struct bootnode *physnodes)
+{
+	int i;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
+	}
+}
+
+static int __init find_node_by_addr(unsigned long addr)
+{
+	int ret = NUMA_NO_NODE;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (addr >= nodes[i].start && addr < nodes[i].end) {
+			ret = i;
+			break;
+		}
+	return ret;
+}
+
+/*
+ * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
+ * set up to represent the physical topology but reflect the emulated
+ * environment.  For each emulated node, the real node which it appears on is
+ * found and a fake pxm to nid mapping is created which mirrors the actual
+ * locality.  node_distance() then represents the correct distances between
+ * emulated nodes by using the fake acpi mappings to pxms.
+ */
+void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
+{
+	unsigned int bits;
+	unsigned int cores;
+	unsigned int apicid_base = 0;
+	int i;
+
+	bits = boot_cpu_data.x86_coreid_bits;
+	cores = 1 << bits;
+	early_get_boot_cpu_id();
+	if (boot_cpu_physical_apicid > 0)
+		apicid_base = boot_cpu_physical_apicid;
+
+	for (i = 0; i < nr_nodes; i++) {
+		int index;
+		int nid;
+		int j;
+
+		nid = find_node_by_addr(nodes[i].start);
+		if (nid == NUMA_NO_NODE)
+			continue;
+
+		index = nodeids[nid] << bits;
+		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
+			for (j = apicid_base; j < cores + apicid_base; j++)
+				fake_apicid_to_node[index + j] = i;
+#ifdef CONFIG_ACPI_NUMA
+		__acpi_map_pxm_to_node(nid, i);
+#endif
+	}
+	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+}
+#endif /* CONFIG_NUMA_EMU */
+
 int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
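Worked example of the comment above amd_fake_nodes(): with two physical nodes and numa=fake=8, each emulated node's start address falls inside one physical node, so it inherits that node's proximity domain via __acpi_map_pxm_to_node(); node_distance() between two emulated nodes backed by the same physical node then reports the local distance, and the remote distance otherwise.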
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 7762a517d69d..1e72102e80c9 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -260,30 +260,30 @@ void __init numa_init_array(void) | |||
260 | #ifdef CONFIG_NUMA_EMU | 260 | #ifdef CONFIG_NUMA_EMU |
261 | /* Numa emulation */ | 261 | /* Numa emulation */ |
262 | static struct bootnode nodes[MAX_NUMNODES] __initdata; | 262 | static struct bootnode nodes[MAX_NUMNODES] __initdata; |
263 | static struct bootnode physnodes[MAX_NUMNODES] __initdata; | 263 | static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata; |
264 | static char *cmdline __initdata; | 264 | static char *cmdline __initdata; |
265 | 265 | ||
266 | static int __init setup_physnodes(unsigned long start, unsigned long end, | 266 | static int __init setup_physnodes(unsigned long start, unsigned long end, |
267 | int acpi, int amd) | 267 | int acpi, int amd) |
268 | { | 268 | { |
269 | int nr_nodes = 0; | ||
270 | int ret = 0; | 269 | int ret = 0; |
271 | int i; | 270 | int i; |
272 | 271 | ||
272 | memset(physnodes, 0, sizeof(physnodes)); | ||
273 | #ifdef CONFIG_ACPI_NUMA | 273 | #ifdef CONFIG_ACPI_NUMA |
274 | if (acpi) | 274 | if (acpi) |
275 | nr_nodes = acpi_get_nodes(physnodes); | 275 | acpi_get_nodes(physnodes, start, end); |
276 | #endif | 276 | #endif |
277 | #ifdef CONFIG_AMD_NUMA | 277 | #ifdef CONFIG_AMD_NUMA |
278 | if (amd) | 278 | if (amd) |
279 | nr_nodes = amd_get_nodes(physnodes); | 279 | amd_get_nodes(physnodes); |
280 | #endif | 280 | #endif |
281 | /* | 281 | /* |
282 | * Basic sanity checking on the physical node map: there may be errors | 282 | * Basic sanity checking on the physical node map: there may be errors |
283 | * if the SRAT or AMD code incorrectly reported the topology or the mem= | 283 | * if the SRAT or AMD code incorrectly reported the topology or the mem= |
284 | * kernel parameter is used. | 284 | * kernel parameter is used. |
285 | */ | 285 | */ |
286 | for (i = 0; i < nr_nodes; i++) { | 286 | for (i = 0; i < MAX_NUMNODES; i++) { |
287 | if (physnodes[i].start == physnodes[i].end) | 287 | if (physnodes[i].start == physnodes[i].end) |
288 | continue; | 288 | continue; |
289 | if (physnodes[i].start > end) { | 289 | if (physnodes[i].start > end) { |
@@ -298,17 +298,6 @@ static int __init setup_physnodes(unsigned long start, unsigned long end, | |||
298 | physnodes[i].start = start; | 298 | physnodes[i].start = start; |
299 | if (physnodes[i].end > end) | 299 | if (physnodes[i].end > end) |
300 | physnodes[i].end = end; | 300 | physnodes[i].end = end; |
301 | } | ||
302 | |||
303 | /* | ||
304 | * Remove all nodes that have no memory or were truncated because of the | ||
305 | * limited address range. | ||
306 | */ | ||
307 | for (i = 0; i < nr_nodes; i++) { | ||
308 | if (physnodes[i].start == physnodes[i].end) | ||
309 | continue; | ||
310 | physnodes[ret].start = physnodes[i].start; | ||
311 | physnodes[ret].end = physnodes[i].end; | ||
312 | ret++; | 301 | ret++; |
313 | } | 302 | } |
314 | 303 | ||
@@ -324,6 +313,24 @@ static int __init setup_physnodes(unsigned long start, unsigned long end, | |||
324 | return ret; | 313 | return ret; |
325 | } | 314 | } |
326 | 315 | ||
316 | static void __init fake_physnodes(int acpi, int amd, int nr_nodes) | ||
317 | { | ||
318 | int i; | ||
319 | |||
320 | BUG_ON(acpi && amd); | ||
321 | #ifdef CONFIG_ACPI_NUMA | ||
322 | if (acpi) | ||
323 | acpi_fake_nodes(nodes, nr_nodes); | ||
324 | #endif | ||
325 | #ifdef CONFIG_AMD_NUMA | ||
326 | if (amd) | ||
327 | amd_fake_nodes(nodes, nr_nodes); | ||
328 | #endif | ||
329 | if (!acpi && !amd) | ||
330 | for (i = 0; i < nr_cpu_ids; i++) | ||
331 | numa_set_node(i, 0); | ||
332 | } | ||
333 | |||
327 | /* | 334 | /* |
328 | * Setups up nid to range from addr to addr + size. If the end | 335 | * Setups up nid to range from addr to addr + size. If the end |
329 | * boundary is greater than max_addr, then max_addr is used instead. | 336 | * boundary is greater than max_addr, then max_addr is used instead. |
@@ -352,8 +359,7 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr) | |||
352 | * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr | 359 | * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr |
353 | * to max_addr. The return value is the number of nodes allocated. | 360 | * to max_addr. The return value is the number of nodes allocated. |
354 | */ | 361 | */ |
355 | static int __init split_nodes_interleave(u64 addr, u64 max_addr, | 362 | static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes) |
356 | int nr_phys_nodes, int nr_nodes) | ||
357 | { | 363 | { |
358 | nodemask_t physnode_mask = NODE_MASK_NONE; | 364 | nodemask_t physnode_mask = NODE_MASK_NONE; |
359 | u64 size; | 365 | u64 size; |
@@ -384,7 +390,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, | |||
384 | return -1; | 390 | return -1; |
385 | } | 391 | } |
386 | 392 | ||
387 | for (i = 0; i < nr_phys_nodes; i++) | 393 | for (i = 0; i < MAX_NUMNODES; i++) |
388 | if (physnodes[i].start != physnodes[i].end) | 394 | if (physnodes[i].start != physnodes[i].end) |
389 | node_set(i, physnode_mask); | 395 | node_set(i, physnode_mask); |
390 | 396 | ||
@@ -553,11 +559,9 @@ static int __init numa_emulation(unsigned long start_pfn, | |||
553 | { | 559 | { |
554 | u64 addr = start_pfn << PAGE_SHIFT; | 560 | u64 addr = start_pfn << PAGE_SHIFT; |
555 | u64 max_addr = last_pfn << PAGE_SHIFT; | 561 | u64 max_addr = last_pfn << PAGE_SHIFT; |
556 | int num_phys_nodes; | ||
557 | int num_nodes; | 562 | int num_nodes; |
558 | int i; | 563 | int i; |
559 | 564 | ||
560 | num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd); | ||
561 | /* | 565 | /* |
562 | * If the numa=fake command-line contains a 'M' or 'G', it represents | 566 | * If the numa=fake command-line contains a 'M' or 'G', it represents |
563 | * the fixed node size. Otherwise, if it is just a single number N, | 567 | * the fixed node size. Otherwise, if it is just a single number N, |
@@ -572,7 +576,7 @@ static int __init numa_emulation(unsigned long start_pfn, | |||
572 | unsigned long n; | 576 | unsigned long n; |
573 | 577 | ||
574 | n = simple_strtoul(cmdline, NULL, 0); | 578 | n = simple_strtoul(cmdline, NULL, 0); |
575 | num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n); | 579 | num_nodes = split_nodes_interleave(addr, max_addr, n); |
576 | } | 580 | } |
577 | 581 | ||
578 | if (num_nodes < 0) | 582 | if (num_nodes < 0) |
@@ -595,7 +599,8 @@ static int __init numa_emulation(unsigned long start_pfn, | |||
595 | nodes[i].end >> PAGE_SHIFT); | 599 | nodes[i].end >> PAGE_SHIFT); |
596 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | 600 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); |
597 | } | 601 | } |
598 | acpi_fake_nodes(nodes, num_nodes); | 602 | setup_physnodes(addr, max_addr, acpi, amd); |
603 | fake_physnodes(acpi, amd, num_nodes); | ||
599 | numa_init_array(); | 604 | numa_init_array(); |
600 | return 0; | 605 | return 0; |
601 | } | 606 | } |
@@ -610,8 +615,12 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, | |||
610 | nodes_clear(node_online_map); | 615 | nodes_clear(node_online_map); |
611 | 616 | ||
612 | #ifdef CONFIG_NUMA_EMU | 617 | #ifdef CONFIG_NUMA_EMU |
618 | setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, | ||
619 | acpi, amd); | ||
613 | if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) | 620 | if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) |
614 | return; | 621 | return; |
622 | setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT, | ||
623 | acpi, amd); | ||
615 | nodes_clear(node_possible_map); | 624 | nodes_clear(node_possible_map); |
616 | nodes_clear(node_online_map); | 625 | nodes_clear(node_online_map); |
617 | #endif | 626 | #endif |
@@ -767,6 +776,7 @@ void __cpuinit numa_clear_node(int cpu) | |||
767 | 776 | ||
768 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | 777 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS |
769 | 778 | ||
779 | #ifndef CONFIG_NUMA_EMU | ||
770 | void __cpuinit numa_add_cpu(int cpu) | 780 | void __cpuinit numa_add_cpu(int cpu) |
771 | { | 781 | { |
772 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 782 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
@@ -776,34 +786,115 @@ void __cpuinit numa_remove_cpu(int cpu) | |||
776 | { | 786 | { |
777 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 787 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
778 | } | 788 | } |
789 | #else | ||
790 | void __cpuinit numa_add_cpu(int cpu) | ||
791 | { | ||
792 | unsigned long addr; | ||
793 | u16 apicid; | ||
794 | int physnid; | ||
795 | int nid = NUMA_NO_NODE; | ||
796 | |||
797 | apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | ||
798 | if (apicid != BAD_APICID) | ||
799 | nid = apicid_to_node[apicid]; | ||
800 | if (nid == NUMA_NO_NODE) | ||
801 | nid = early_cpu_to_node(cpu); | ||
802 | BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); | ||
803 | |||
804 | /* | ||
805 | * Use the starting address of the emulated node to find which physical | ||
806 | * node it is allocated on. | ||
807 | */ | ||
808 | addr = node_start_pfn(nid) << PAGE_SHIFT; | ||
809 | for (physnid = 0; physnid < MAX_NUMNODES; physnid++) | ||
810 | if (addr >= physnodes[physnid].start && | ||
811 | addr < physnodes[physnid].end) | ||
812 | break; | ||
813 | |||
814 | /* | ||
815 | * Map the cpu to each emulated node that is allocated on the physical | ||
816 | * node of the cpu's apic id. | ||
817 | */ | ||
818 | for_each_online_node(nid) { | ||
819 | addr = node_start_pfn(nid) << PAGE_SHIFT; | ||
820 | if (addr >= physnodes[physnid].start && | ||
821 | addr < physnodes[physnid].end) | ||
822 | cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); | ||
823 | } | ||
824 | } | ||
825 | |||
826 | void __cpuinit numa_remove_cpu(int cpu) | ||
827 | { | ||
828 | int i; | ||
829 | |||
830 | for_each_online_node(i) | ||
831 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); | ||
832 | } | ||
833 | #endif /* !CONFIG_NUMA_EMU */ | ||
779 | 834 | ||
780 | #else /* CONFIG_DEBUG_PER_CPU_MAPS */ | 835 | #else /* CONFIG_DEBUG_PER_CPU_MAPS */ |
836 | static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) | ||
837 | { | ||
838 | int node = early_cpu_to_node(cpu); | ||
839 | struct cpumask *mask; | ||
840 | char buf[64]; | ||
841 | |||
842 | mask = node_to_cpumask_map[node]; | ||
843 | if (!mask) { | ||
844 | pr_err("node_to_cpumask_map[%i] NULL\n", node); | ||
845 | dump_stack(); | ||
846 | return NULL; | ||
847 | } | ||
848 | |||
849 | cpulist_scnprintf(buf, sizeof(buf), mask); | ||
850 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | ||
851 | enable ? "numa_add_cpu" : "numa_remove_cpu", | ||
852 | cpu, node, buf); | ||
853 | return mask; | ||
854 | } | ||
781 | 855 | ||
782 | /* | 856 | /* |
783 | * --------- debug versions of the numa functions --------- | 857 | * --------- debug versions of the numa functions --------- |
784 | */ | 858 | */ |
859 | #ifndef CONFIG_NUMA_EMU | ||
785 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | 860 | static void __cpuinit numa_set_cpumask(int cpu, int enable) |
786 | { | 861 | { |
787 | int node = early_cpu_to_node(cpu); | ||
788 | struct cpumask *mask; | 862 | struct cpumask *mask; |
789 | char buf[64]; | ||
790 | 863 | ||
791 | mask = node_to_cpumask_map[node]; | 864 | mask = debug_cpumask_set_cpu(cpu, enable); |
792 | if (mask == NULL) { | 865 | if (!mask) |
793 | printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node); | ||
794 | dump_stack(); | ||
795 | return; | 866 | return; |
796 | } | ||
797 | 867 | ||
798 | if (enable) | 868 | if (enable) |
799 | cpumask_set_cpu(cpu, mask); | 869 | cpumask_set_cpu(cpu, mask); |
800 | else | 870 | else |
801 | cpumask_clear_cpu(cpu, mask); | 871 | cpumask_clear_cpu(cpu, mask); |
872 | } | ||
873 | #else | ||
874 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | ||
875 | { | ||
876 | int node = early_cpu_to_node(cpu); | ||
877 | struct cpumask *mask; | ||
878 | int i; | ||
802 | 879 | ||
803 | cpulist_scnprintf(buf, sizeof(buf), mask); | 880 | for_each_online_node(i) { |
804 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | 881 | unsigned long addr; |
805 | enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); | 882 | |
883 | addr = node_start_pfn(i) << PAGE_SHIFT; | ||
884 | if (addr < physnodes[node].start || | ||
885 | addr >= physnodes[node].end) | ||
886 | continue; | ||
887 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
888 | if (!mask) | ||
889 | return; | ||
890 | |||
891 | if (enable) | ||
892 | cpumask_set_cpu(cpu, mask); | ||
893 | else | ||
894 | cpumask_clear_cpu(cpu, mask); | ||
895 | } | ||
806 | } | 896 | } |
897 | #endif /* CONFIG_NUMA_EMU */ | ||
807 | 898 | ||
808 | void __cpuinit numa_add_cpu(int cpu) | 899 | void __cpuinit numa_add_cpu(int cpu) |
809 | { | 900 | { |
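Note: the range lookup that the emulated numa_add_cpu() above performs can be exercised outside the kernel. The sketch below is a minimal userspace model of its first loop; the physnodes contents and table size are invented for illustration, and struct bootnode is reduced to its start/end fields.

#include <stdio.h>

struct bootnode {
	unsigned long start;
	unsigned long end;
};

#define MAX_NUMNODES 4

/* Invented physical node ranges: two 1G nodes, the rest empty. */
static const struct bootnode physnodes[MAX_NUMNODES] = {
	{ 0x00000000UL, 0x40000000UL },
	{ 0x40000000UL, 0x80000000UL },
};

/* Mirrors the first loop of the emulated numa_add_cpu(): find the
 * physical node whose [start, end) range contains addr. */
static int addr_to_physnid(unsigned long addr)
{
	int physnid;

	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			return physnid;
	return -1;	/* no physical node covers this address */
}

int main(void)
{
	/* An emulated node starting at 1.5G lives on physical node 1. */
	printf("physnid = %d\n", addr_to_physnid(0x60000000UL));
	return 0;
}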
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 171a0aacb99a..603d285d1daa 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -349,18 +349,19 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) | |||
349 | 349 | ||
350 | void __init acpi_numa_arch_fixup(void) {} | 350 | void __init acpi_numa_arch_fixup(void) {} |
351 | 351 | ||
352 | int __init acpi_get_nodes(struct bootnode *physnodes) | 352 | #ifdef CONFIG_NUMA_EMU |
353 | void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start, | ||
354 | unsigned long end) | ||
353 | { | 355 | { |
354 | int i; | 356 | int i; |
355 | int ret = 0; | ||
356 | 357 | ||
357 | for_each_node_mask(i, nodes_parsed) { | 358 | for_each_node_mask(i, nodes_parsed) { |
358 | physnodes[ret].start = nodes[i].start; | 359 | cutoff_node(i, start, end); |
359 | physnodes[ret].end = nodes[i].end; | 360 | physnodes[i].start = nodes[i].start; |
360 | ret++; | 361 | physnodes[i].end = nodes[i].end; |
361 | } | 362 | } |
362 | return ret; | ||
363 | } | 363 | } |
364 | #endif /* CONFIG_NUMA_EMU */ | ||
364 | 365 | ||
365 | /* Use the information discovered above to actually set up the nodes. */ | 366 | /* Use the information discovered above to actually set up the nodes. */ |
366 | int __init acpi_scan_nodes(unsigned long start, unsigned long end) | 367 | int __init acpi_scan_nodes(unsigned long start, unsigned long end) |
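Note: the reworked acpi_get_nodes() no longer compacts parsed nodes into a dense array and returns a count; it clamps each parsed node to the [start, end) window with cutoff_node() and stores it under its own node id. Below is a hedged userspace sketch of that clamping step, assuming cutoff_node() narrows a range and collapses it when emptied (a simplification of the real helper in srat_64.c, which operates on a node index):

#include <stdio.h>

struct bootnode {
	unsigned long start;
	unsigned long end;
};

/* Simplified stand-in for srat_64.c's cutoff_node(): clamp a node to
 * [start, end), collapsing it to empty if nothing remains. */
static void cutoff_node(struct bootnode *nd, unsigned long start,
			unsigned long end)
{
	if (nd->start < start)
		nd->start = start;
	if (nd->end > end)
		nd->end = end;
	if (nd->start > nd->end)
		nd->start = nd->end;
}

int main(void)
{
	struct bootnode nd = { 0x1000, 0x9000 };

	cutoff_node(&nd, 0x2000, 0x8000);
	printf("clamped to [%#lx, %#lx)\n", nd.start, nd.end);
	return 0;
}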
@@ -505,8 +506,6 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) | |||
505 | { | 506 | { |
506 | int i, j; | 507 | int i, j; |
507 | 508 | ||
508 | printk(KERN_INFO "Faking PXM affinity for fake nodes on real " | ||
509 | "topology.\n"); | ||
510 | for (i = 0; i < num_nodes; i++) { | 509 | for (i = 0; i < num_nodes; i++) { |
511 | int nid, pxm; | 510 | int nid, pxm; |
512 | 511 | ||
@@ -526,6 +525,17 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes) | |||
526 | fake_apicid_to_node[j] == NUMA_NO_NODE) | 525 | fake_apicid_to_node[j] == NUMA_NO_NODE) |
527 | fake_apicid_to_node[j] = i; | 526 | fake_apicid_to_node[j] = i; |
528 | } | 527 | } |
528 | |||
529 | /* | ||
530 | * If an apicid maps to a physical node that has no corresponding | ||
531 | * emulated node, default its mapping to node 0, which is guaranteed | ||
532 | * to exist. | ||
533 | */ | ||
534 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
535 | if (apicid_to_node[i] != NUMA_NO_NODE && | ||
536 | fake_apicid_to_node[i] == NUMA_NO_NODE) | ||
537 | fake_apicid_to_node[i] = 0; | ||
538 | |||
529 | for (i = 0; i < num_nodes; i++) | 539 | for (i = 0; i < num_nodes; i++) |
530 | __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); | 540 | __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); |
531 | memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); | 541 | memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); |
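Note: the new defaulting pass is easy to check in isolation. This toy program models it with made-up table sizes and mappings; MAX_LOCAL_APIC and NUMA_NO_NODE are redefined locally rather than taken from kernel headers.

#include <stdio.h>

#define MAX_LOCAL_APIC	8	/* toy size; the kernel's is much larger */
#define NUMA_NO_NODE	(-1)

int main(void)
{
	/* Invented mappings: apicid 2 sits on a real node but was not
	 * assigned an emulated node, so it must fall back to node 0. */
	int apicid_to_node[MAX_LOCAL_APIC] =
		{ 0, 0, 1, 1, NUMA_NO_NODE, NUMA_NO_NODE,
		  NUMA_NO_NODE, NUMA_NO_NODE };
	int fake_apicid_to_node[MAX_LOCAL_APIC] =
		{ 0, 1, NUMA_NO_NODE, 2, NUMA_NO_NODE, NUMA_NO_NODE,
		  NUMA_NO_NODE, NUMA_NO_NODE };
	int i;

	/* The defaulting pass from acpi_fake_nodes(). */
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		if (apicid_to_node[i] != NUMA_NO_NODE &&
		    fake_apicid_to_node[i] == NUMA_NO_NODE)
			fake_apicid_to_node[i] = 0;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		printf("apicid %d -> emulated node %d\n",
		       i, fake_apicid_to_node[i]);
	return 0;
}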
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index f24a8533bcdf..e2b7b0c06cdf 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -65,7 +65,6 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
65 | 65 | ||
66 | switch (val) { | 66 | switch (val) { |
67 | case DIE_NMI: | 67 | case DIE_NMI: |
68 | case DIE_NMI_IPI: | ||
69 | if (ctr_running) | 68 | if (ctr_running) |
70 | model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); | 69 | model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); |
71 | else if (!nmi_enabled) | 70 | else if (!nmi_enabled) |
@@ -361,7 +360,7 @@ static void nmi_cpu_setup(void *dummy) | |||
361 | static struct notifier_block profile_exceptions_nb = { | 360 | static struct notifier_block profile_exceptions_nb = { |
362 | .notifier_call = profile_exceptions_notify, | 361 | .notifier_call = profile_exceptions_notify, |
363 | .next = NULL, | 362 | .next = NULL, |
364 | .priority = 2 | 363 | .priority = NMI_LOCAL_LOW_PRIOR, |
365 | }; | 364 | }; |
366 | 365 | ||
367 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | 366 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c index 0636dd93cef8..720bf5a53c51 100644 --- a/arch/x86/oprofile/nmi_timer_int.c +++ b/arch/x86/oprofile/nmi_timer_int.c | |||
@@ -38,7 +38,7 @@ static int profile_timer_exceptions_notify(struct notifier_block *self, | |||
38 | static struct notifier_block profile_timer_exceptions_nb = { | 38 | static struct notifier_block profile_timer_exceptions_nb = { |
39 | .notifier_call = profile_timer_exceptions_notify, | 39 | .notifier_call = profile_timer_exceptions_notify, |
40 | .next = NULL, | 40 | .next = NULL, |
41 | .priority = 0 | 41 | .priority = NMI_LOW_PRIOR, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static int timer_start(void) | 44 | static int timer_start(void) |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index fc1e8fe07e5c..e27dffbbb1a7 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/cpu.h> | 4 | #include <linux/cpu.h> |
5 | #include <linux/range.h> | 5 | #include <linux/range.h> |
6 | 6 | ||
7 | #include <asm/amd_nb.h> | ||
7 | #include <asm/pci_x86.h> | 8 | #include <asm/pci_x86.h> |
8 | 9 | ||
9 | #include <asm/pci-direct.h> | 10 | #include <asm/pci-direct.h> |
@@ -378,6 +379,34 @@ static struct notifier_block __cpuinitdata amd_cpu_notifier = { | |||
378 | .notifier_call = amd_cpu_notify, | 379 | .notifier_call = amd_cpu_notify, |
379 | }; | 380 | }; |
380 | 381 | ||
382 | static void __init pci_enable_pci_io_ecs(void) | ||
383 | { | ||
384 | #ifdef CONFIG_AMD_NB | ||
385 | unsigned int i, n; | ||
386 | |||
387 | for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) { | ||
388 | u8 bus = amd_nb_bus_dev_ranges[i].bus; | ||
389 | u8 slot = amd_nb_bus_dev_ranges[i].dev_base; | ||
390 | u8 limit = amd_nb_bus_dev_ranges[i].dev_limit; | ||
391 | |||
392 | for (; slot < limit; ++slot) { | ||
393 | u32 val = read_pci_config(bus, slot, 3, 0); | ||
394 | |||
395 | if (!early_is_amd_nb(val)) | ||
396 | continue; | ||
397 | |||
398 | val = read_pci_config(bus, slot, 3, 0x8c); | ||
399 | if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) { | ||
400 | val |= ENABLE_CF8_EXT_CFG >> 32; | ||
401 | write_pci_config(bus, slot, 3, 0x8c, val); | ||
402 | } | ||
403 | ++n; | ||
404 | } | ||
405 | } | ||
406 | pr_info("Extended Config Space enabled on %u nodes\n", n); | ||
407 | #endif | ||
408 | } | ||
409 | |||
381 | static int __init pci_io_ecs_init(void) | 410 | static int __init pci_io_ecs_init(void) |
382 | { | 411 | { |
383 | int cpu; | 412 | int cpu; |
@@ -386,6 +415,10 @@ static int __init pci_io_ecs_init(void) | |||
386 | if (boot_cpu_data.x86 < 0x10) | 415 | if (boot_cpu_data.x86 < 0x10) |
387 | return 0; | 416 | return 0; |
388 | 417 | ||
418 | /* Try the PCI method first. */ | ||
419 | if (early_pci_allowed()) | ||
420 | pci_enable_pci_io_ecs(); | ||
421 | |||
389 | register_cpu_notifier(&amd_cpu_notifier); | 422 | register_cpu_notifier(&amd_cpu_notifier); |
390 | for_each_online_cpu(cpu) | 423 | for_each_online_cpu(cpu) |
391 | amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, | 424 | amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, |
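Note: the read-modify-write in pci_enable_pci_io_ecs() sets a single bit through the northbridge's PCI config space at function 3, offset 0x8c. ENABLE_CF8_EXT_CFG is defined as bit 46 of a 64-bit MSR, so shifting it right by 32 selects bit 14 of the dword the code reads at 0x8c (assuming, as the shift implies, that this offset holds the high half of that register). A standalone sketch, with the read_pci_config()/write_pci_config() accessors replaced by a fake register:

#include <stdio.h>
#include <stdint.h>

#define ENABLE_CF8_EXT_CFG	(1ULL << 46)	/* bit 46 of the 64-bit reg */

/* Fake register standing in for PCI config offset 0x8c on the NB's
 * function 3; starts with the enable bit clear. */
static uint32_t fake_f3x8c;

int main(void)
{
	uint32_t val = fake_f3x8c;

	/* Shifting the 64-bit bit position right by 32 yields bit 14 of
	 * the high dword, the only bit this path touches. */
	if (!(val & (uint32_t)(ENABLE_CF8_EXT_CFG >> 32))) {
		val |= (uint32_t)(ENABLE_CF8_EXT_CFG >> 32);
		fake_f3x8c = val;	/* models write_pci_config() */
	}
	printf("F3x8C = %#010x (ECS enable bit set: %d)\n",
	       fake_f3x8c, !!(fake_f3x8c & (1u << 14)));
	return 0;
}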