Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/Makefile                  2
-rw-r--r--  arch/x86/kernel/acpi/boot.c                   22
-rw-r--r--  arch/x86/kernel/acpi/processor.c             101
-rw-r--r--  arch/x86/kernel/acpi/sleep.c                   2
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c              10
-rw-r--r--  arch/x86/kernel/aperture_64.c                  1
-rw-r--r--  arch/x86/kernel/apic/apic.c                   23
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c            5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c                 7
-rw-r--r--  arch/x86/kernel/apic/probe_64.c               13
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c            38
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c     4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c               9
-rw-r--r--  arch/x86/kernel/dumpstack.c                   14
-rw-r--r--  arch/x86/kernel/e820.c                         4
-rw-r--r--  arch/x86/kernel/microcode_core.c               2
-rw-r--r--  arch/x86/kernel/process.c                      6
-rw-r--r--  arch/x86/kernel/process_32.c                  14
-rw-r--r--  arch/x86/kernel/process_64.c                  24
19 files changed, 113 insertions(+), 188 deletions(-)
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index fd5ca97a2ad5..6f35260bb3ef 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_ACPI) += boot.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += cstate.o processor.o
+obj-y += cstate.o
 endif
 
 $(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fb1035cd9a6a..036d28adf59d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1529,16 +1529,10 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
  * if acpi_blacklisted() acpi_disabled = 1;
  * acpi_irq_model=...
  * ...
- *
- * return value: (currently ignored)
- *	0: success
- *	!0: failure
  */
 
-int __init acpi_boot_table_init(void)
+void __init acpi_boot_table_init(void)
 {
-	int error;
-
 	dmi_check_system(acpi_dmi_table);
 
 	/*
@@ -1546,15 +1540,14 @@ int __init acpi_boot_table_init(void)
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
 	 */
 	if (acpi_disabled && !acpi_ht)
-		return 1;
+		return;
 
 	/*
 	 * Initialize the ACPI boot-time table parser.
 	 */
-	error = acpi_table_init();
-	if (error) {
+	if (acpi_table_init()) {
 		disable_acpi();
-		return error;
+		return;
 	}
 
 	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1562,18 +1555,15 @@ int __init acpi_boot_table_init(void)
 	/*
 	 * blacklist may disable ACPI entirely
 	 */
-	error = acpi_blacklisted();
-	if (error) {
+	if (acpi_blacklisted()) {
 		if (acpi_force) {
 			printk(KERN_WARNING PREFIX "acpi=force override\n");
 		} else {
 			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
 			disable_acpi();
-			return error;
+			return;
 		}
 	}
-
-	return 0;
 }
 
 int __init early_acpi_boot_init(void)
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
deleted file mode 100644
index d85d1b2432ba..000000000000
--- a/arch/x86/kernel/acpi/processor.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	/*
-	 * The default of PDC_SMP_T_SWCOORD bit is set for intel x86 cpu so
-	 * that OSPM is capable of native ACPI throttling software
-	 * coordination using BIOS supplied _TSD info.
-	 */
-	buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-	if (cpu_has(c, X86_FEATURE_EST))
-		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
-
-	if (cpu_has(c, X86_FEATURE_ACPI))
-		buf[2] |= ACPI_PDC_T_FFH;
-
-	/*
-	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
-	 */
-	if (!cpu_has(c, X86_FEATURE_MWAIT))
-		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	struct cpuinfo_x86 *c = &cpu_data(pr->id);
-
-	pr->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL ||
-	    c->x86_vendor == X86_VENDOR_CENTAUR)
-		init_intel_pdc(pr, c);
-
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
-	if (pr->pdc) {
-		kfree(pr->pdc->pointer->buffer.pointer);
-		kfree(pr->pdc->pointer);
-		kfree(pr->pdc);
-		pr->pdc = NULL;
-	}
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 82e508677b91..f9961034e557 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -162,6 +162,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "sci_force_enable", 16) == 0)
+			acpi_set_sci_en_on_resume();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 1dca9c34eaeb..fb490ce7dd55 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,6 +138,11 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
+ * Set to true if ACPI table parsing and hardware initialization went properly
+ */
+static bool amd_iommu_initialized;
+
+/*
  * List of protection domains - used during resume
  */
 LIST_HEAD(amd_iommu_pd_list);
@@ -929,6 +934,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
+	amd_iommu_initialized = true;
+
 	return 0;
 }
 
@@ -1263,6 +1270,9 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
+	if (!amd_iommu_initialized)
+		goto free;
+
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..f147a95fd84a 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,6 +31,7 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
+EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aa57c079c98f..3987e4408f75 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -61,12 +61,6 @@ unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
  * The highest APIC ID seen during enumeration.
- *
- * On AMD, this determines the messaging protocol we can use: if all APIC IDs
- * are in the 0 ... 7 range, then we can use logical addressing which
- * has some performance advantages (better broadcasting).
- *
- * If there's an APIC ID above 8, we use physical addressing.
  */
 unsigned int max_physical_apicid;
 
@@ -1898,14 +1892,17 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		if (num_processors > 8)
-			def_to_bigsmp = 1;
-		break;
-	case X86_VENDOR_AMD:
-		if (max_physical_apicid >= 8)
+	if (num_processors > 8) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(version)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
 			def_to_bigsmp = 1;
+		}
 	}
 #endif
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index eacbd2b31d27..e3c3d820c325 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -240,6 +240,11 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
 	}
+
+	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
+		printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
+		return 1;
+	}
 #endif
 
 	return 0;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index de00c4619a55..53243ca7816d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		cfg = irq_cfg(irq);
 		raw_spin_lock(&desc->lock);
 
+		/*
+		 * Check if the irq migration is in progress. If so, we
+		 * haven't received the cleanup request yet for this irq.
+		 */
+		if (cfg->move_in_progress)
+			goto unlock;
+
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index c4cbd3080c1c..450fe2064a14 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void)
 	}
 #endif
 
-	if (apic == &apic_flat) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (num_processors > 8)
-				apic = &apic_physflat;
-			break;
-		case X86_VENDOR_AMD:
-			if (max_physical_apicid >= 8)
-				apic = &apic_physflat;
-		}
-	}
+	if (apic == &apic_flat && num_processors > 8)
+		apic = &apic_physflat;
 
 	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d56b0efb2057..21db3cbea7dc 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
+int uv_min_hub_revision_id;
+EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
@@ -55,12 +57,19 @@ static int early_get_nodeid(void)
 	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
 	node_id.v = *mmr;
 	early_iounmap(mmr, sizeof(*mmr));
+
+	/* Currently, all blades have same revision number */
+	uv_min_hub_revision_id = node_id.s.revision;
+
 	return node_id.s.node_id;
 }
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
+	int nodeid;
+
 	if (!strcmp(oem_id, "SGI")) {
+		nodeid = early_get_nodeid();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
@@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
 			__get_cpu_var(x2apic_extra_bits) =
-				early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
+				nodeid << (UV_APIC_PNODE_SHIFT - 1);
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			return 1;
 		}
@@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 enum map_type {map_wb, map_uc};
 
-static __init void map_high(char *id, unsigned long base, int shift,
-			    int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int pshift,
+			    int bshift, int max_pnode, enum map_type map_type)
 {
 	unsigned long bytes, paddr;
 
-	paddr = base << shift;
-	bytes = (1UL << shift) * (max_pnode + 1);
+	paddr = base << pshift;
+	bytes = (1UL << bshift) * (max_pnode + 1);
 	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
 						paddr + bytes);
 	if (map_type == map_uc)
@@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode)
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
 	if (gru.s.enable) {
-		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
 		gru_start_paddr = ((u64)gru.s.base << shift);
 		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 
@@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode)
 
 	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
 	if (mmr.s.enable)
-		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
 }
 
 static __init void map_mmioh_high(int max_pnode)
@@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode)
 
 	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
 	if (mmioh.s.enable)
-		map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+			 max_pnode, map_uc);
 }
 
 static __init void map_low_mmrs(void)
@@ -629,8 +639,10 @@ void __init uv_system_init(void)
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
+		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
 		nid = cpu_to_node(cpu);
-		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+		pnode = uv_apicid_to_pnode(apicid);
 		blade = boot_pnode_to_blade(pnode);
 		lcpu = uv_blade_info[blade].nr_possible_cpus;
 		uv_blade_info[blade].nr_possible_cpus++;
@@ -651,15 +663,13 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
 		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
-		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);
 
-		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
-				"lcpu %d, blade %d\n",
-				cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
-				lcpu, blade);
+		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
+			cpu, apicid, pnode, nid, lcpu, blade);
 	}
 
 	/* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index f28decf8dde3..1b1920fa7c80 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -190,9 +190,11 @@ static void do_drv_write(void *_cmd)
 
 static void drv_read(struct drv_cmd *cmd)
 {
+	int err;
 	cmd->val = 0;
 
-	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
+	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
+	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
 }
 
 static void drv_write(struct drv_cmd *cmd)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c223b7e895d9..8c1c07073ccc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1343,6 +1343,13 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
 		bits |= 0x1;
+
+	/*
+	 * ANY bit is supported in v3 and up
+	 */
+	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
+		bits |= 0x4;
+
 	bits <<= (idx * 4);
 	mask = 0xfULL << (idx * 4);
 
@@ -2347,7 +2354,7 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
 /*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c56bc2873030..6d817554780a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -123,13 +123,15 @@ print_context_stack_bp(struct thread_info *tinfo,
 	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
-		if (__kernel_text_address(addr)) {
-			ops->address(data, addr, 1);
-			frame = frame->next_frame;
-			ret_addr = &frame->return_address;
-			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
-		}
+		if (!__kernel_text_address(addr))
+			break;
+
+		ops->address(data, addr, 1);
+		frame = frame->next_frame;
+		ret_addr = &frame->return_address;
+		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
 	}
+
 	return (unsigned long)frame;
 }
 EXPORT_SYMBOL_GPL(print_context_stack_bp);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ed7ab2ca48..a1a7876cadcb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -733,13 +733,13 @@ struct early_res {
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
 	{ 0, PAGE_SIZE, "BIOS data page", 1 },	/* BIOS data page */
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
 	/*
 	 * But first pinch a few for the stack/trampoline stuff
 	 * FIXME: Don't need the extra page at 4K, but need to fix
 	 * trampoline before removing it. (see the GDT stuff)
 	 */
-	{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
 #endif
 
 	{}
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 844c02c65fcb..0c8632433090 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -394,7 +394,7 @@ static enum ucode_state microcode_update_cpu(int cpu)
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	enum ucode_state ustate;
 
-	if (uci->valid && uci->mc)
+	if (uci->valid)
 		ustate = microcode_resume_cpu(cpu);
 	else
 		ustate = microcode_init_cpu(cpu);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 98c2cdeb599e..02c3ee013ccd 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -103,8 +103,8 @@ void show_regs_common(void)
 	if (!product)
 		product = "";
 
-	printk("\n");
-	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_CONT "\n");
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
@@ -288,6 +288,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.es = __USER_DS;
 	regs.fs = __KERNEL_PERCPU;
 	regs.gs = __KERNEL_STACK_CANARY;
+#else
+	regs.ss = __KERNEL_DS;
 #endif
 
 	regs.orig_ax = -1;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9c517b5858f0..37ad1e046aae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -139,16 +139,16 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	show_regs_common();
 
-	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
 			(u16)regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 
-	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->ax, regs->bx, regs->cx, regs->dx);
-	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 		regs->si, regs->di, regs->bp, sp);
-	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
 		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
 
 	if (!all)
@@ -158,19 +158,19 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr2 = read_cr2();
 	cr3 = read_cr3();
 	cr4 = read_cr4_safe();
-	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 			cr0, cr2, cr3, cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
 	get_debugreg(d3, 3);
-	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
 			d0, d1, d2, d3);
 
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk("DR6: %08lx DR7: %08lx\n",
+	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
 			d6, d7);
 }
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 52fbd0c60198..f9e033150cdf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -161,19 +161,19 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned int ds, cs, es;
 
 	show_regs_common();
-	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
-	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
 			regs->sp, regs->flags);
-	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 	       regs->ax, regs->bx, regs->cx);
-	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
+	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 	       regs->dx, regs->si, regs->di);
-	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
+	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
 	       regs->bp, regs->r8, regs->r9);
-	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
+	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
 	       regs->r10, regs->r11, regs->r12);
-	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
+	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
 	       regs->r13, regs->r14, regs->r15);
 
 	asm("movl %%ds,%0" : "=r" (ds));
@@ -194,21 +194,21 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr3 = read_cr3();
 	cr4 = read_cr4();
 
-	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
 	       fs, fsindex, gs, gsindex, shadowgs);
-	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
 			es, cr0);
-	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
 			cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
 
 void show_regs(struct pt_regs *regs)