Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/acpi/wakeup_32.S     2
-rw-r--r--   arch/x86/kernel/amd_iommu.c         16
-rw-r--r--   arch/x86/kernel/amd_iommu_init.c    20
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c    10
-rw-r--r--   arch/x86/kernel/init_task.c          2
-rw-r--r--   arch/x86/kernel/setup_percpu.c       8
-rw-r--r--   arch/x86/kernel/smpboot.c            2
-rw-r--r--   arch/x86/kernel/vmlinux.lds.S        4
8 files changed, 42 insertions, 22 deletions
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 8ded418b0593..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
-        .section .text.page_aligned
+        .section .text..page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa5a1474cd18..0d20286d78c6 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1487,6 +1487,7 @@ static int __attach_device(struct device *dev,
                            struct protection_domain *domain)
 {
         struct iommu_dev_data *dev_data, *alias_data;
+        int ret;
 
         dev_data = get_dev_data(dev);
         alias_data = get_dev_data(dev_data->alias);
@@ -1498,13 +1499,14 @@ static int __attach_device(struct device *dev,
         spin_lock(&domain->lock);
 
         /* Some sanity checks */
+        ret = -EBUSY;
         if (alias_data->domain != NULL &&
             alias_data->domain != domain)
-                return -EBUSY;
+                goto out_unlock;
 
         if (dev_data->domain != NULL &&
             dev_data->domain != domain)
-                return -EBUSY;
+                goto out_unlock;
 
         /* Do real assignment */
         if (dev_data->alias != dev) {
@@ -1520,10 +1522,14 @@ static int __attach_device(struct device *dev,
 
         atomic_inc(&dev_data->bind);
 
+        ret = 0;
+
+out_unlock:
+
         /* ready */
         spin_unlock(&domain->lock);
 
-        return 0;
+        return ret;
 }
 
 /*
@@ -2324,10 +2330,6 @@ int __init amd_iommu_init_dma_ops(void)
 
         iommu_detected = 1;
         swiotlb = 0;
-#ifdef CONFIG_GART_IOMMU
-        gart_iommu_aperture_disabled = 1;
-        gart_iommu_aperture = 0;
-#endif
 
         /* Make the driver finally visible to the drivers */
         dma_ops = &amd_iommu_dma_ops;
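
The __attach_device() hunks above replace two early returns, which were taken while domain->lock was still held, with a single out_unlock exit: ret is set to -EBUSY before the checks and only cleared to 0 once every check has passed. A minimal, self-contained userspace sketch of that single-exit pattern follows; the names and the pthread mutex standing in for the spinlock are illustrative only, not the driver's real API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-in for the real protection domain; fields are illustrative. */
struct domain {
        pthread_mutex_t lock;
        int owner;                      /* 0 means "not attached" */
};

/*
 * Single-exit attach: pick the pessimistic error code first, route every
 * failure through one unlock label, and only overwrite ret with 0 after
 * all checks have passed -- so the lock can never be leaked.
 */
static int attach(struct domain *dom, int id)
{
        int ret;

        pthread_mutex_lock(&dom->lock);

        ret = -EBUSY;
        if (dom->owner != 0 && dom->owner != id)
                goto out_unlock;        /* busy: still goes through the unlock */

        dom->owner = id;                /* do the real assignment */
        ret = 0;

out_unlock:
        pthread_mutex_unlock(&dom->lock);
        return ret;
}

int main(void)
{
        struct domain dom = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("first attach:  %d\n", attach(&dom, 1));  /* 0 */
        printf("second attach: %d\n", attach(&dom, 2));  /* -EBUSY */
        return 0;
}

The same shape also makes later changes cheaper: any new check added between the lock and the unlock can reuse out_unlock instead of growing another unlock-and-return pair.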
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3bacb4d0844c..3cc63e2b8dd4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -287,8 +287,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 {
         u8 *ret;
 
-        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
+        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
+                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
+                        address);
+                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                 return NULL;
+        }
 
         ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
         if (ret != NULL)
@@ -1314,7 +1318,7 @@ static int __init amd_iommu_init(void)
         ret = amd_iommu_init_dma_ops();
 
         if (ret)
-                goto free;
+                goto free_disable;
 
         amd_iommu_init_api();
 
@@ -1332,9 +1336,10 @@ static int __init amd_iommu_init(void)
 out:
         return ret;
 
-free:
+free_disable:
         disable_iommus();
 
+free:
         amd_iommu_uninit_devices();
 
         free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
@@ -1353,6 +1358,15 @@ free:
 
         free_unity_maps();
 
+#ifdef CONFIG_GART_IOMMU
+        /*
+         * We failed to initialize the AMD IOMMU - try fallback to GART
+         * if possible.
+         */
+        gart_iommu_init();
+
+#endif
+
         goto out;
 }
 
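
The amd_iommu_init() hunks split the error path into two ordered labels (free_disable quiesces the hardware and then falls through to free) and finish it with a GART fallback when CONFIG_GART_IOMMU is set, matching the removal of the aperture-disabling #ifdef from amd_iommu_init_dma_ops() above. A small standalone sketch of that ordered-label cleanup; every function name here is a made-up stub, not one of the real init routines.

#include <stdio.h>

/* Illustrative stubs standing in for the real init and teardown steps. */
static int alloc_tables(void)     { return 0; }
static int register_devices(void) { return 0; }
static void enable_hw(void)       { puts("enable_hw"); }
static int setup_dma_ops(void)    { return -1; }   /* force the failure path */
static void disable_hw(void)      { puts("disable_hw"); }
static void free_tables(void)     { puts("free_tables"); }
static void init_fallback(void)   { puts("init_fallback"); }

/*
 * Ordered error labels: a failure after the hardware has been enabled
 * jumps to err_disable so the device is quiesced first and then falls
 * through to err_free; earlier failures jump straight to err_free.
 * The error path ends by trying a fallback driver, as the hunk above
 * does with gart_iommu_init() under CONFIG_GART_IOMMU.
 */
static int my_init(void)
{
        int ret;

        ret = alloc_tables();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = register_devices();
        if (ret)
                goto err_free;          /* hardware is not enabled yet */

        enable_hw();

        ret = setup_dma_ops();
        if (ret)
                goto err_disable;       /* hardware is live: quiesce it first */

        return 0;

err_disable:
        disable_hw();
        /* fall through */
err_free:
        free_tables();
        init_fallback();
        return ret;
}

int main(void)
{
        printf("my_init() = %d\n", my_init());
        return 0;
}

Keeping the labels in teardown order means each failure point only names the first cleanup step it needs; everything later comes for free via fall-through.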
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 707165dbc203..18cc42562250 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -36,6 +36,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/edac_mce.h>
 
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
@@ -169,6 +170,15 @@ void mce_log(struct mce *mce)
         entry = rcu_dereference_check_mce(mcelog.next);
         for (;;) {
                 /*
+                 * If edac_mce is enabled, it will check the error type
+                 * and will process it, if it is a known error.
+                 * Otherwise, the error will be sent through mcelog
+                 * interface
+                 */
+                if (edac_mce_parse(mce))
+                        return;
+
+                /*
                  * When the buffer fills up discard new entries.
                  * Assume that the earlier errors are the more
                  * interesting ones:
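
The mce_log() hunk gives an optional decoder the first look at each record: if edac_mce_parse() recognises and handles the error, it never reaches the mcelog ring buffer. A compact userspace sketch of that dispatch-before-buffering pattern; the struct, hook pointer, and function names are illustrative placeholders, not the real edac registration API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative event record; the real struct mce carries much more. */
struct event {
        unsigned bank;
        unsigned long long status;
};

#define LOG_LEN 8
static struct event log_buf[LOG_LEN];
static unsigned log_next;

/*
 * Optional consumer hook: if it recognises and fully handles the event
 * it returns true and the generic ring buffer never sees it, which is
 * the role edac_mce_parse() plays in the hunk above.
 */
static bool (*parse_hook)(const struct event *ev);

static void log_event(const struct event *ev)
{
        if (parse_hook && parse_hook(ev))
                return;                 /* consumed by the hook */

        if (log_next == LOG_LEN)
                return;                 /* buffer full: drop the new entry */
        log_buf[log_next++] = *ev;
}

static bool decode_known(const struct event *ev)
{
        if (ev->bank != 4)
                return false;           /* not ours, let it be logged */
        printf("decoded event in bank %u\n", ev->bank);
        return true;
}

int main(void)
{
        struct event a = { .bank = 4, .status = 0x1 };
        struct event b = { .bank = 0, .status = 0x2 };

        parse_hook = decode_known;
        log_event(&a);                  /* handled by the hook */
        log_event(&b);                  /* ends up in log_buf */
        printf("buffered events: %u\n", log_next);
        return 0;
}

Checking the hook before touching the ring buffer also means a record the decoder consumes cannot crowd out unrelated errors once the buffer is nearly full.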
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0e..43e9ccf44947 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a867940a6dfc..de3b63ae3da2 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -21,12 +21,6 @@
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(fmt, ...) pr_dbg(fmt, ##__VA_ARGS__)
-#else
-# define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
 
@@ -247,7 +241,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
         /*
-         * Up to this point, the boot CPU has been using .data.init
+         * Up to this point, the boot CPU has been using .init.data
          * area. Reload any changed state for the boot CPU.
          */
         if (cpu == boot_cpu_id)
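
The block removed from setup_percpu.c above is a debug macro whose disabled arm wraps the call in do { if (0) ... } while (0); that idiom keeps the format string and arguments visible to the compiler, so they stay type-checked and the variables they use are not reported as unused, while the optimiser drops the call entirely. A tiny standalone illustration; DBG and DEBUG_MAPS are placeholder names, and fprintf stands in for the kernel's printing helpers.

#include <stdio.h>

/*
 * Disabled-by-default debug macro in the style of the removed block:
 * the 'if (0)' arm is compiled and checked but never executed, and the
 * whole statement disappears after optimisation.
 */
#ifdef DEBUG_MAPS
# define DBG(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
# define DBG(fmt, ...) do { if (0) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
        int cpu = 3, node = 1;

        /* Compiles (and is argument-checked) either way; prints only with -DDEBUG_MAPS. */
        DBG("cpu %d -> node %d\n", cpu, node);

        printf("done\n");
        return 0;
}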
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 37462f1ddba5..c4f33b2e77d6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -686,7 +686,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
         static int current_node = -1;
-        int node = cpu_to_node(cpu);
+        int node = early_cpu_to_node(cpu);
 
         if (system_state == SYSTEM_BOOTING) {
                 if (node != current_node) {
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2cc249718c46..d0bb52296fa3 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,7 +97,7 @@ SECTIONS
                 HEAD_TEXT
 #ifdef CONFIG_X86_32
                 . = ALIGN(PAGE_SIZE);
-                *(.text.page_aligned)
+                *(.text..page_aligned)
 #endif
                 . = ALIGN(8);
                 _stext = .;
@@ -305,7 +305,7 @@ SECTIONS
         . = ALIGN(PAGE_SIZE);
         .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                 __bss_start = .;
-                *(.bss.page_aligned)
+                *(.bss..page_aligned)
                 *(.bss)
                 . = ALIGN(4);
                 __bss_stop = .;
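
The section renames throughout this diff (.text.page_aligned to .text..page_aligned, .bss.page_aligned to .bss..page_aligned, plus the comment fixes in init_task.c and setup_percpu.c) reserve a double dot for kernel-special section names: compiler-generated per-symbol sections from -ffunction-sections/-fdata-sections look like .text.<identifier> or .data.<identifier>, and since C identifiers can never contain a dot they cannot collide with the double-dot names. A standalone sketch of placing an object into such a section; the section name, variable, and PAGE_SIZE value here are illustrative.

#include <stdio.h>

#define PAGE_SIZE 4096

/*
 * Place a buffer in a custom, page-aligned section.  The ".." in the
 * name cannot be produced by -fdata-sections, which is the point of
 * the renames in the hunks above.
 */
static char page_buf[PAGE_SIZE]
        __attribute__((section(".data..page_aligned"), aligned(PAGE_SIZE)));

int main(void)
{
        printf("buffer at %p\n", (void *)page_buf);
        printf("page aligned: %s\n",
               ((unsigned long)page_buf % PAGE_SIZE) == 0 ? "yes" : "no");
        return 0;
}

In the kernel image, the matching input-section rule in vmlinux.lds.S -- for example *(.bss..page_aligned) in the hunk above -- gathers these objects into the intended spot in the final layout; in a plain userspace build the custom section is simply placed by the default linker script, and its name shows up in readelf -S output.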