Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/amd_iommu.c         9
-rw-r--r--  arch/x86/kernel/cpuid.c             4
-rw-r--r--  arch/x86/kernel/dumpstack_32.c      2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c      2
-rw-r--r--  arch/x86/kernel/e820.c              4
-rw-r--r--  arch/x86/kernel/msr.c               4
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c   18
-rw-r--r--  arch/x86/kernel/pci-dma.c           4
-rw-r--r--  arch/x86/kernel/pci-gart_64.c       8
-rw-r--r--  arch/x86/kernel/smpboot.c          14
10 files changed, 31 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 34e4d112b1e..a8fd9ebdc8e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -295,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 			      u64 address, size_t size)
 {
 	int s = 0;
-	unsigned pages = iommu_num_pages(address, size);
+	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -680,7 +680,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	    iommu->exclusion_start < dma_dom->aperture_size) {
 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
 		int pages = iommu_num_pages(iommu->exclusion_start,
-					    iommu->exclusion_length);
+					    iommu->exclusion_length,
+					    PAGE_SIZE);
 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
 	}
 
@@ -935,7 +936,7 @@ static dma_addr_t __map_single(struct device *dev,
 	unsigned long align_mask = 0;
 	int i;
 
-	pages = iommu_num_pages(paddr, size);
+	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
 	if (align)
@@ -980,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
 		return;
 
-	pages = iommu_num_pages(dma_addr, size);
+	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
 
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 6a44d646599..72cefd1e649 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -147,8 +147,8 @@ static __cpuinit int cpuid_device_create(int cpu)
 {
 	struct device *dev;
 
-	dev = device_create_drvdata(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
-				    NULL, "cpu%d", cpu);
+	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
+			    "cpu%d", cpu);
 	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
 }
 
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 201ee359a1a..1a78180f08d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -13,6 +13,7 @@
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
+#include <linux/sysfs.h>
 
 #include <asm/stacktrace.h>
 
@@ -343,6 +344,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 	printk("DEBUG_PAGEALLOC");
 #endif
 	printk("\n");
+	sysfs_printk_last_file();
 	if (notify_die(DIE_OOPS, str, regs, err,
 			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
 		return 1;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 086cc8118e3..96a5db7da8a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -13,6 +13,7 @@
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
+#include <linux/sysfs.h>
 
 #include <asm/stacktrace.h>
 
@@ -489,6 +490,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 	printk("DEBUG_PAGEALLOC");
 #endif
 	printk("\n");
+	sysfs_printk_last_file();
 	if (notify_die(DIE_OOPS, str, regs, err,
 			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
 		return 1;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 78e642feac3..ce97bf3bed1 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1282,12 +1282,10 @@ void __init e820_reserve_resources(void)
 	e820_res = res;
 	for (i = 0; i < e820.nr_map; i++) {
 		end = e820.map[i].addr + e820.map[i].size - 1;
-#ifndef CONFIG_RESOURCES_64BIT
-		if (end > 0x100000000ULL) {
+		if (end != (resource_size_t)end) {
 			res++;
 			continue;
 		}
-#endif
 		res->name = e820_type_to_string(e820.map[i].type);
 		res->start = e820.map[i].addr;
 		res->end = end;
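
Note on the e820.c hunk: the removed CONFIG_RESOURCES_64BIT #ifdef is replaced by a cast round-trip, so a region is skipped whenever its end address cannot be represented in resource_size_t. The userspace sketch below only illustrates that test; resource_size_t is typedef'd to 32 bits here for the demo and is not taken from this diff.

/* Illustration only: mimics the end != (resource_size_t)end test with an
 * assumed 32-bit resource_size_t. On 64-bit resources the cast is a no-op
 * and nothing is skipped. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t resource_size_t;	/* assumption for the demo */

int main(void)
{
	uint64_t fits = 0xfee00000ULL;		/* below 4 GiB: survives the cast */
	uint64_t high = 0x12345678900ULL;	/* above 4 GiB: truncated by the cast */

	printf("skip fits? %d\n", fits != (resource_size_t)fits);	/* 0: keep resource */
	printf("skip high? %d\n", high != (resource_size_t)high);	/* 1: skip resource */
	return 0;
}
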
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 2e2af5d1819..82a7c7ed6d4 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -163,8 +163,8 @@ static int __cpuinit msr_device_create(int cpu)
 {
 	struct device *dev;
 
-	dev = device_create_drvdata(msr_class, NULL, MKDEV(MSR_MAJOR, cpu),
-				    NULL, "msr%d", cpu);
+	dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
+			    "msr%d", cpu);
 	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
 }
 
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 080d1d27f37..e1e731d78f3 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -217,16 +217,6 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
 
 #endif /* CONFIG_IOMMU_DEBUG */
 
-static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
-{
-	unsigned int npages;
-
-	npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
-
-	return npages;
-}
-
 static inline int translation_enabled(struct iommu_table *tbl)
 {
 	/* only PHBs with translation enabled have an IOMMU table */
@@ -408,7 +398,7 @@ static void calgary_unmap_sg(struct device *dev,
 		if (dmalen == 0)
 			break;
 
-		npages = num_dma_pages(dma, dmalen);
+		npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
 		iommu_free(tbl, dma, npages);
 	}
 }
@@ -427,7 +417,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		BUG_ON(!sg_page(s));
 
 		vaddr = (unsigned long) sg_virt(s);
-		npages = num_dma_pages(vaddr, s->length);
+		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
 		entry = iommu_range_alloc(dev, tbl, npages);
 		if (entry == bad_dma_address) {
@@ -464,7 +454,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	struct iommu_table *tbl = find_iommu_table(dev);
 
 	uaddr = (unsigned long)vaddr;
-	npages = num_dma_pages(uaddr, size);
+	npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
 
 	return iommu_alloc(dev, tbl, vaddr, npages, direction);
 }
@@ -475,7 +465,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
 	struct iommu_table *tbl = find_iommu_table(dev);
 	unsigned int npages;
 
-	npages = num_dma_pages(dma_handle, size);
+	npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
 	iommu_free(tbl, dma_handle, npages);
 }
 
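
Note on the pci-calgary_64.c hunks: the file-private num_dma_pages() deleted above is replaced throughout by the generic iommu_num_pages() called with an explicit PAGE_SIZE. The generic helper's body is not part of this diff, so the sketch below only assumes the usual round-up definition and checks, in userspace, that it matches the removed helper for the PAGE_SIZE case.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* The Calgary helper removed above, reproduced for comparison. */
static unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
{
	unsigned int npages;

	npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
	npages >>= PAGE_SHIFT;
	return npages;
}

/* Assumed shape of the generic replacement: round the in-page offset plus
 * length up to whole io_page_size units. Not shown in this diff. */
static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
				     unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size;
}

int main(void)
{
	unsigned long addr;

	for (addr = 0; addr < 4 * PAGE_SIZE; addr += 123)
		assert(num_dma_pages(addr, 10000) ==
		       iommu_num_pages(addr, 10000, PAGE_SIZE));
	printf("page counts agree\n");
	return 0;
}
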
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0a3824e837b..19262482021 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -125,13 +125,13 @@ void __init pci_iommu_alloc(void)
 	pci_swiotlb_init();
 }
 
-unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
 {
 	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
 
 	return size >> PAGE_SHIFT;
 }
-EXPORT_SYMBOL(iommu_num_pages);
+EXPORT_SYMBOL(iommu_nr_pages);
 #endif
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 145f1c83369..e3f75bbcede 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -231,7 +231,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir, unsigned long align_mask)
 {
-	unsigned long npages = iommu_num_pages(phys_mem, size);
+	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
 	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
@@ -285,7 +285,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 		return;
 
 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-	npages = iommu_num_pages(dma_addr, size);
+	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
 		CLEAR_LEAK(iommu_page + i);
@@ -368,7 +368,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 	}
 
 	addr = phys_addr;
-	pages = iommu_num_pages(s->offset, s->length);
+	pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 	while (pages--) {
 		iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
 		SET_LEAK(iommu_page);
@@ -451,7 +451,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 		seg_size += s->length;
 		need = nextneed;
-		pages += iommu_num_pages(s->offset, s->length);
+		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 		ps = s;
 	}
 	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8c3aca7cb34..7ed9e070a6e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -282,6 +282,8 @@ static void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
+static int __cpuinitdata unsafe_smp;
+
 /*
  * Activate a secondary processor.
  */
@@ -397,7 +399,7 @@ static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
 			goto valid_k7;
 
 		/* If we get here, not a certified SMP capable AMD system. */
-		add_taint(TAINT_UNSAFE_SMP);
+		unsafe_smp = 1;
 	}
 
 valid_k7:
@@ -414,12 +416,10 @@ static void __cpuinit smp_checks(void)
 	 * Don't taint if we are running SMP kernel on a single non-MP
 	 * approved Athlon
 	 */
-	if (tainted & TAINT_UNSAFE_SMP) {
-		if (num_online_cpus())
-			printk(KERN_INFO "WARNING: This combination of AMD"
-				"processors is not suitable for SMP.\n");
-		else
-			tainted &= ~TAINT_UNSAFE_SMP;
+	if (unsafe_smp && num_online_cpus() > 1) {
+		printk(KERN_INFO "WARNING: This combination of AMD"
+			"processors is not suitable for SMP.\n");
+		add_taint(TAINT_UNSAFE_SMP);
 	}
 }
 
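
Note on the smpboot.c hunks: tainting is deferred from smp_apply_quirks() to smp_checks() through a static unsafe_smp flag, and the taint is only applied when more than one CPU actually came online. The userspace model below is illustrative only; its names and the online-CPU count are stand-ins, not kernel code.

#include <stdio.h>

static int unsafe_smp;		/* set when the quirk check fails */
static int online_cpus = 1;	/* pretend only the boot CPU came up */
static int tainted;		/* stands in for add_taint() */

static void apply_quirks(int certified_smp_amd)
{
	if (!certified_smp_amd)
		unsafe_smp = 1;	/* record the condition, do not taint yet */
}

static void smp_checks(void)
{
	if (unsafe_smp && online_cpus > 1) {
		printf("WARNING: This combination of AMD processors is not suitable for SMP.\n");
		tainted = 1;	/* taint only on a real multi-CPU system */
	}
}

int main(void)
{
	apply_quirks(0);	/* uncertified part detected ...              */
	smp_checks();		/* ... but one online CPU stays untainted     */
	printf("tainted=%d\n", tainted);

	online_cpus = 2;	/* with a second CPU online ...               */
	smp_checks();		/* ... the warning fires and the taint is set */
	printf("tainted=%d\n", tainted);
	return 0;
}
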