author     Ingo Molnar <mingo@elte.hu>  2008-11-03 02:57:41 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-11-03 02:57:41 -0500
commit     db5935001a43528e673ad26ffec9d98c60a496a9
tree       8e735327a97beccabb5d94ef93df25d2bacda705  /arch/x86
parent     34f3a814eef8069a24e5b3ebcf27aba9dabac2ea
parent     45beca08dd8b6d6a65c5ffd730af2eac7a2c7a03
Merge commit 'v2.6.28-rc3' into sched/core
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                                  |   4
-rw-r--r--  arch/x86/Kconfig.cpu                              |  24
-rw-r--r--  arch/x86/boot/compressed/.gitignore               |   2
-rw-r--r--  arch/x86/include/asm/cpufeature.h                 |   2
-rw-r--r--  arch/x86/include/asm/es7000/wakecpu.h             |   9
-rw-r--r--  arch/x86/include/asm/io.h                         |   6
-rw-r--r--  arch/x86/include/asm/iommu.h                      |   1
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h  |   9
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h             |   4
-rw-r--r--  arch/x86/include/asm/smp.h                        |   6
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h                  |   1
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c        |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c                      |   7
-rw-r--r--  arch/x86/kernel/e820.c                            |   8
-rw-r--r--  arch/x86/kernel/k8.c                              |   1
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c                |   5
-rw-r--r--  arch/x86/kernel/microcode_amd.c                   |   2
-rw-r--r--  arch/x86/kernel/microcode_core.c                  |   4
-rw-r--r--  arch/x86/kernel/pci-dma.c                         |  16
-rw-r--r--  arch/x86/kernel/pci-gart_64.c                     |   2
-rw-r--r--  arch/x86/kernel/tsc.c                             |   2
-rw-r--r--  arch/x86/kernel/vsmp_64.c                         |   2
-rw-r--r--  arch/x86/lguest/boot.c                            |  32
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c               |  12
-rw-r--r--  arch/x86/mm/gup.c                                 |   2
-rw-r--r--  arch/x86/mm/init_64.c                             |  61
-rw-r--r--  arch/x86/mm/ioremap.c                             |  22
-rw-r--r--  arch/x86/mm/pat.c                                 |   4
28 files changed, 164 insertions(+), 88 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d11d7b513191..6f20718d3156 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -231,6 +231,10 @@ config SMP
 
           If you don't know what to do here, say N.
 
+config X86_HAS_BOOT_CPU_ID
+        def_bool y
+        depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
         def_bool y
         depends on X86_MPPARSE || X86_VOYAGER
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 0b7c4a3f0651..b815664fe370 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -513,19 +513,19 @@ config CPU_SUP_UMC_32
           If unsure, say N.
 
 config X86_DS
-        bool "Debug Store support"
-        default y
-        help
-          Add support for Debug Store.
-          This allows the kernel to provide a memory buffer to the hardware
-          to store various profiling and tracing events.
+        def_bool X86_PTRACE_BTS
+        depends on X86_DEBUGCTLMSR
 
 config X86_PTRACE_BTS
-        bool "ptrace interface to Branch Trace Store"
+        bool "Branch Trace Store"
         default y
-        depends on (X86_DS && X86_DEBUGCTLMSR)
+        depends on X86_DEBUGCTLMSR
         help
-          Add a ptrace interface to allow collecting an execution trace
-          of the traced task.
-          This collects control flow changes in a (cyclic) buffer and allows
-          debuggers to fill in the gaps and show an execution trace of the debuggee.
+          This adds a ptrace interface to the hardware's branch trace store.
+
+          Debuggers may use it to collect an execution trace of the debugged
+          application in order to answer the question 'how did I get here?'.
+          Debuggers may trace user mode as well as kernel mode.
+
+          Say Y unless there is no application development on this machine
+          and you want to save a small amount of code size.
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index be0ed065249b..63eff3b04d01 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1 +1,3 @@
 relocs
+vmlinux.bin.all
+vmlinux.relocs
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f73e95d75b45..cfdf8c2c5c31 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -91,7 +91,7 @@
 #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
-#define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */
+#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
index 3ffc5a7bf667..398493461913 100644
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ b/arch/x86/include/asm/es7000/wakecpu.h
@@ -50,10 +50,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#if APIC_DEBUG
- #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
- #define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {                \
+                if (apic_verbosity >= APIC_DEBUG)       \
+                        __inquire_remote_apic(apicid);  \
+        } while (0)
 
 #endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 5618a103f395..ac2abc88cd95 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
 
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 98e28ea8cd16..e4a552d44465 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,7 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
-extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index d5c0b826a4ff..9d80db91e992 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -33,10 +33,9 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
 {
 }
 
-#if APIC_DEBUG
- #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
-#else
- #define inquire_remote_apic(apicid) {}
-#endif
+#define inquire_remote_apic(apicid) do {                \
+                if (apic_verbosity >= APIC_DEBUG)       \
+                        __inquire_remote_apic(apicid);  \
+        } while (0)
 
 #endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index fb16cec702e4..52597aeadfff 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp)
         write_cr3(pgd);
 }
 
-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
 
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
                                   pmd_index(address))
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2766021aef80..d12811ce51d9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id 0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c6ad93e315c8..7a5782610b2b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -13,6 +13,7 @@
 
 #include <linux/numa.h>
 #include <linux/percpu.h>
+#include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 0d9c993aa93e..ef8f831af823 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
         unsigned int eax, ebx, ecx, edx, sub_index;
         unsigned int ht_mask_width, core_plus_mask_width;
         unsigned int core_select_mask, core_level_siblings;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25581dcb280e..b9c9ea0217a9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/pat.h>
 #include <asm/asm.h>
 #include <asm/numa.h>
+#include <asm/smp.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -549,6 +550,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                 this_cpu->c_early_init(c);
 
         validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+        c->cpu_index = boot_cpu_id;
+#endif
 }
 
 void __init early_cpu_init(void)
@@ -1134,7 +1139,7 @@ void __cpuinit cpu_init(void)
         /*
          * Boot processor to setup the FP and extended state context info.
          */
-        if (!smp_processor_id())
+        if (smp_processor_id() == boot_cpu_id)
                 init_thread_xstate();
 
         xsave_init();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ce97bf3bed12..7aafeb5263ef 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1290,15 +1290,17 @@ void __init e820_reserve_resources(void)
                 res->start = e820.map[i].addr;
                 res->end = end;
 
-                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                res->flags = IORESOURCE_MEM;
 
                 /*
                  * don't register the region that could be conflicted with
                  * pci device BAR resource and insert them later in
                  * pcibios_resource_survey()
                  */
-                if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20))
+                if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
+                        res->flags |= IORESOURCE_BUSY;
                         insert_resource(&iomem_resource, res);
+                }
                 res++;
         }
 
@@ -1318,7 +1320,7 @@ void __init e820_reserve_resources_late(void)
         res = e820_res;
         for (i = 0; i < e820.nr_map; i++) {
                 if (!res->parent && res->end)
-                        reserve_region_with_split(&iomem_resource, res->start, res->end, res->name);
+                        insert_resource_expand_to_fit(&iomem_resource, res);
                 res++;
         }
 }
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 304d8bad6559..cbc4332a77b2 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -18,7 +18,6 @@ static u32 *flush_words;
 struct pci_device_id k8_nb_ids[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
-        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
         {}
 };
 EXPORT_SYMBOL(k8_nb_ids);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 0732adba05ca..7a385746509a 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -162,7 +162,10 @@ void machine_kexec(struct kimage *image)
         page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
         page_list[PA_PTE_1] = __pa(kexec_pte1);
         page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-        page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
+
+        if (image->type == KEXEC_TYPE_DEFAULT)
+                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+                                                << PAGE_SHIFT);
 
         /* The segment registers are funny things, they have both a
          * visible and an invisible part. Whenever the visible part is
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7a1f8eeac2c7..5f8e5d75a254 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -39,7 +39,7 @@
 #include <asm/microcode.h>
 
 MODULE_DESCRIPTION("AMD Microcode Update Driver");
-MODULE_AUTHOR("Peter Oruba <peter.oruba@amd.com>");
+MODULE_AUTHOR("Peter Oruba");
 MODULE_LICENSE("GPL v2");
 
 #define UCODE_MAGIC 0x00414d44
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 936d8d55f230..82fb2809ce32 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -480,8 +480,8 @@ static int __init microcode_init(void)
 
         printk(KERN_INFO
                "Microcode Update Driver: v" MICROCODE_VERSION
-               " <tigran@aivazian.fsnet.co.uk>"
-               " <peter.oruba@amd.com>\n");
+               " <tigran@aivazian.fsnet.co.uk>,"
+               " Peter Oruba\n");
 
         return 0;
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1972266e8ba5..192624820217 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,6 +9,8 @@
 #include <asm/calgary.h>
 #include <asm/amd_iommu.h>
 
+static int forbid_dac __read_mostly;
+
 struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
@@ -291,3 +293,17 @@ void pci_iommu_shutdown(void)
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
+
+#ifdef CONFIG_PCI
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+                printk(KERN_INFO "PCI: VIA PCI bridge detected."
+                                 "Disabling DAC.\n");
+                forbid_dac = 1;
+        }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+#endif
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e3f75bbcedea..a42b02b4df68 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -744,7 +744,7 @@ void __init gart_iommu_init(void)
         long i;
 
         if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-                printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+                printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
                 return;
         }
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 161bb850fc47..62348e4fd8d1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void)
         if (!cpu_has_tsc || tsc_unstable)
                 return 1;
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
         if (apic_is_clustered_box())
                 return 1;
 #endif
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 7766d36983fc..a688f3bfaec2 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 
 static void __init set_vsmp_pv_ops(void)
 {
-        void *address;
+        void __iomem *address;
         unsigned int cap, ctl, cfg;
 
         /* set vSMP magic bits to indicate vSMP capable kernel */
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 48ee4f9435f4..a5d8e1ace1cf 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -367,10 +367,9 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
  * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
  * name like "FPUTRAP bit" be a little less cryptic?
  *
- * We store cr0 (and cr3) locally, because the Host never changes it. The
- * Guest sometimes wants to read it and we'd prefer not to bother the Host
- * unnecessarily. */
-static unsigned long current_cr0, current_cr3;
+ * We store cr0 locally because the Host never changes it. The Guest sometimes
+ * wants to read it and we'd prefer not to bother the Host unnecessarily. */
+static unsigned long current_cr0;
 static void lguest_write_cr0(unsigned long val)
 {
         lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
@@ -399,17 +398,23 @@ static unsigned long lguest_read_cr2(void)
         return lguest_data.cr2;
 }
 
+/* See lguest_set_pte() below. */
+static bool cr3_changed = false;
+
 /* cr3 is the current toplevel pagetable page: the principle is the same as
- * cr0. Keep a local copy, and tell the Host when it changes. */
+ * cr0. Keep a local copy, and tell the Host when it changes. The only
+ * difference is that our local copy is in lguest_data because the Host needs
+ * to set it upon our initial hypercall. */
 static void lguest_write_cr3(unsigned long cr3)
 {
+        lguest_data.pgdir = cr3;
         lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
-        current_cr3 = cr3;
+        cr3_changed = true;
 }
 
 static unsigned long lguest_read_cr3(void)
 {
-        return current_cr3;
+        return lguest_data.pgdir;
 }
 
 /* cr4 is used to enable and disable PGE, but we don't care. */
@@ -498,13 +503,13 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * to forget all of them. Fortunately, this is very rare.
  *
  * ... except in early boot when the kernel sets up the initial pagetables,
- * which makes booting astonishingly slow. So we don't even tell the Host
- * anything changed until we've done the first page table switch. */
+ * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell
+ * the Host anything changed until we've done the first page table switch,
+ * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
         *ptep = pteval;
-        /* Don't bother with hypercall before initial setup. */
-        if (current_cr3)
+        if (cr3_changed)
                 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
@@ -521,7 +526,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 static void lguest_flush_tlb_single(unsigned long addr)
 {
         /* Simply set it to zero: if it was not, it will fault back in. */
-        lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+        lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
 }
 
 /* This is what happens after the Guest has removed a large number of entries.
@@ -581,6 +586,9 @@ static void __init lguest_init_IRQ(void)
 
         for (i = 0; i < LGUEST_IRQS; i++) {
                 int vector = FIRST_EXTERNAL_VECTOR + i;
+                /* Some systems map "vectors" to interrupts weirdly. Lguest has
+                 * a straightforward 1 to 1 mapping, so force that here. */
+                __get_cpu_var(vector_irq)[vector] = i;
                 if (vector != SYSCALL_VECTOR) {
                         set_intr_gate(vector, interrupt[vector]);
                         set_irq_chip_and_handler_name(i, &lguest_irq_controller,
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0f6e8a6523ae..7f4c6af14351 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
+static void initialize_secondary(void);
 
 int hard_smp_processor_id(void);
 int safe_smp_processor_id(void);
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void)
         }
 }
 
+void prefill_possible_map(void)
+{
+        /* This is empty on voyager because we need a much
+         * earlier detection which is done in find_smp_config */
+}
+
 /* Set up all the basic stuff: read the SMP config and make all the
  * SMP information reflect only the boot cpu. All others will be
  * brought on-line later. */
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id)
         struct cpuinfo_x86 *c = &cpu_data(id);
 
         *c = boot_cpu_data;
+        c->cpu_index = id;
 
         identify_secondary_cpu(c);
 }
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void)
            smp_tune_scheduling();
          */
         smp_store_cpu_info(boot_cpu_id);
+        /* setup the jump vector */
+        initial_code = (unsigned long)initialize_secondary;
         printk("CPU%d: ", boot_cpu_id);
         print_cpu_info(&cpu_data(boot_cpu_id));
 
@@ -702,7 +712,7 @@ void __init smp_boot_cpus(void)
 
 /* Reload the secondary CPUs task structure (this function does not
  * return ) */
-void __init initialize_secondary(void)
+static void __init initialize_secondary(void)
 {
 #if 0
         // AC kernels only
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 4ba373c5b8c8..be54176e9eb2 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
         len = (unsigned long) nr_pages << PAGE_SHIFT;
         end = start + len;
         if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-                                        start, len)))
+                                        (void __user *)start, len)))
                 goto slow_irqon;
 
         /*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index f79a02f64d10..9db01db6e3cd 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -671,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
         unsigned long last_map_addr = 0;
         unsigned long page_size_mask = 0;
         unsigned long start_pfn, end_pfn;
+        unsigned long pos;
 
         struct map_range mr[NR_RANGE_MR];
         int nr_range, i;
         int use_pse, use_gbpages;
 
-        printk(KERN_INFO "init_memory_mapping\n");
+        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
         /*
          * Find space for the kernel direct mapping tables.
@@ -710,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
         /* head if not big page alignment ?*/
         start_pfn = start >> PAGE_SHIFT;
-        end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+        pos = start_pfn << PAGE_SHIFT;
+        end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+        if (start_pfn < end_pfn) {
+                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+                pos = end_pfn << PAGE_SHIFT;
+        }
 
         /* big page (2M) range*/
-        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
-        end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+        end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
-        if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-                end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                        page_size_mask & (1<<PG_LEVEL_2M));
+        if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+                end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+        if (start_pfn < end_pfn) {
+                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                                page_size_mask & (1<<PG_LEVEL_2M));
+                pos = end_pfn << PAGE_SHIFT;
+        }
 
         /* big page (1G) range */
-        start_pfn = end_pfn;
-        end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+        start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn) {
+                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                 page_size_mask &
                                  ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+                pos = end_pfn << PAGE_SHIFT;
+        }
 
         /* tail is not big page (1G) alignment */
-        start_pfn = end_pfn;
-        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                        page_size_mask & (1<<PG_LEVEL_2M));
+        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+        if (start_pfn < end_pfn) {
+                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                                page_size_mask & (1<<PG_LEVEL_2M));
+                pos = end_pfn << PAGE_SHIFT;
+        }
 
         /* tail is not big page (2M) alignment */
-        start_pfn = end_pfn;
+        start_pfn = pos>>PAGE_SHIFT;
         end_pfn = end>>PAGE_SHIFT;
         nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
@@ -842,7 +858,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
         max_pfn_mapped = last_mapped_pfn;
 
         ret = __add_pages(zone, start_pfn, nr_pages);
-        WARN_ON(1);
+        WARN_ON_ONCE(ret);
 
         return ret;
 }
@@ -884,6 +900,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 void __init mem_init(void)
 {
         long codesize, reservedpages, datasize, initsize;
+        unsigned long absent_pages;
 
         start_periodic_check_for_corruption();
 
@@ -899,8 +916,9 @@ void __init mem_init(void)
 #else
         totalram_pages = free_all_bootmem();
 #endif
-        reservedpages = max_pfn - totalram_pages -
-                                        absent_pages_in_range(0, max_pfn);
+
+        absent_pages = absent_pages_in_range(0, max_pfn);
+        reservedpages = max_pfn - totalram_pages - absent_pages;
         after_bootmem = 1;
 
         codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -917,10 +935,11 @@ void __init mem_init(void)
                          VSYSCALL_END - VSYSCALL_START);
 
         printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-                "%ldk reserved, %ldk data, %ldk init)\n",
+                "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                 max_pfn << (PAGE_SHIFT-10),
                 codesize >> 10,
+                absent_pages << (PAGE_SHIFT-10),
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >> 10,
                 initsize >> 10);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ae71e11eb3e5..d4c4307ff3e0 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
                                          unsigned long size)
 {
         unsigned long flags;
-        void *ret;
+        void __iomem *ret;
         int err;
 
         /*
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
         if (err < 0)
                 return NULL;
 
-        ret = (void *) __ioremap_caller(phys_addr, size, flags,
+        ret = __ioremap_caller(phys_addr, size, flags,
                                 __builtin_return_address(0));
 
         free_memtype(phys_addr, phys_addr + size);
-        return (void __iomem *)ret;
+        return ret;
 }
 
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
         __early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
         unsigned long offset, last_addr;
         unsigned int nrpages;
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size,
         if (early_ioremap_debug)
                 printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-        prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+        prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
         return prev_map[slot];
 }
 
 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
         return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
         return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
         unsigned long virt_addr;
         unsigned long offset;
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size)
                 --idx;
                 --nrpages;
         }
-        prev_map[slot] = 0;
+        prev_map[slot] = NULL;
 }
 
 void __this_fixmap_does_not_exist(void)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 738fd0f24958..eb1bf000d12e 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
         return 1;
 }
 #else
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
         u64 from = ((u64)pfn) << PAGE_SHIFT;
         u64 to = from + size;
         u64 cursor = from;
 
+        if (!pat_enabled)
+                return 1;
+
         while (cursor < to) {
                 if (!devmem_is_allowed(pfn)) {
                         printk(KERN_INFO