author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-30 21:33:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-30 21:33:46 -0400
commit    74c75f524ec5a48a00a8f01864a754c1d0e4a44b
tree      a86b28f0c13a0b91048a1c21d0c77222bf7d4b7e
parent    0b23e30b48b0b634fdc8c8198ea9dfec8c091968
parent    1c4acdb467f8a6704855a5670ff3d82e3c18eb0b
Merge branch 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: cpu_index build fix
  x86/voyager: fix missing cpu_index initialisation
  x86/voyager: fix compile breakage caused by dc1e35c6e95e8923cf1d3510438b63c600fee1e2
  x86: fix /dev/mem mmap breakage when PAT is disabled
  x86/voyager: fix compile breakage casued by x86: move prefill_possible_map calling early
  x86: use CONFIG_X86_SMP instead of CONFIG_SMP
  x86/voyager: fix boot breakage caused by x86: boot secondary cpus through initial_code
  x86, uv: fix compile error in uv_hub.h
  i386/PAE: fix pud_page()
  x86: remove debug code from arch_add_memory()
  x86: start annotating early ioremap pointers with __iomem
  x86: two trivial sparse annotations
  x86: fix init_memory_mapping for [dc000000 - e0000000) - v2
 arch/x86/Kconfig                           |  4
 arch/x86/include/asm/io.h                  |  6
 arch/x86/include/asm/pgtable-3level.h      |  4
 arch/x86/include/asm/smp.h                 |  6
 arch/x86/include/asm/uv/uv_hub.h           |  1
 arch/x86/kernel/cpu/addon_cpuid_features.c |  2
 arch/x86/kernel/cpu/common.c               |  6
 arch/x86/kernel/tsc.c                      |  2
 arch/x86/kernel/vsmp_64.c                  |  2
 arch/x86/mach-voyager/voyager_smp.c        | 12
 arch/x86/mm/gup.c                          |  2
 arch/x86/mm/init_64.c                      | 52
 arch/x86/mm/ioremap.c                      | 22
 arch/x86/mm/pat.c                          |  4
 14 files changed, 85 insertions(+), 40 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d11d7b513191..6f20718d3156 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -231,6 +231,10 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
+config X86_HAS_BOOT_CPU_ID
+	def_bool y
+	depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 5618a103f395..ac2abc88cd95 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
 
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index fb16cec702e4..52597aeadfff 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp)
 		write_cr3(pgd);
 }
 
-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
 
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2766021aef80..d12811ce51d9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id	0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c6ad93e315c8..7a5782610b2b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -13,6 +13,7 @@
 
 #include <linux/numa.h>
 #include <linux/percpu.h>
+#include <linux/timer.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 0d9c993aa93e..ef8f831af823 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25581dcb280e..003a65395bd5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -549,6 +549,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	this_cpu->c_early_init(c);
 
 	validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+	c->cpu_index = boot_cpu_id;
+#endif
 }
 
 void __init early_cpu_init(void)
@@ -1134,7 +1138,7 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Boot processor to setup the FP and extended state context info.
 	 */
-	if (!smp_processor_id())
+	if (smp_processor_id() == boot_cpu_id)
 		init_thread_xstate();
 
 	xsave_init();
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 161bb850fc47..62348e4fd8d1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void)
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	if (apic_is_clustered_box())
 		return 1;
 #endif
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 7766d36983fc..a688f3bfaec2 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 
 static void __init set_vsmp_pv_ops(void)
 {
-	void *address;
+	void __iomem *address;
 	unsigned int cap, ctl, cfg;
 
 	/* set vSMP magic bits to indicate vSMP capable kernel */
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0f6e8a6523ae..7f4c6af14351 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
+static void initialize_secondary(void);
 
 int hard_smp_processor_id(void);
 int safe_smp_processor_id(void);
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void)
 	}
 }
 
+void prefill_possible_map(void)
+{
+	/* This is empty on voyager because we need a much
+	 * earlier detection which is done in find_smp_config */
+}
+
 /* Set up all the basic stuff: read the SMP config and make all the
  * SMP information reflect only the boot cpu. All others will be
  * brought on-line later. */
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id)
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
+	c->cpu_index = id;
 
 	identify_secondary_cpu(c);
 }
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void)
 	smp_tune_scheduling();
 	 */
 	smp_store_cpu_info(boot_cpu_id);
+	/* setup the jump vector */
+	initial_code = (unsigned long)initialize_secondary;
 	printk("CPU%d: ", boot_cpu_id);
 	print_cpu_info(&cpu_data(boot_cpu_id));
 
@@ -702,7 +712,7 @@ void __init smp_boot_cpus(void)
 
 /* Reload the secondary CPUs task structure (this function does not
  * return ) */
-void __init initialize_secondary(void)
+static void __init initialize_secondary(void)
 {
 #if 0
 	// AC kernels only
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 4ba373c5b8c8..be54176e9eb2 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					start, len)))
+					(void __user *)start, len)))
 		goto slow_irqon;
 
 	/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ad38648bddbd..9db01db6e3cd 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -671,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pos;
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
 	int use_pse, use_gbpages;
 
-	printk(KERN_INFO "init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -710,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+	pos = start_pfn << PAGE_SHIFT;
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (2M) range*/
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
 			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (1G) range */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (1G) alignment */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (2M) alignment */
-	start_pfn = end_pfn;
+	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
@@ -842,7 +858,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	max_pfn_mapped = last_mapped_pfn;
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
-	WARN_ON(1);
+	WARN_ON_ONCE(ret);
 
 	return ret;
 }
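
As a rough illustration of the head/2M/1G/tail splitting that the init_memory_mapping() hunk above introduces, here is a minimal, self-contained C sketch. It is not the kernel code: main(), add_range() (standing in for save_mr()), and the spfn/epfn names are illustrative only, applied to the [dc000000, e0000000) range named in the shortlog.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2 MiB pages */
#define PUD_SHIFT	30			/* 1 GiB pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

/* Stand-in for save_mr(): callers only record non-empty ranges. */
static void add_range(unsigned long spfn, unsigned long epfn, const char *kind)
{
	printf("%-8s %#010lx - %#010lx\n", kind,
	       spfn << PAGE_SHIFT, epfn << PAGE_SHIFT);
}

int main(void)
{
	unsigned long start = 0xdc000000UL, end = 0xe0000000UL;
	unsigned long pos = start, spfn, epfn;

	/* head: 4K pages up to the next 2M boundary */
	spfn = pos >> PAGE_SHIFT;
	epfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (spfn < epfn) {
		add_range(spfn, epfn, "4K head");
		pos = epfn << PAGE_SHIFT;
	}

	/* 2M pages up to the next 1G boundary, clamped to a 2M-aligned end */
	spfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	epfn = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (epfn > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
		epfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (spfn < epfn) {
		add_range(spfn, epfn, "2M");
		pos = epfn << PAGE_SHIFT;
	}

	/* 1G pages covering whole 1G-aligned units */
	spfn = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	epfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (spfn < epfn) {
		add_range(spfn, epfn, "1G");
		pos = epfn << PAGE_SHIFT;
	}

	/* 2M tail after the last whole 1G unit */
	spfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	epfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (spfn < epfn) {
		add_range(spfn, epfn, "2M tail");
		pos = epfn << PAGE_SHIFT;
	}

	/* 4K tail below the last 2M boundary */
	spfn = pos >> PAGE_SHIFT;
	epfn = end >> PAGE_SHIFT;
	if (spfn < epfn)
		add_range(spfn, epfn, "4K tail");

	return 0;
}

For this input only one 2M range, dc000000-e0000000, is recorded; the head, 1G, and both tail candidates are empty and are skipped by the spfn < epfn checks, which mirrors the empty-range handling the hunk adds.
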
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ae71e11eb3e5..d4c4307ff3e0 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 					unsigned long size)
 {
 	unsigned long flags;
-	void *ret;
+	void __iomem *ret;
 	int err;
 
 	/*
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	if (err < 0)
 		return NULL;
 
-	ret = (void *) __ioremap_caller(phys_addr, size, flags,
+	ret = __ioremap_caller(phys_addr, size, flags,
 				__builtin_return_address(0));
 
 	free_memtype(phys_addr, phys_addr + size);
-	return (void __iomem *)ret;
+	return ret;
 }
 
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size,
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
 	return prev_map[slot];
 }
 
 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
 	unsigned long virt_addr;
 	unsigned long offset;
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size)
 		--idx;
 		--nrpages;
 	}
-	prev_map[slot] = 0;
+	prev_map[slot] = NULL;
 }
 
 void __this_fixmap_does_not_exist(void)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 738fd0f24958..eb1bf000d12e 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
 	u64 to = from + size;
 	u64 cursor = from;
 
+	if (!pat_enabled)
+		return 1;
+
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
 			printk(KERN_INFO