author     Russell King <rmk+kernel@arm.linux.org.uk>  2010-05-17 12:24:04 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2010-05-17 12:24:04 -0400
commit     ac1d426e825ab5778995f2f6f053ca2e6b45c622 (patch)
tree       75b91356ca39463e0112931aa6790802fb1e07a2 /arch/arm/mm
parent     fda0e18c8a7a3e02747c2b045b4fcd2c920410b9 (diff)
parent     a3685f00652af83f12b63e3b4ef48f29581ba48b (diff)

Merge branch 'devel-stable' into devel

Conflicts:
        arch/arm/Kconfig
        arch/arm/include/asm/system.h
        arch/arm/mm/Kconfig
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        | 17
-rw-r--r--  arch/arm/mm/abort-ev7.S    | 21
-rw-r--r--  arch/arm/mm/alignment.c    | 24
-rw-r--r--  arch/arm/mm/cache-l2x0.c   | 10
-rw-r--r--  arch/arm/mm/copypage-fa.c  |  2
-rw-r--r--  arch/arm/mm/copypage-v6.c  |  9
-rw-r--r--  arch/arm/mm/dma-mapping.c  |  7
-rw-r--r--  arch/arm/mm/fault-armv.c   |  1
-rw-r--r--  arch/arm/mm/flush.c        | 25
-rw-r--r--  arch/arm/mm/highmem.c      | 87
-rw-r--r--  arch/arm/mm/init.c         | 15
-rw-r--r--  arch/arm/mm/mmu.c          | 14
-rw-r--r--  arch/arm/mm/pgd.c          |  1
-rw-r--r--  arch/arm/mm/proc-sa1100.S  |  2

14 files changed, 186 insertions(+), 49 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33027301639e..346ae14824a5 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
 config CPU_TLB_V7
         bool
 
+config VERIFY_PERMISSION_FAULT
+        bool
 endif
 
 config CPU_HAS_ASID
@@ -736,6 +738,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
         bool
 
+config OUTER_CACHE_SYNC
+        bool
+        help
+          The outer cache has a outer_cache_fns.sync function pointer
+          that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
         bool "Enable the Feroceon L2 cache controller"
         depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -758,12 +766,13 @@ config CACHE_L2X0
           ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
         default y
         select OUTER_CACHE
+        select OUTER_CACHE_SYNC
         help
           This option enables the L2x0 PrimeCell.
 
 config CACHE_TAUROS2
         bool "Enable the Tauros2 L2 cache controller"
-        depends on ARCH_DOVE
+        depends on (ARCH_DOVE || ARCH_MMP)
         default y
         select OUTER_CACHE
         help
@@ -801,3 +810,9 @@ config ARM_DMA_MEM_BUFFERABLE
           behaviour. Therefore, we offer this as an option.
 
           You are recommended say 'Y' here and debug any affected drivers.
+
+config ARCH_HAS_BARRIERS
+        bool
+        help
+          This option allows the use of custom mandatory barriers
+          included via the mach/barriers.h file.
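The two new hidden options above are consumed on the barrier side of this merge. A minimal sketch of the wiring, assuming the asm/system.h and asm/outercache.h definitions that accompany this series (simplified from memory, not the full files):

/* outer_sync() drains the outer cache's write buffer through the new
 * outer_cache_fns.sync pointer; the mandatory barrier calls it after
 * the CPU-side dsb so that buffered writes reach memory. */
static inline void outer_sync(void)
{
        if (outer_cache.sync)           /* set only when OUTER_CACHE_SYNC=y */
                outer_cache.sync();     /* e.g. l2x0_cache_sync(), below */
}

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>              /* platform supplies mb()/rmb()/wmb() */
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()    do { dsb(); outer_sync(); } while (0)
#endif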
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
          * V6 code adjusts the returned DFSR.
          * New designs should not need to patch up faults.
          */
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+        /*
+         * Detect erroneous permission failures and fix
+         */
+        ldr     r3, =0x40d                      @ On permission fault
+        and     r3, r1, r3
+        cmp     r3, #0x0d
+        movne   pc, lr
+
+        mcr     p15, 0, r0, c7, c8, 0           @ Retranslate FAR
+        isb
+        mrc     p15, 0, r2, c7, c4, 0           @ Read the PAR
+        and     r3, r2, #0x7b                   @ On translation fault
+        cmp     r3, #0x0b
+        movne   pc, lr
+        bic     r1, r1, #0xf                    @ Fix up FSR FS[5:0]
+        and     r2, r2, #0x7e
+        orr     r1, r1, r2, LSR #1
+#endif
+
         mov     pc, lr
 ENDPROC(v7_early_abort)
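In C-like pseudocode, the VERIFY_PERMISSION_FAULT block above does the following, with r1 holding the DFSR and r0 the DFAR as set up earlier in the routine (the helper names here are illustrative; the real code uses raw CP15 instructions):

/* Sketch only, not compilable kernel code. */
if ((fsr & 0x40d) != 0x00d)
        return;                         /* not reported as a permission fault */
ats1cpr_write(far);                     /* ask the MMU to retranslate the address */
isb();
par = par_read();                       /* result of the retranslation */
if ((par & 0x7b) != 0x0b)
        return;                         /* retranslation shows no translation fault */
fsr = (fsr & ~0xf) | ((par & 0x7e) >> 1);       /* rewrite FS bits from the PAR */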
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 28b7c2776198..6f98c358989a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -167,15 +167,15 @@ union offset_union {
  THUMB( "1:     "ins"   %1, [%2]\n"     )               \
  THUMB( "       add     %2, %2, #1\n"   )               \
         "2:\n"                                          \
-        "      .section .fixup,\"ax\"\n"               \
+        "      .pushsection .fixup,\"ax\"\n"           \
         "      .align  2\n"                            \
         "3:    mov     %0, #1\n"                       \
         "      b       2b\n"                           \
-        "      .previous\n"                            \
-        "      .section __ex_table,\"a\"\n"            \
+        "      .popsection\n"                          \
+        "      .pushsection __ex_table,\"a\"\n"        \
         "      .align  3\n"                            \
         "      .long   1b, 3b\n"                       \
-        "      .previous\n"                            \
+        "      .popsection\n"                          \
         : "=r" (err), "=&r" (val), "=r" (addr)          \
         : "0" (err), "2" (addr))
 
@@ -227,16 +227,16 @@ union offset_union {
         "      mov     %1, %1, "NEXT_BYTE"\n"          \
         "2:    "ins"   %1, [%2]\n"                     \
         "3:\n"                                          \
-        "      .section .fixup,\"ax\"\n"               \
+        "      .pushsection .fixup,\"ax\"\n"           \
         "      .align  2\n"                            \
         "4:    mov     %0, #1\n"                       \
         "      b       3b\n"                           \
-        "      .previous\n"                            \
-        "      .section __ex_table,\"a\"\n"            \
+        "      .popsection\n"                          \
+        "      .pushsection __ex_table,\"a\"\n"        \
         "      .align  3\n"                            \
         "      .long   1b, 4b\n"                       \
         "      .long   2b, 4b\n"                       \
-        "      .previous\n"                            \
+        "      .popsection\n"                          \
         : "=r" (err), "=&r" (v), "=&r" (a)              \
         : "0" (err), "1" (v), "2" (a));                 \
         if (err)                                        \
@@ -267,18 +267,18 @@ union offset_union {
         "      mov     %1, %1, "NEXT_BYTE"\n"          \
         "4:    "ins"   %1, [%2]\n"                     \
         "5:\n"                                          \
-        "      .section .fixup,\"ax\"\n"               \
+        "      .pushsection .fixup,\"ax\"\n"           \
         "      .align  2\n"                            \
         "6:    mov     %0, #1\n"                       \
         "      b       5b\n"                           \
-        "      .previous\n"                            \
-        "      .section __ex_table,\"a\"\n"            \
+        "      .popsection\n"                          \
+        "      .pushsection __ex_table,\"a\"\n"        \
         "      .align  3\n"                            \
         "      .long   1b, 6b\n"                       \
         "      .long   2b, 6b\n"                       \
         "      .long   3b, 6b\n"                       \
         "      .long   4b, 6b\n"                       \
-        "      .previous\n"                            \
+        "      .popsection\n"                          \
         : "=r" (err), "=&r" (v), "=&r" (a)              \
         : "0" (err), "1" (v), "2" (a));                 \
         if (err)                                        \
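The point of this change: .previous only switches back to the single most recently active section, so it misbehaves when these macros expand inside code that is itself placed in a non-default section, whereas .pushsection/.popsection keep a proper stack and nest safely. A minimal standalone illustration of the same fixup pattern (not from the patch; assumes GCC inline asm on ARM):

/* Read a word, returning 1 on a fault instead of oopsing. Sketch only. */
static inline int probe_word(const unsigned int *addr, unsigned int *val)
{
        int err = 0;
        asm volatile(
        "1:     ldr     %1, [%2]\n"
        "2:\n"
        "       .pushsection .fixup,\"ax\"\n"
        "3:     mov     %0, #1\n"
        "       b       2b\n"
        "       .popsection\n"         /* back to whatever section was active */
        "       .pushsection __ex_table,\"a\"\n"
        "       .long   1b, 3b\n"
        "       .popsection\n"
        : "+r" (err), "=r" (*val)
        : "r" (addr));
        return err;
}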
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 78f0fc8595e2..9819869d2bc9 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -94,6 +94,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&l2x0_lock, flags);
+        cache_sync();
+        spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
         unsigned long flags;
@@ -252,6 +261,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         outer_cache.inv_range = l2x0_inv_range;
         outer_cache.clean_range = l2x0_clean_range;
         outer_cache.flush_range = l2x0_flush_range;
+        outer_cache.sync = l2x0_cache_sync;
 
         printk(KERN_INFO "%s cache controller enabled\n", type);
         printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
 }
 
 void fa_copy_user_highpage(struct page *to, struct page *from,
-        unsigned long vaddr)
+        unsigned long vaddr, struct vm_area_struct *vma)
 {
         void *kto, *kfrom;
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
         kfrom = kmap_atomic(from, KM_USER0);
         kto = kmap_atomic(to, KM_USER1);
         copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-        /*
-         * kmap_atomic() doesn't set the page virtual address, and
-         * kunmap_atomic() takes care of cache flushing already.
-         */
-        if (page_address(to) != NULL)
-#endif
-        __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+        __cpuc_flush_dcache_area(kto, PAGE_SIZE);
         kunmap_atomic(kto, KM_USER1);
         kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0da7eccf7749..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -11,7 +11,7 @@
  */
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/init.h>
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                         vaddr += offset;
                         op(vaddr, len, dir);
                         kunmap_high(page);
+                } else if (cache_is_vipt()) {
+                        pte_t saved_pte;
+                        vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                        op(vaddr + offset, len, dir);
+                        kunmap_high_l1_vipt(page, saved_pte);
                 }
         } else {
                 vaddr = page_address(page) + offset;
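The new branch covers a highmem page that is not currently kmapped when the D-cache is VIPT: cache maintenance cannot be done by physical address there, so a temporary per-CPU mapping is created just for the operation. The calling pattern in isolation (illustrative; kmap_high_l1_vipt() is introduced in the highmem.c part of this diff, and op is the dmac_map_area/dmac_unmap_area function pointer from the surrounding code):

pte_t saved_pte;
void *vaddr;

vaddr = kmap_high_l1_vipt(page, &saved_pte);    /* map into a per-CPU fixmap slot */
op(vaddr + offset, len, dir);                   /* cache maintenance via the alias */
kunmap_high_l1_vipt(page, saved_pte);           /* restore whatever was mapped before */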
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 82df01a72f4a..9b906dec1ca1 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-        void *addr = page_address(page);
-
         /*
          * Writeback any data associated with the kernel mapping of this
          * page. This ensures that data in the physical page is mutually
          * coherent with the kernels mapping.
          */
-#ifdef CONFIG_HIGHMEM
-        /*
-         * kmap_atomic() doesn't set the page virtual address, and
-         * kunmap_atomic() takes care of cache flushing already.
-         */
-        if (addr)
-#endif
-        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+        if (!PageHighMem(page)) {
+                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+        } else {
+                void *addr = kmap_high_get(page);
+                if (addr) {
+                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                        kunmap_high(page);
+                } else if (cache_is_vipt()) {
+                        pte_t saved_pte;
+                        addr = kmap_high_l1_vipt(page, &saved_pte);
+                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                        kunmap_high_l1_vipt(page, saved_pte);
+                }
+        }
 
         /*
          * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
         unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
         if (kvaddr >= (void *)FIXADDR_START) {
-                __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+                if (cache_is_vivt())
+                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
         pte = TOP_PTE(vaddr);
         return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+        unsigned int idx, cpu = smp_processor_id();
+        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+        unsigned long vaddr, flags;
+        pte_t pte, *ptep;
+
+        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+        ptep = TOP_PTE(vaddr);
+        pte = mk_pte(page, kmap_prot);
+
+        if (!in_interrupt())
+                preempt_disable();
+
+        raw_local_irq_save(flags);
+        (*depth)++;
+        if (pte_val(*ptep) == pte_val(pte)) {
+                *saved_pte = pte;
+        } else {
+                *saved_pte = *ptep;
+                set_pte_ext(ptep, pte, 0);
+                local_flush_tlb_kernel_page(vaddr);
+        }
+        raw_local_irq_restore(flags);
+
+        return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+        unsigned int idx, cpu = smp_processor_id();
+        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+        unsigned long vaddr, flags;
+        pte_t pte, *ptep;
+
+        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+        ptep = TOP_PTE(vaddr);
+        pte = mk_pte(page, kmap_prot);
+
+        BUG_ON(pte_val(*ptep) != pte_val(pte));
+        BUG_ON(*depth <= 0);
+
+        raw_local_irq_save(flags);
+        (*depth)--;
+        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+                set_pte_ext(ptep, saved_pte, 0);
+                local_flush_tlb_kernel_page(vaddr);
+        }
+        raw_local_irq_restore(flags);
+
+        if (!in_interrupt())
+                preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
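The per-CPU depth counter is what makes the single fixmap slot safe against interrupts. A hypothetical nesting, annotated (illustrative only; names as in the code above):

/* Task context, depth 0 -> 1: install mapping for page A. */
va = kmap_high_l1_vipt(pageA, &saved);
/* ... an IRQ arrives here and its DMA path maps page B in the same slot:
 *     kmap_high_l1_vipt(pageB, &s2);   depth 1 -> 2, A's pte saved in s2
 *     kunmap_high_l1_vipt(pageB, s2);  depth 2 -> 1, A's pte written back
 * ... the IRQ returns; our mapping of A is intact ... */
__cpuc_flush_dcache_area(va, PAGE_SIZE);
kunmap_high_l1_vipt(pageA, saved);      /* depth 1 -> 0: pte left in place for reuse */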
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 105d1d4f420b..1ba6cf5a2c02 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -16,6 +16,7 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -84,9 +85,6 @@ void show_mem(void)
         printk("Mem-info:\n");
         show_free_areas();
         for_each_online_node(node) {
-                pg_data_t *n = NODE_DATA(node);
-                struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
                 for_each_nodebank (i,mi,node) {
                         struct membank *bank = &mi->bank[i];
                         unsigned int pfn1, pfn2;
@@ -95,8 +93,8 @@ void show_mem(void)
                         pfn1 = bank_pfn_start(bank);
                         pfn2 = bank_pfn_end(bank);
 
-                        page = map + pfn1;
-                        end  = map + pfn2;
+                        page = pfn_to_page(pfn1);
+                        end  = pfn_to_page(pfn2 - 1) + 1;
 
                         do {
                                 total++;
@@ -568,9 +566,6 @@ void __init mem_init(void)
         reserved_pages = free_pages = 0;
 
         for_each_online_node(node) {
-                pg_data_t *n = NODE_DATA(node);
-                struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
                 for_each_nodebank(i, &meminfo, node) {
                         struct membank *bank = &meminfo.bank[i];
                         unsigned int pfn1, pfn2;
@@ -579,8 +574,8 @@ void __init mem_init(void)
                         pfn1 = bank_pfn_start(bank);
                         pfn2 = bank_pfn_end(bank);
 
-                        page = map + pfn1;
-                        end  = map + pfn2;
+                        page = pfn_to_page(pfn1);
+                        end  = pfn_to_page(pfn2 - 1) + 1;
 
                         do {
                                 if (PageReserved(page))
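The replaced "map + pfn" arithmetic assumed one flat mem_map array per node; pfn_to_page() also works with sparse memory models where struct page arrays are allocated per section. Note the end-pointer idiom in the new code:

page = pfn_to_page(pfn1);               /* first valid pfn of the bank */
end  = pfn_to_page(pfn2 - 1) + 1;       /* translate the last valid pfn, then step
                                         * one past it: pfn_to_page(pfn2) itself
                                         * could index a nonexistent section */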
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 69852003675f..e7113d0b8168 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -421,6 +421,10 @@ static void __init build_mem_type_table(void)
                 user_pgprot |= L_PTE_SHARED;
                 kern_pgprot |= L_PTE_SHARED;
                 vecs_pgprot |= L_PTE_SHARED;
+                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+                mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+                mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+                mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
@@ -1087,10 +1091,12 @@ void setup_mm_for_reboot(char mode)
         pgd_t *pgd;
         int i;
 
-        if (current->mm && current->mm->pgd)
-                pgd = current->mm->pgd;
-        else
-                pgd = init_mm.pgd;
+        /*
+         * We need to access to user-mode page tables here. For kernel threads
+         * we don't have any user-mode mappings so we use the context that we
+         * "borrowed".
+         */
+        pgd = current->active_mm->pgd;
 
         base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
         if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
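The new comment leans on the usual mm/active_mm convention: a kernel thread has current->mm == NULL but keeps the mm "borrowed" from the last user task in current->active_mm, so active_mm->pgd is always a usable page-table base. In sketch form (illustrative; assumes the standard task_struct fields):

/* Illustrative only, not from the patch. */
static pgd_t *reboot_pgd(struct task_struct *tsk)
{
        /* tsk->mm is NULL for kernel threads; tsk->active_mm never is. */
        return tsk->active_mm->pgd;
}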
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 2690146161ba..be5f58e153bf 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/highmem.h>
 
 #include <asm/pgalloc.h>
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c19..5c47760c2064 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
         mcr     p15, 0, r0, c9, c0, 5   @ Allow read-buffer operations from userland
         mov     pc, lr
 
-        .previous
+        .section .text
 
 /*
  * cpu_sa1100_proc_fin()