author		Ingo Molnar <mingo@elte.hu>	2009-09-19 05:27:32 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 05:28:41 -0400
commit		929bf0d0156562ce631728b6fa53d68004d456d2 (patch)
tree		739063990a8077b29ef97e69d73bce94573daae4 /arch/arm/mm
parent		def0a9b2573e00ab0b486cb5382625203ab4c4a6 (diff)
parent		202c4675c55ddf6b443c7e057d2dff6b42ef71aa (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: Bring in tracing changes we depend on.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		 2
-rw-r--r--	arch/arm/mm/alignment.c		20
-rw-r--r--	arch/arm/mm/cache-v7.S		16
-rw-r--r--	arch/arm/mm/dma-mapping.c	94
-rw-r--r--	arch/arm/mm/fault.c		24
-rw-r--r--	arch/arm/mm/flush.c		 9
-rw-r--r--	arch/arm/mm/highmem.c		 8
-rw-r--r--	arch/arm/mm/init.c		32
-rw-r--r--	arch/arm/mm/nommu.c		 1
-rw-r--r--	arch/arm/mm/proc-macros.S	 8
-rw-r--r--	arch/arm/mm/proc-v7.S		 7
-rw-r--r--	arch/arm/mm/proc-xscale.S	 2
12 files changed, 180 insertions(+), 43 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 83c025e72ceb..5fe595aeba69 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -758,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 03cd27d917b9..b270d6228fe2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
 
 #define __get8_unaligned_check(ins,val,addr,err)	\
 	__asm__(					\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -215,7 +217,9 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_16				\
-		"1:	"ins"	%1, [%2], #1\n"			\
+	ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+	THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+	THUMB(	"	add	%2, %2, #1\n"	)		\
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
 		"2:	"ins"	%1, [%2]\n"			\
 		"3:\n"						\
@@ -245,11 +249,17 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_32				\
-		"1:	"ins"	%1, [%2], #1\n"			\
+	ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+	THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+	THUMB(	"	add	%2, %2, #1\n"	)		\
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
-		"2:	"ins"	%1, [%2], #1\n"			\
+	ARM(	"2:	"ins"	%1, [%2], #1\n"	)		\
+	THUMB(	"2:	"ins"	%1, [%2]\n"	)		\
+	THUMB(	"	add	%2, %2, #1\n"	)		\
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
-		"3:	"ins"	%1, [%2], #1\n"			\
+	ARM(	"3:	"ins"	%1, [%2], #1\n"	)		\
+	THUMB(	"3:	"ins"	%1, [%2]\n"	)		\
+	THUMB(	"	add	%2, %2, #1\n"	)		\
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
 		"4:	"ins"	%1, [%2]\n"			\
 		"5:\n"						\
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98d..bda0ec31a4e2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
  *
  *	Flush the whole D-cache.
  *
- *	Corrupted registers: r0-r5, r7, r9-r11
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
  *
  *	- mm    - mm_struct describing address space
  */
@@ -51,8 +51,12 @@ loop1:
 loop2:
 	mov	r9, r4				@ create working copy of max way size
 loop3:
-	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
-	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
 	bge	loop3
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
  *
  */
 ENTRY(v7_flush_kern_cache_all)
-	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
 
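
In the loop3 hunk, Thumb-2 needs the extra lsl through r6 because its
data-processing instructions cannot take a register-controlled shift as the
second operand (orr r11, r10, r9, lsl r5 is ARM-only), which is also why r6
joins the corrupted-register list.  For reference, the word handed to the
clean-and-invalidate-by-set/way operation packs its fields as sketched
below (field layout per the ARMv7 ARM; the helper name is made up):

	/*
	 * Illustrative only: operand for DCCISW (mcr p15, 0, rX, c7, c14, 2).
	 * way_shift = 32 - log2(associativity), i.e. the clz result used in
	 * the assembly; set_shift = log2(cache line length in bytes);
	 * level is 0-based, stored in bits [3:1].
	 */
	static inline unsigned int dccisw_operand(unsigned int level,
						  unsigned int way,
						  unsigned int set,
						  unsigned int way_shift,
						  unsigned int set_shift)
	{
		return (way << way_shift) | (set << set_shift) | (level << 1);
	}
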
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac8..b30925fcbcdc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
 
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+	u64 mask = ISA_DMA_THRESHOLD;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			return 0;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			return 0;
+		}
+	}
 
+	return mask;
+}
+
+#ifdef CONFIG_MMU
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	struct page *page;
 	struct arm_vm_region *c;
 	unsigned long order;
-	u64 mask = ISA_DMA_THRESHOLD, limit;
+	u64 mask = get_coherent_dma_mask(dev);
+	u64 limit;
 
 	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 	}
 
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-		if ((~mask) & ISA_DMA_THRESHOLD) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
-			goto no_page;
-		}
-	}
+	if (!mask)
+		goto no_page;
 
 	/*
 	 * Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+#else	/* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+	    pgprot_t prot)
+{
+	void *virt;
+	u64 mask = get_coherent_dma_mask(dev);
+
+	if (!mask)
+		goto error;
+
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+	virt = kmalloc(size, gfp);
+	if (!virt)
+		goto error;
+
+	*handle = virt_to_dma(dev, virt);
+	return virt;
+
+error:
+	*handle = ~0;
+	return NULL;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * Allocate DMA-coherent memory space and return both the kernel remapped
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
 static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
 	unsigned long flags, user_size, kern_size;
 	struct arm_vm_region *c;
-	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 				      vma->vm_page_prot);
 		}
 	}
+#endif	/* CONFIG_MMU */
 
 	return ret;
 }
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
+#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
 	struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 		       __func__, cpu_addr);
 	dump_stack();
 }
+#else	/* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+		return;
+	kfree(cpu_addr);
+}
+#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
  */
 static int __init consistent_init(void)
 {
+	int ret = 0;
+#ifdef CONFIG_MMU
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0, i = 0;
+	int i = 0;
 	u32 base = CONSISTENT_BASE;
 
 	do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
 		consistent_pte[i++] = pte;
 		base += (1 << PGDIR_SHIFT);
 	} while (base < CONSISTENT_END);
+#endif	/* !CONFIG_MMU */
 
 	return ret;
 }
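
get_coherent_dma_mask() above validates whatever coherent mask the driver
installed before calling into the DMA API, and both the MMU and noMMU
allocation paths now share that check.  A hedged sketch of the calling side
(the example_ name is made up; the DMA calls are the standard 2.6-era API):

	#include <linux/dma-mapping.h>

	/* Hypothetical driver setup: install the coherent mask that
	 * get_coherent_dma_mask() will sanity-check, then allocate. */
	static void *example_alloc_dma_buf(struct device *dev, dma_addr_t *handle)
	{
		dev->coherent_dma_mask = DMA_BIT_MASK(32);	/* device can address 4GB */

		/* Returns NULL if the mask is zero or cannot be satisfied
		 * from the DMA zone, per the checks introduced above. */
		return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
	}
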
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6fdcbb709827..cc8829d7e116 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -16,6 +16,8 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -23,6 +25,7 @@
 
 #include "fault.h"
 
+#ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
@@ -97,6 +100,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 	printk("\n");
 }
+#else					/* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif					/* CONFIG_MMU */
 
 /*
  * Oops.  The kernel tried to access some page that wasn't present.
@@ -171,6 +178,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	__do_kernel_fault(mm, addr, fsr, regs);
 }
 
+#ifdef CONFIG_MMU
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
@@ -322,6 +330,13 @@ no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
 	return 0;
 }
+#else					/* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	return 0;
+}
+#endif					/* CONFIG_MMU */
 
 /*
  * First Level Translation Fault Handler
@@ -340,6 +355,7 @@ no_context:
  * interrupt or a critical region, and should only copy the information
  * from the master page table, nothing more.
  */
+#ifdef CONFIG_MMU
 static int __kprobes
 do_translation_fault(unsigned long addr, unsigned int fsr,
 		     struct pt_regs *regs)
@@ -378,6 +394,14 @@ bad_area:
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
+#else					/* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+		     struct pt_regs *regs)
+{
+	return 0;
+}
+#endif					/* CONFIG_MMU */
 
 /*
  * Some section permission faults need to be handled gracefully.
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..575f3ad722e7 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(page))
+#endif
+		__cpuc_flush_dcache_page(page_address(page));
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7d..73cae57fa707 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned int idx;
 	unsigned long vaddr;
+	void *kmap;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	kmap = kmap_high_get(page);
+	if (kmap)
+		return kmap;
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* this address was obtained through kmap_high_get() */
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }
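
The kmap_atomic() change avoids giving a highmem page a second, potentially
cache-aliasing virtual address when it already has a permanent kmap:
kmap_high_get() returns the existing pkmap address with its reference
raised, or NULL.  The kunmap_atomic() hunk is the matching release path,
restated as a sketch (the example_ helper is hypothetical):

	/* Illustrative only: an atomic-kmap address inside the PKMAP
	 * window came from kmap_high_get(), so drop the pkmap reference
	 * with kunmap_high() instead of tearing down a fixmap slot. */
	static void example_release_atomic_kmap(void *kvaddr)
	{
		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

		if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
			kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
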
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3a7279c1ce5e..ea36186f32c3 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/sort.h>
 #include <linux/highmem.h>
 
 #include <asm/mach-types.h>
@@ -349,12 +350,43 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	free_area_init_node(node, zone_size, min, zhole_size);
 }
 
+#ifndef CONFIG_SPARSEMEM
+int pfn_valid(unsigned long pfn)
+{
+	struct meminfo *mi = &meminfo;
+	unsigned int left = 0, right = mi->nr_banks;
+
+	do {
+		unsigned int mid = (right + left) / 2;
+		struct membank *bank = &mi->bank[mid];
+
+		if (pfn < bank_pfn_start(bank))
+			right = mid;
+		else if (pfn >= bank_pfn_end(bank))
+			left = mid + 1;
+		else
+			return 1;
+	} while (left < right);
+	return 0;
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
 void __init bootmem_init(void)
 {
 	struct meminfo *mi = &meminfo;
 	unsigned long min, max_low, max_high;
 	int node, initrd_node;
 
+	sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
+
 	/*
 	 * Locate which node contains the ramdisk image, if any.
 	 */
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index ad7bacc693b2..900811cc9130 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -12,6 +12,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/page.h>
+#include <asm/setup.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 54b1f721dec8..7d63beaf9745 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -77,19 +77,15 @@
  * Sanity check the PTE configuration for the code below - which makes
  * certain assumptions about how these bits are layed out.
  */
+#ifdef CONFIG_MMU
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
-#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
-#error PTE bufferable bit mismatch
-#endif
-#if L_PTE_CACHEABLE != PTE_CACHEABLE
-#error PTE cacheable bit mismatch
-#endif
 #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
      L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
+#endif	/* CONFIG_MMU */
 
 /*
  * The ARMv6 and ARMv7 set_pte_ext translation function.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 180a08d03a03..f3fa1c32fe92 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
+ ARM(	str	r1, [r0], #-2048	)	@ linux version
+ THUMB(	str	r1, [r0]	)		@ linux version
+ THUMB(	sub	r0, r0, #2048	)
 
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -232,7 +234,6 @@ __v7_setup:
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
-#endif
 	/*
 	 * Memory region attributes with SCTLR.TRE=1
 	 *
@@ -265,6 +266,7 @@ __v7_setup:
 	ldr	r6, =0x40e040e0			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
+#endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
 #ifdef CONFIG_CPU_ENDIAN_BE8
@@ -273,6 +275,7 @@ __v7_setup:
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
+ THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 0cce37b93937..423394260bcb 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -17,7 +17,7 @@
  *
  * 2001 Sep 08:
  *	Completely revisited, many important fixes
- *	Nicolas Pitre	<nico@cam.org>
+ *	Nicolas Pitre	<nico@fluxnic.net>
  */
 
 #include <linux/linkage.h>