Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Makefile                        |   2
 arch/arm/mm/abort-ev7.S                     |   1
 arch/arm/mm/abort-nommu.S                   |   1
 arch/arm/mm/alignment.c                     |   2
 arch/arm/mm/cache-l2x0.c                    |   2
 arch/arm/mm/cache-v7.S                      |  10
 arch/arm/mm/cache-xsc3l2.c                  |   3
 arch/arm/mm/copypage-v6.c                   |   1
 arch/arm/mm/{consistent.c => dma-mapping.c} | 104
 arch/arm/mm/extable.c                       |   2
 arch/arm/mm/fault-armv.c                    |   2
 arch/arm/mm/fault.c                         |   7
 arch/arm/mm/flush.c                         |   1
 arch/arm/mm/init.c                          | 193
 arch/arm/mm/iomap.c                         |   3
 arch/arm/mm/ioremap.c                       |  12
 arch/arm/mm/mm.h                            |   2
 arch/arm/mm/mmap.c                          |   6
 arch/arm/mm/mmu.c                           |  34
 arch/arm/mm/nommu.c                         |  18
 arch/arm/mm/proc-v7.S                       |  10
 arch/arm/mm/tlb-v7.S                        |   2
 22 files changed, 316 insertions(+), 102 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2e27a8c8372b..480f78a3611a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux arm-specific parts of the memory manager.
 #
 
-obj-y				:= consistent.o extable.o fault.o init.o \
+obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index eb90bce38e14..2e6dc040c654 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -30,3 +30,4 @@ ENTRY(v7_early_abort)
  * New designs should not need to patch up faults.
  */
 	mov	pc, lr
+ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
index a7cc7f9ee45d..625e580945b5 100644
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -17,3 +17,4 @@ ENTRY(nommu_early_abort)
 	mov	r0, #0			@ clear r0, r1 (no FSR/FAR)
 	mov	r1, #0
 	mov	pc, lr
+ENDPROC(nommu_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e162cca5917f..133e65d166b3 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,8 +17,8 @@
 #include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 76b800a95191..b480f1d3591f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -18,9 +18,9 @@
  */
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/io.h>
 
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #define CACHE_LINE_SIZE		32
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 35ffc4d95997..d19c2bec2b1f 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -66,6 +66,7 @@ finished:
 		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
 		isb
 		mov	pc, lr
+ENDPROC(v7_flush_dcache_all)
 
 /*
  *	v7_flush_cache_all()
@@ -85,6 +86,7 @@ ENTRY(v7_flush_kern_cache_all)
 	mcr	p15, 0, r0, c7, c5, 0	@ I+BTB cache invalidate
 	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
 	mov	pc, lr
+ENDPROC(v7_flush_kern_cache_all)
 
 /*
  *	v7_flush_cache_all()
@@ -110,6 +112,8 @@ ENTRY(v7_flush_user_cache_all)
  */
 ENTRY(v7_flush_user_cache_range)
 	mov	pc, lr
+ENDPROC(v7_flush_user_cache_all)
+ENDPROC(v7_flush_user_cache_range)
 
 /*
  *	v7_coherent_kern_range(start,end)
@@ -155,6 +159,8 @@ ENTRY(v7_coherent_user_range)
 	dsb
 	isb
 	mov	pc, lr
+ENDPROC(v7_coherent_kern_range)
+ENDPROC(v7_coherent_user_range)
 
 /*
  *	v7_flush_kern_dcache_page(kaddr)
@@ -174,6 +180,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_flush_kern_dcache_page)
 
 /*
  *	v7_dma_inv_range(start,end)
@@ -202,6 +209,7 @@ ENTRY(v7_dma_inv_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_inv_range)
 
 /*
  *	v7_dma_clean_range(start,end)
@@ -219,6 +227,7 @@ ENTRY(v7_dma_clean_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_clean_range)
 
 /*
  *	v7_dma_flush_range(start,end)
@@ -236,6 +245,7 @@ ENTRY(v7_dma_flush_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_flush_range)
 
 	__INITDATA
 
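A note on the ENDPROC() markers added throughout the assembly files in this patch: ENDPROC comes from include/linux/linkage.h, which is not part of this diff. Paraphrasing that header (on ARM the %function syntax applies), the macro amounts to roughly:

	#define ENDPROC(name) \
		.type name, %function; \
		.size name, . - name

Giving each entry point an explicit ELF function type and size is what lets kallsyms, debuggers and unwinders reason about these routines; it changes no generated instructions.
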
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 158bd96763d3..10b1bae1a258 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,10 +18,11 @@
  */
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/io.h>
 
 #include <asm/system.h>
+#include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 
 #define CR_L2	(1 << 26)
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3adb79257f43..0e21c0767580 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/dma-mapping.c
index db7b3e38ef1d..67960017dc8f 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mm/consistent.c
+ *  linux/arch/arm/mm/dma-mapping.c
  *
  *  Copyright (C) 2000-2004 Russell King
  *
@@ -512,3 +512,105 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 	}
 }
 EXPORT_SYMBOL(dma_cache_maint);
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i, j;
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+						s->length, dir);
+		if (dma_mapping_error(dev, s->dma_address))
+			goto bad_mapping;
+	}
+	return nents;
+
+ bad_mapping:
+	for_each_sg(sg, s, i, j)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+	return 0;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+				       sg_dma_len(s), dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+/**
+ * dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+					sg_dma_len(s), dir))
+			continue;
+
+		if (!arch_is_coherent())
+			dma_cache_maint(sg_virt(s), s->length, dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
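
For context, a minimal sketch of how a driver would consume the scatter-gather API added above. Everything named my_* is hypothetical and error handling is pared down; only dma_map_sg()/dma_unmap_sg() and the sg_dma_* accessors come from the patch:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Map a pre-built scatterlist for a device-bound transfer,
	 * hand the segments to the (imaginary) controller, then unmap. */
	static int my_start_dma(struct device *dev, struct scatterlist *sg,
				int nents)
	{
		struct scatterlist *s;
		int i, count;

		count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
		if (count == 0)
			return -ENOMEM;		/* nothing was mapped */

		for_each_sg(sg, s, count, i) {
			/* program one descriptor per mapped segment */
			my_hw_queue_segment(sg_dma_address(s), sg_dma_len(s));
		}

		/* ... wait for completion, then release the mapping;
		 * note: unmap with the original nents, not 'count' */
		dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
		return 0;
	}

my_hw_queue_segment() stands in for whatever the real controller needs. The important points are that the CPU must not touch the buffer between map and unmap, and that dma_unmap_sg() takes the nents originally passed to dma_map_sg().
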
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9592c3ee4cb2..9d285626bc7d 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -2,7 +2,7 @@
  * linux/arch/arm/mm/extable.c
  */
 #include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 int fixup_exception(struct pt_regs *regs)
 {
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752e..af6ed6ef9a81 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -17,7 +17,9 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 28ad7ab1c0cd..2df8d9facf57 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -13,11 +13,11 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include <asm/uaccess.h>
 
 #include "fault.h"
 
@@ -72,9 +72,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	}
 
 	pmd = pmd_offset(pgd, addr);
-#if PTRS_PER_PMD != 1
-	printk(", *pmd=%08lx", pmd_val(*pmd));
-#endif
+	if (PTRS_PER_PMD != 1)
+		printk(", *pmd=%08lx", pmd_val(*pmd));
 
 	if (pmd_none(*pmd))
 		break;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 029ee65fda2b..0fa9bf388f0b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 30a69d67d673..82c4b4217989 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,9 +26,42 @@
 
 #include "mm.h"
 
-extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+static void __init early_initrd(char **p)
+{
+	unsigned long start, size;
+
+	start = memparse(*p, p);
+	if (**p == ',') {
+		size = memparse((*p) + 1, p);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+}
+__early_param("initrd=", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
  * This is used to pass memory configuration data from paging_init
@@ -36,10 +69,6 @@ extern unsigned long phys_initrd_size;
  */
 static struct meminfo meminfo = { 0, };
 
-#define for_each_nodebank(iter,mi,no)			\
-	for (iter = 0; iter < mi->nr_banks; iter++)	\
-		if (mi->bank[iter].node == no)
-
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
@@ -50,14 +79,15 @@ void show_mem(void)
 	show_free_areas();
 	for_each_online_node(node) {
 		pg_data_t *n = NODE_DATA(node);
-		struct page *map = n->node_mem_map - n->node_start_pfn;
+		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
 
 		for_each_nodebank (i,mi,node) {
+			struct membank *bank = &mi->bank[i];
 			unsigned int pfn1, pfn2;
 			struct page *page, *end;
 
-			pfn1 = __phys_to_pfn(mi->bank[i].start);
-			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);
+			pfn1 = bank_pfn_start(bank);
+			pfn2 = bank_pfn_end(bank);
 
 			page = map + pfn1;
 			end  = map + pfn2;
@@ -96,17 +126,17 @@ void show_mem(void)
 static unsigned int __init
 find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
-	unsigned int start_pfn, bank, bootmap_pfn;
+	unsigned int start_pfn, i, bootmap_pfn;
 
 	start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
 	bootmap_pfn = 0;
 
-	for_each_nodebank(bank, mi, node) {
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
 		unsigned int start, end;
 
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end   = (mi->bank[bank].size +
-			 mi->bank[bank].start) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end   = bank_pfn_end(bank);
 
 		if (end < start_pfn)
 			continue;
@@ -145,13 +175,10 @@ static int __init check_initrd(struct meminfo *mi)
 		initrd_node = -1;
 
 		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
-
-			bank_end = mi->bank[i].start + mi->bank[i].size;
-
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
+			struct membank *bank = &mi->bank[i];
+			if (bank_phys_start(bank) <= phys_initrd_start &&
+			    end <= bank_phys_end(bank))
+				initrd_node = bank->node;
 		}
 	}
 
@@ -171,19 +198,17 @@ static inline void map_memory_bank(struct membank *bank)
 #ifdef CONFIG_MMU
 	struct map_desc map;
 
-	map.pfn = __phys_to_pfn(bank->start);
-	map.virtual = __phys_to_virt(bank->start);
-	map.length = bank->size;
+	map.pfn = bank_pfn_start(bank);
+	map.virtual = __phys_to_virt(bank_phys_start(bank));
+	map.length = bank_phys_size(bank);
 	map.type = MT_MEMORY;
 
 	create_mapping(&map);
 #endif
 }
 
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 {
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 	unsigned long start_pfn, end_pfn, boot_pfn;
 	unsigned int boot_pages;
 	pg_data_t *pgdat;
@@ -199,8 +224,8 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
 
-		start = bank->start >> PAGE_SHIFT;
-		end = (bank->start + bank->size) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
 
 		if (start_pfn > start)
 			start_pfn = start;
@@ -230,8 +255,11 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	pgdat = NODE_DATA(node);
 	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
 
-	for_each_nodebank(i, mi, node)
-		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+	}
 
 	/*
 	 * Reserve the bootmem bitmap for this node.
@@ -239,31 +267,39 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
 			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
 
-	/*
-	 * Reserve any special node zero regions.
-	 */
-	if (node == 0)
-		reserve_node_zero(pgdat);
+	return end_pfn;
+}
 
+static void __init bootmem_reserve_initrd(int node)
+{
 #ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * If the initrd is in this node, reserve its memory.
-	 */
-	if (node == initrd_node) {
-		int res = reserve_bootmem_node(pgdat, phys_initrd_start,
-				     phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-		if (res == 0) {
-			initrd_start = __phys_to_virt(phys_initrd_start);
-			initrd_end = initrd_start + phys_initrd_size;
-		} else {
-			printk(KERN_ERR
-				"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-				"memory region - disabling initrd\n",
-				phys_initrd_start, phys_initrd_size);
-		}
+	pg_data_t *pgdat = NODE_DATA(node);
+	int res;
+
+	res = reserve_bootmem_node(pgdat, phys_initrd_start,
+			phys_initrd_size, BOOTMEM_EXCLUSIVE);
+
+	if (res == 0) {
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	} else {
+		printk(KERN_ERR
+			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
+			"memory region - disabling initrd\n",
+			phys_initrd_start, phys_initrd_size);
 	}
 #endif
+}
+
+static void __init bootmem_free_node(int node, struct meminfo *mi)
+{
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	unsigned long start_pfn, end_pfn;
+	pg_data_t *pgdat = NODE_DATA(node);
+	int i;
+
+	start_pfn = pgdat->bdata->node_min_pfn;
+	end_pfn = pgdat->bdata->node_low_pfn;
 
 	/*
 	 * initialise the zones within this node.
@@ -284,7 +320,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	 */
 	zhole_size[0] = zone_size[0];
 	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
 
 	/*
 	 * Adjust the sizes according to any special requirements for
@@ -293,21 +329,12 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	arch_adjust_zones(node, zone_size, zhole_size);
 
 	free_area_init_node(node, zone_size, start_pfn, zhole_size);
-
-	return end_pfn;
 }
 
 void __init bootmem_init(struct meminfo *mi)
 {
 	unsigned long memend_pfn = 0;
-	int node, initrd_node, i;
-
-	/*
-	 * Invalidate the node number for empty or invalid memory banks
-	 */
-	for (i = 0; i < mi->nr_banks; i++)
-		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
-			mi->bank[i].node = -1;
+	int node, initrd_node;
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
@@ -320,9 +347,19 @@ void __init bootmem_init(struct meminfo *mi)
 	 * Run through each node initialising the bootmem allocator.
 	 */
 	for_each_node(node) {
-		unsigned long end_pfn;
+		unsigned long end_pfn = bootmem_init_node(node, mi);
 
-		end_pfn = bootmem_init_node(node, initrd_node, mi);
+		/*
+		 * Reserve any special node zero regions.
+		 */
+		if (node == 0)
+			reserve_node_zero(NODE_DATA(node));
+
+		/*
+		 * If the initrd is in this node, reserve its memory.
+		 */
+		if (node == initrd_node)
+			bootmem_reserve_initrd(node);
 
 		/*
 		 * Remember the highest memory PFN.
@@ -331,6 +368,19 @@ void __init bootmem_init(struct meminfo *mi)
 			memend_pfn = end_pfn;
 	}
 
+	/*
+	 * sparse_init() needs the bootmem allocator up and running.
+	 */
+	sparse_init();
+
+	/*
+	 * Now free memory in each node - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
+	 */
+	for_each_node(node)
+		bootmem_free_node(node, mi);
+
 	high_memory = __va(memend_pfn << PAGE_SHIFT);
 
 	/*
@@ -401,7 +451,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 	 * information on the command line.
 	 */
 	for_each_nodebank(i, mi, node) {
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		struct membank *bank = &mi->bank[i];
+
+		bank_start = bank_pfn_start(bank);
 		if (bank_start < prev_bank_end) {
 			printk(KERN_ERR "MEM: unordered memory banks.  "
 				"Not freeing memmap.\n");
@@ -415,8 +467,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 		if (prev_bank_end && prev_bank_end != bank_start)
 			free_memmap(node, prev_bank_end, bank_start);
 
-		prev_bank_end = (mi->bank[i].start +
-				 mi->bank[i].size) >> PAGE_SHIFT;
+		prev_bank_end = bank_pfn_end(bank);
 	}
 }
 
@@ -461,8 +512,8 @@ void __init mem_init(void)
 
 	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
+		num_physpages += bank_pfn_size(&meminfo.bank[i]);
+		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
 	}
 
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
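
Most of the init.c churn above replaces open-coded start/size arithmetic with bank_* accessors. Those helpers live in asm/setup.h and are introduced elsewhere in this series, not in this file; paraphrased for reference (a sketch, not quoted from this diff), they boil down to:

	/* paraphrased; struct membank is still just { start, size, node } */
	#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
	#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
	#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
	#define bank_phys_start(bank)	((bank)->start)
	#define bank_phys_end(bank)	((bank)->start + (bank)->size)
	#define bank_phys_size(bank)	((bank)->size)

The accessors only centralise the physical-address vs. page-frame-number conversions that the old code repeated inline.
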
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 7429f8c01015..ffad039cbb73 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -7,8 +7,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #ifdef __io
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index b81dbf9ffb77..8a41912ec7c5 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -24,9 +24,10 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/io.h>
 
+#include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -332,15 +333,14 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
-void __iounmap(volatile void __iomem *addr)
+void __iounmap(volatile void __iomem *io_addr)
 {
+	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
 #endif
 	unsigned int section_mapping = 0;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
-
 #ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
@@ -351,7 +351,7 @@ void __iounmap(volatile void __iomem *addr)
 	 */
 	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
 				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
@@ -366,6 +366,6 @@ void __iounmap(volatile void __iomem *addr)
 #endif
 
 	if (!section_mapping)
-		vunmap((void __force *)addr);
+		vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 7647c597fc59..96590104ba0f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -35,3 +35,5 @@ struct pglist_data;
 void __init create_mapping(struct map_desc *md);
 void __init bootmem_init(struct meminfo *mi);
 void reserve_node_zero(struct pglist_data *pgdat);
+
+extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 3f6dc40b8353..5358fcc7f61e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -6,6 +6,8 @@
 #include <linux/mman.h>
 #include <linux/shm.h>
 #include <linux/sched.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
 #include <asm/system.h>
 
 #define COLOUR_ALIGN(addr,pgoff)		\
@@ -37,8 +39,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	 * caches alias.  This is indicated by bits 9 and 21 of the
 	 * cache type register.
 	 */
-	cache_type = read_cpuid(CPUID_CACHETYPE);
-	if (cache_type != read_cpuid(CPUID_ID)) {
+	cache_type = read_cpuid_cachetype();
+	if (cache_type != read_cpuid_id()) {
 		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
 		if (aliasing)
 			do_align = filp || flags & MAP_SHARED;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..e7af83e569d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 
+#include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -27,9 +28,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -568,12 +566,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+	vmalloc_reserve = memparse(*arg, arg);
+
+	if (vmalloc_reserve < SZ_16M) {
+		vmalloc_reserve = SZ_16M;
+		printk(KERN_WARNING
+			"vmalloc area too small, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+
 static int __init check_membank_valid(struct membank *mb)
 {
 	/*
-	 * Check whether this memory region has non-zero size.
+	 * Check whether this memory region has non-zero size or
+	 * invalid node number.
 	 */
-	if (mb->size == 0)
+	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
 		return 0;
 
 	/*
@@ -607,8 +628,7 @@ static int __init check_membank_valid(struct membank *mb)
 
 static void __init sanity_check_meminfo(struct meminfo *mi)
 {
-	int i;
-	int j;
+	int i, j;
 
 	for (i = 0, j = 0; i < mi->nr_banks; i++) {
 		if (check_membank_valid(&mi->bank[i]))
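
The new vmalloc= early parameter is parsed with memparse(), so the usual K/M/G suffixes apply. A hypothetical boot command line (the other arguments are placeholders):

	console=ttyAMA0 root=/dev/nfs vmalloc=256M

reserves 256MB of kernel virtual address space for vmalloc/ioremap instead of the default 128MB; anything under 16MB is clamped by early_vmalloc() with the warning shown above.
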
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 63c62fdea521..07b62b238979 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -7,16 +7,14 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/io.h>
 
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/page.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
 
-extern void _stext, __data_start, _end;
-
 /*
  * Reserve the various regions of node 0
  */
@@ -43,12 +41,26 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 			 BOOTMEM_DEFAULT);
 }
 
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < mi->nr_banks; i++) {
+		struct membank *mb = &mi->bank[i];
+
+		if (mb->size != 0 && mb->node < MAX_NUMNODES)
+			mi->bank[j++] = mi->bank[i];
+	}
+	mi->nr_banks = j;
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
 void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
+	sanity_check_meminfo(mi);
 	bootmem_init(mi);
 }
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b49f9a4c82c8..a67e26f3dce2 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -25,9 +25,11 @@
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
+ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
 	mov	pc, lr
+ENDPROC(cpu_v7_proc_fin)
 
 /*
  *	cpu_v7_reset(loc)
@@ -43,6 +45,7 @@ ENTRY(cpu_v7_proc_fin)
 	.align	5
 ENTRY(cpu_v7_reset)
 	mov	pc, r0
+ENDPROC(cpu_v7_reset)
 
 /*
  *	cpu_v7_do_idle()
@@ -52,8 +55,9 @@ ENTRY(cpu_v7_reset)
  *	IRQs are already disabled.
  */
 ENTRY(cpu_v7_do_idle)
-	.long	0xe320f003			@ ARM V7 WFI instruction
+	wfi
 	mov	pc, lr
+ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
 #ifndef TLB_CAN_READ_FROM_L1_CACHE
@@ -65,6 +69,7 @@ ENTRY(cpu_v7_dcache_clean_area)
 	dsb
 #endif
 	mov	pc, lr
+ENDPROC(cpu_v7_dcache_clean_area)
 
 /*
  *	cpu_v7_switch_mm(pgd_phys, tsk)
@@ -89,6 +94,7 @@ ENTRY(cpu_v7_switch_mm)
 	isb
 #endif
 	mov	pc, lr
+ENDPROC(cpu_v7_switch_mm)
 
 /*
  *	cpu_v7_set_pte_ext(ptep, pte)
@@ -141,6 +147,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
 #endif
 	mov	pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
 
 cpu_v7_name:
 	.ascii	"ARMv7 Processor"
@@ -188,6 +195,7 @@ __v7_setup:
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
 	mov	pc, lr				@ return to head.S:__ret
+ENDPROC(__v7_setup)
 
 	/*
 	 *         V X F   I D LR
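
One non-ENDPROC change worth flagging above: cpu_v7_do_idle previously hand-encoded WFI as .long 0xe320f003, presumably a workaround for assemblers that predate ARMv7 support. The new wfi form assembles to the same instruction, just written readably.
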
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index b56dda8052f7..24ba5109f2e7 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -51,6 +51,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	mcr	p15, 0, ip, c7, c5, 6	@ flush BTAC/BTB
 	dsb
 	mov	pc, lr
+ENDPROC(v7wbi_flush_user_tlb_range)
 
 /*
  *	v7wbi_flush_kern_tlb_range(start,end)
@@ -77,6 +78,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	dsb
 	isb
 	mov	pc, lr
+ENDPROC(v7wbi_flush_kern_tlb_range)
 
 	.section ".text.init", #alloc, #execinstr
 