path: root/arch/arm/mm
Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/Kconfig          |   8
 -rw-r--r--  arch/arm/mm/cache-fa.S       |  12
 -rw-r--r--  arch/arm/mm/cache-l2x0.c     |  78
 -rw-r--r--  arch/arm/mm/cache-v3.S       |  10
 -rw-r--r--  arch/arm/mm/cache-v4.S       |  10
 -rw-r--r--  arch/arm/mm/cache-v4wb.S     |  12
 -rw-r--r--  arch/arm/mm/cache-v4wt.S     |  12
 -rw-r--r--  arch/arm/mm/fault-armv.c     |  28
 -rw-r--r--  arch/arm/mm/init.c           | 155
 -rw-r--r--  arch/arm/mm/mmu.c            |  73
 -rw-r--r--  arch/arm/mm/proc-arm1020.S   |  15
 -rw-r--r--  arch/arm/mm/proc-arm1020e.S  |  15
 -rw-r--r--  arch/arm/mm/proc-arm1022.S   |  15
 -rw-r--r--  arch/arm/mm/proc-arm1026.S   |  15
 -rw-r--r--  arch/arm/mm/proc-arm920.S    |  12
 -rw-r--r--  arch/arm/mm/proc-arm922.S    |  12
 -rw-r--r--  arch/arm/mm/proc-arm925.S    |  12
 -rw-r--r--  arch/arm/mm/proc-arm926.S    |  12
 -rw-r--r--  arch/arm/mm/proc-arm940.S    |  12
 -rw-r--r--  arch/arm/mm/proc-arm946.S    |  12
 -rw-r--r--  arch/arm/mm/proc-feroceon.S  |  13
 -rw-r--r--  arch/arm/mm/proc-xsc3.S      |  12
 -rw-r--r--  arch/arm/mm/proc-xscale.S    |  12
 23 files changed, 478 insertions(+), 89 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928ae4dd..4414a01e1e8a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -779,6 +779,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e6078..1fa6f71470de 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@
 #define CACHE_DLIMIT	(CACHE_DSIZE * 2)
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(fa_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(fa_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular address
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area)
 
 	.type	fa_cache_fns, #object
 ENTRY(fa_cache_fns)
+	.long	fa_flush_icache_all
 	.long	fa_flush_kern_cache_all
 	.long	fa_flush_user_cache_all
 	.long	fa_flush_user_cache_range
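Every per-CPU file in this series gains the same primitive: the new flush_icache_all entry writes zero to CP15 register c7, c5, 0 to invalidate the whole instruction cache, then appends itself as the new first slot of that CPU's cache_fns table. As a C-level sketch of what each of these assembly routines does (a hypothetical helper, not part of the patch; the inline-asm operand mirrors the patch's "mcr p15, 0, r0, c7, c5, 0"):

	/* Minimal sketch, ARM + GCC inline assembly; assumes a CP15 core. */
	static inline void flush_icache_all_sketch(void)
	{
		unsigned long zero = 0;

		/* write to the I-cache invalidate register: c7, c5, opcode2 0 */
		asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
	}

The cache-v3 and cache-v4 variants below are bare "mov pc, lr" returns, since those cores have no instruction-cache invalidate operation to issue.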
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0f..170c9bb95866 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
+static uint32_t l2x0_size;
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
 {
 	unsigned long flags;
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	/* Invalidating when L2 is enabled is a nono */
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
 	__u32 cache_id;
+	__u32 way_size = 0;
 	int ways;
 	const char *type;
 
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	l2x0_way_mask = (1 << ways) - 1;
 
 	/*
+	 * L2 cache Size =  Way size * Number of ways
+	 */
+	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+	way_size = 1 << (way_size + 3);
+	l2x0_size = ways * way_size * SZ_1K;
+
+	/*
 	 * Check if l2x0 controller is already enabled.
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
+	outer_cache.flush_all = l2x0_flush_all;
+	outer_cache.inv_all = l2x0_inv_all;
+	outer_cache.disable = l2x0_disable;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
-			ways, cache_id, aux);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			ways, cache_id, aux, l2x0_size);
 }
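The new size computation in l2x0_init() decodes the way-size field in bits [19:17] of the auxiliary control register: a field value of n means 2^(n+3) KB per way, multiplied by the number of ways. A standalone sketch of the arithmetic, runnable in userspace (the mask value is assumed to match the kernel's L2X0_AUX_CTRL_WAY_SIZE_MASK definition):

	#include <stdio.h>

	#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(0x7 << 17)	/* assumed value */
	#define SZ_1K				1024

	int main(void)
	{
		unsigned int aux = 2 << 17;	/* example: way-size field n = 2 */
		unsigned int ways = 8;		/* example: 8-way PL310 */
		unsigned int way_size, l2x0_size;

		way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
		way_size = 1 << (way_size + 3);		/* 2^(2+3) = 32 KB per way */
		l2x0_size = ways * way_size * SZ_1K;

		printf("L2 size: %u bytes\n", l2x0_size);	/* 262144 = 256 KB */
		return 0;
	}

Knowing the total size is what lets l2x0_clean_range() and l2x0_flush_range() above divert to the cheaper by-way whole-cache operation whenever the requested range covers the entire cache.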
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fee..2e2bc406a18d 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@
 #include "proc-macros.S"
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+	mov	pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area)
 
 	.type	v3_cache_fns, #object
 ENTRY(v3_cache_fns)
+	.long	v3_flush_icache_all
 	.long	v3_flush_kern_cache_all
 	.long	v3_flush_user_cache_all
 	.long	v3_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e813..a8fefb523f19 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@
 #include "proc-macros.S"
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4_flush_icache_all)
+	mov	pc, lr
+ENDPROC(v4_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area)
 
 	.type	v4_cache_fns, #object
 ENTRY(v4_cache_fns)
+	.long	v4_flush_icache_all
 	.long	v4_flush_kern_cache_all
 	.long	v4_flush_user_cache_all
 	.long	v4_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa102..d3644db467b7 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -51,6 +51,17 @@ flush_base:
 	.text
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular address
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area)
 
 	.type	v4wb_cache_fns, #object
 ENTRY(v4wb_cache_fns)
+	.long	v4wb_flush_icache_all
 	.long	v4wb_flush_kern_cache_all
 	.long	v4wb_flush_user_cache_all
 	.long	v4wb_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43b..49c2b66cf3dd 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -41,6 +41,17 @@
 #define CACHE_DLIMIT	16384
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wt_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(v4wt_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area)
 
 	.type	v4wt_cache_fns, #object
 ENTRY(v4wt_cache_fns)
+	.long	v4wt_flush_icache_all
 	.long	v4wt_flush_kern_cache_all
 	.long	v4wt_flush_user_cache_all
 	.long	v4wt_flush_user_cache_range
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c493d7244d3d..83e59f870426 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here.  Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+	/*
+	 * Use nested version here to indicate that we are already
+	 * holding one similar spinlock.
+	 */
+	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+	spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
 {
@@ -90,11 +114,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
 	pte = pte_offset_map(pmd, address);
-	spin_lock(ptl);
+	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
-	spin_unlock(ptl);
+	do_pte_unlock(ptl);
 	pte_unmap(pte);
 
 	return ret;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7fd9b5eb177f..5164069ced42 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -121,9 +122,10 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_limits(struct meminfo *mi,
-	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+	unsigned long *max_high)
 {
+	struct meminfo *mi = &meminfo;
 	int i;
 
 	*min = -1UL;
@@ -147,14 +149,13 @@ static void __init find_limits(struct meminfo *mi,
 	}
 }
 
-static void __init arm_bootmem_init(struct meminfo *mi,
-	unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+	unsigned long end_pfn)
 {
 	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
-	int i;
 
 	/*
 	 * Allocate the bootmem bitmap page.  This must be in a region
@@ -172,30 +173,39 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	pgdat = NODE_DATA(0);
 	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-	for_each_bank(i, mi) {
-		struct membank *bank = &mi->bank[i];
-		if (!bank->highmem)
-			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+	/* Free the lowmem regions from memblock into bootmem. */
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
 	}
 
-	/*
-	 * Reserve the memblock reserved regions in bootmem.
-	 */
+	/* Reserve the lowmem memblock reserved regions in bootmem. */
 	for_each_memblock(reserved, reg) {
-		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
-		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
-		if (start >= start_pfn && end <= end_pfn)
-			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-					     (end - start) << PAGE_SHIFT,
-					     BOOTMEM_DEFAULT);
+		unsigned long start = memblock_region_reserved_base_pfn(reg);
+		unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+		if (end >= end_pfn)
+			end = end_pfn;
+		if (start >= end)
+			break;
+
+		reserve_bootmem(__pfn_to_phys(start),
+			        (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
 	}
 }
 
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
-	unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+	unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	int i;
+	struct memblock_region *reg;
 
 	/*
 	 * initialise the zones.
@@ -217,13 +227,20 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 	 *  holes = node_size - sum(bank_sizes)
 	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_bank(i, mi) {
-		int idx = 0;
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start < max_low) {
+			unsigned long low_end = min(end, max_low);
+			zhole_size[0] -= low_end - start;
+		}
 #ifdef CONFIG_HIGHMEM
-		if (mi->bank[i].highmem)
-			idx = ZONE_HIGHMEM;
+		if (end > max_low) {
+			unsigned long high_start = max(start, max_low);
+			zhole_size[ZONE_HIGHMEM] -= end - high_start;
+		}
 #endif
-		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
 	}
 
 	/*
@@ -256,10 +273,19 @@ static void arm_memory_present(void)
 }
 #endif
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
 	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -292,14 +318,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
 void __init bootmem_init(void)
 {
-	struct meminfo *mi = &meminfo;
 	unsigned long min, max_low, max_high;
 
 	max_low = max_high = 0;
 
-	find_limits(mi, &min, &max_low, &max_high);
+	find_limits(&min, &max_low, &max_high);
 
-	arm_bootmem_init(mi, min, max_low);
+	arm_bootmem_init(min, max_low);
 
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(),
@@ -317,7 +342,7 @@ void __init bootmem_init(void)
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	arm_bootmem_free(mi, min, max_low, max_high);
+	arm_bootmem_free(min, max_low, max_high);
 
 	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
@@ -411,6 +436,56 @@ static void __init free_unused_memmap(struct meminfo *mi)
 	}
 }
 
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+	struct memblock_region *mem, *res;
+
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				totalhigh_pages += free_area(start, res_start,
+							     NULL);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			totalhigh_pages += free_area(start, end, NULL);
+	}
+	totalram_pages += totalhigh_pages;
+#endif
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -419,6 +494,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
 void __init mem_init(void)
 {
 	unsigned long reserved_pages, free_pages;
+	struct memblock_region *reg;
 	int i;
 #ifdef CONFIG_HAVE_TCM
 	/* These pointers are filled in on TCM detection */
@@ -439,16 +515,7 @@ void __init mem_init(void)
 			__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
-#ifdef CONFIG_HIGHMEM
-	/* set highmem page free */
-	for_each_bank (i, &meminfo) {
-		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-			totalhigh_pages += free_area(start, end, NULL);
-	}
-	totalram_pages += totalhigh_pages;
-#endif
+	free_highpages();
 
 	reserved_pages = free_pages = 0;
 
@@ -478,9 +545,11 @@ void __init mem_init(void)
 	 */
 	printk(KERN_INFO "Memory:");
 	num_physpages = 0;
-	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += bank_pfn_size(&meminfo.bank[i]);
-		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+	for_each_memblock(memory, reg) {
+		unsigned long pages = memblock_region_memory_end_pfn(reg) -
+			memblock_region_memory_base_pfn(reg);
+		num_physpages += pages;
+		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
 	}
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
 
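The reserved-region loop in the new free_highpages() is interval subtraction: each highmem region is released in pieces, skipping whatever overlaps a reserved region. A userspace sketch of the same clamping logic (hypothetical region arrays standing in for memblock; values are page-frame numbers and regions are sorted):

	#include <stdio.h>

	struct region { unsigned long start, end; };	/* [start, end) */

	static void free_span(unsigned long start, unsigned long end)
	{
		printf("free %lu..%lu\n", start, end);
	}

	int main(void)
	{
		struct region mem = { 100, 200 };
		struct region res[] = { { 90, 110 }, { 150, 160 } };
		unsigned long start = mem.start, end = mem.end;
		unsigned int i;

		for (i = 0; i < sizeof(res) / sizeof(res[0]); i++) {
			unsigned long rs = res[i].start, re = res[i].end;

			if (re < start)		/* reserved block entirely below */
				continue;
			if (rs < start)		/* clamp into [start, end) */
				rs = start;
			if (rs > end)
				rs = end;
			if (re > end)
				re = end;
			if (rs != start)	/* free the gap before the block */
				free_span(start, rs);
			start = re;		/* resume after the block */
			if (start == end)
				break;
		}
		if (start < end)		/* free whatever remains */
			free_span(start, end);
		return 0;
	}

With the example values this frees pages 110..150 and 160..200 — exactly the memory region minus the reserved overlaps.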
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c32f731d56d3..72ad3e1f56cf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 #include <linux/fs.h>
 
 #include <asm/cputype.h>
@@ -265,17 +264,17 @@ static struct mem_type mem_types[] = {
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
-				  L_PTE_DIRTY | L_PTE_WRITE,
+		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				  L_PTE_WRITE,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain		= DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
 		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				  L_PTE_USER | L_PTE_EXEC,
+				  L_PTE_WRITE | L_PTE_EXEC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.domain		= DOMAIN_IO,
+		.domain		= DOMAIN_KERNEL,
 	},
 };
 
@@ -745,13 +744,14 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
 
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+	lowmem_limit = __pa(vmalloc_min - 1) + 1;
+	memblock_set_current_limit(lowmem_limit);
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
 static inline void prepare_page_table(void)
 {
 	unsigned long addr;
+	phys_addr_t end;
 
 	/*
 	 * Clear out all the mappings below the kernel image.
@@ -867,10 +868,17 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
+	 * Find the end of the first block of lowmem.
+	 */
+	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+	if (end >= lowmem_limit)
+		end = lowmem_limit;
+
+	/*
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+	for (addr = __phys_to_virt(end);
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -987,37 +995,28 @@ static void __init kmap_init(void)
 #endif
 }
 
-static inline void map_memory_bank(struct membank *bank)
-{
-	struct map_desc map;
-
-	map.pfn = bank_pfn_start(bank);
-	map.virtual = __phys_to_virt(bank_phys_start(bank));
-	map.length = bank_phys_size(bank);
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
-}
-
 static void __init map_lowmem(void)
 {
-	struct meminfo *mi = &meminfo;
-	int i;
+	struct memblock_region *reg;
 
 	/* Map all the lowmem memory banks. */
-	for (i = 0; i < mi->nr_banks; i++) {
-		struct membank *bank = &mi->bank[i];
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > lowmem_limit)
+			end = lowmem_limit;
+		if (start >= end)
+			break;
 
-		if (!bank->highmem)
-			map_memory_bank(bank);
-	}
-}
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+		create_mapping(&map);
+	}
 }
 
 /*
@@ -1028,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
 	build_mem_type_table();
 	sanity_check_meminfo();
 	prepare_page_table();
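map_lowmem() and the bootmem loops in init.c can now terminate with break, rather than continue, as soon as a region crosses the lowmem limit; that is only valid because regions arrive sorted by base address, which is why this series moves the meminfo sort() from paging_init() into arm_memblock_init(), before the banks are handed to memblock. A small userspace sketch of the clamp-and-break pattern (hypothetical region values):

	#include <stdio.h>

	struct region { unsigned long base, size; };

	int main(void)
	{
		/* sorted by base address, as memblock guarantees */
		struct region regs[] = { { 0x00000000, 0x10000000 },
					 { 0x20000000, 0x10000000 } };
		unsigned long limit = 0x28000000;	/* example lowmem limit */
		unsigned int i;

		for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
			unsigned long start = regs[i].base;
			unsigned long end = start + regs[i].size;

			if (end > limit)
				end = limit;
			if (start >= end)
				break;	/* sorted: every later region is higher */
			printf("map %#lx..%#lx\n", start, end);
		}
		return 0;
	}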
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index a6f5f8475b96..bcf748d9f4e2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1020_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area)
 ENDPROC(arm1020_dma_unmap_area)
 
 ENTRY(arm1020_cache_fns)
+	.long	arm1020_flush_icache_all
 	.long	arm1020_flush_kern_cache_all
 	.long	arm1020_flush_user_cache_all
 	.long	arm1020_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index afc06b9c3133..ab7ec26657ea 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020e_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1020e_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area)
 ENDPROC(arm1020e_dma_unmap_area)
 
 ENTRY(arm1020e_cache_fns)
+	.long	arm1020e_flush_icache_all
 	.long	arm1020e_flush_kern_cache_all
 	.long	arm1020e_flush_user_cache_all
 	.long	arm1020e_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8915e0ba3fe5..831c5e54e22f 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1022_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1022_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area)
 ENDPROC(arm1022_dma_unmap_area)
 
 ENTRY(arm1022_cache_fns)
+	.long	arm1022_flush_icache_all
 	.long	arm1022_flush_kern_cache_all
 	.long	arm1022_flush_user_cache_all
 	.long	arm1022_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ff446c5d476f..e3f7e9a166bf 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle)
 /* ================================= CACHE ================================ */
 
 	.align	5
+
+/*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1026_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mov	pc, lr
+ENDPROC(arm1026_flush_icache_all)
+
 /*
  *	flush_user_cache_all()
  *
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area)
 ENDPROC(arm1026_dma_unmap_area)
 
 ENTRY(arm1026_cache_fns)
+	.long	arm1026_flush_icache_all
 	.long	arm1026_flush_kern_cache_all
 	.long	arm1026_flush_user_cache_all
 	.long	arm1026_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index fecf570939f3..6109f278a904 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm920_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm920_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area)
 ENDPROC(arm920_dma_unmap_area)
 
 ENTRY(arm920_cache_fns)
+	.long	arm920_flush_icache_all
 	.long	arm920_flush_kern_cache_all
 	.long	arm920_flush_user_cache_all
 	.long	arm920_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index e3cbf87c9480..bb2f0f46a5e6 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle)
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm922_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm922_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area)
 ENDPROC(arm922_dma_unmap_area)
 
 ENTRY(arm922_cache_fns)
+	.long	arm922_flush_icache_all
 	.long	arm922_flush_kern_cache_all
 	.long	arm922_flush_user_cache_all
 	.long	arm922_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 572424c867b5..c13e01accfe2 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm925_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm925_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area)
 ENDPROC(arm925_dma_unmap_area)
 
 ENTRY(arm925_cache_fns)
+	.long	arm925_flush_icache_all
 	.long	arm925_flush_kern_cache_all
 	.long	arm925_flush_user_cache_all
 	.long	arm925_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 63d168b4ebe6..42eb4315740b 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm926_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm926_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area)
 ENDPROC(arm926_dma_unmap_area)
 
 ENTRY(arm926_cache_fns)
+	.long	arm926_flush_icache_all
 	.long	arm926_flush_kern_cache_all
 	.long	arm926_flush_user_cache_all
 	.long	arm926_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index f6a62822418e..7b11cdb9935f 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm940_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm940_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  */
 ENTRY(arm940_flush_user_cache_all)
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area)
 ENDPROC(arm940_dma_unmap_area)
 
 ENTRY(arm940_cache_fns)
+	.long	arm940_flush_icache_all
 	.long	arm940_flush_kern_cache_all
 	.long	arm940_flush_user_cache_all
 	.long	arm940_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index ea2e7f2eb95b..1a5bbf080342 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm946_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(arm946_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  */
 ENTRY(arm946_flush_user_cache_all)
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area)
 ENDPROC(arm946_dma_unmap_area)
 
 ENTRY(arm946_cache_fns)
+	.long	arm946_flush_icache_all
 	.long	arm946_flush_kern_cache_all
 	.long	arm946_flush_user_cache_all
 	.long	arm946_flush_user_cache_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 578da69200cf..b4597edbff97 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle)
 	mov	pc, lr
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(feroceon_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(feroceon_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Clean and invalidate all cache entries in a particular
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area)
 ENDPROC(feroceon_dma_unmap_area)
 
 ENTRY(feroceon_cache_fns)
+	.long	feroceon_flush_icache_all
 	.long	feroceon_flush_kern_cache_all
 	.long	feroceon_flush_user_cache_all
 	.long	feroceon_flush_user_cache_range
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_dma_flush_range
 
 ENTRY(feroceon_range_cache_fns)
+	.long	feroceon_flush_icache_all
 	.long	feroceon_flush_kern_cache_all
 	.long	feroceon_flush_user_cache_all
 	.long	feroceon_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index cad07e403044..ec26355cb7c2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle)
 /* ================================= CACHE ================================ */
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xsc3_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(xsc3_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area)
 ENDPROC(xsc3_dma_unmap_area)
 
 ENTRY(xsc3_cache_fns)
+	.long	xsc3_flush_icache_all
 	.long	xsc3_flush_kern_cache_all
 	.long	xsc3_flush_user_cache_all
 	.long	xsc3_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index cb245edb2c2b..523408c0bb38 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle)
 /* ================================= CACHE ================================ */
 
 /*
+ *	flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xscale_flush_icache_all)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mov	pc, lr
+ENDPROC(xscale_flush_icache_all)
+
+/*
  *	flush_user_cache_all()
  *
  *	Invalidate all cache entries in a particular address
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area)
 ENDPROC(xscale_dma_unmap_area)
 
 ENTRY(xscale_cache_fns)
+	.long	xscale_flush_icache_all
 	.long	xscale_flush_kern_cache_all
 	.long	xscale_flush_user_cache_all
 	.long	xscale_flush_user_cache_range
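Each cache_fns table above gains its .long <cpu>_flush_icache_all entry in the first slot because these tables are the assembly image of a C struct of function pointers, and slot order is the ABI: C code indexes the table through the struct, so every implementation must add the new pointer at the same position. A simplified sketch of the shape (the real definition lives in arch/arm/include/asm/cacheflush.h; member list abbreviated and argument types simplified here):

	/* Sketch of the vtable the .long entries populate; the order of
	 * members must match the order of .long lines exactly. */
	struct cpu_cache_fns {
		void (*flush_icache_all)(void);		/* new first slot */
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 unsigned int flags);
		/* ... coherent and DMA operations follow ... */
	};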