author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-17 18:22:23 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-17 18:22:23 -0500
commit	6665398afafcb1c75d933c1452a9010644aba3e6 (patch)
tree	6a6dce2ac7835de25f422330ea224a01eef55635 /arch/arm
parent	c0caac93f873cd3402b63246bf94d904afc4f5fd (diff)
parent	bf32eb85492af197ea5ff20e0be56f667a80584d (diff)
Merge branch 'cache' (early part)
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/common/dmabounce.c	12
-rw-r--r--	arch/arm/include/asm/cacheflush.h	17
-rw-r--r--	arch/arm/mm/cache-fa.S	11
-rw-r--r--	arch/arm/mm/cache-l2x0.c	93
-rw-r--r--	arch/arm/mm/cache-v3.S	9
-rw-r--r--	arch/arm/mm/cache-v4.S	9
-rw-r--r--	arch/arm/mm/cache-v4wb.S	11
-rw-r--r--	arch/arm/mm/cache-v4wt.S	11
-rw-r--r--	arch/arm/mm/cache-v6.S	11
-rw-r--r--	arch/arm/mm/cache-v7.S	13
-rw-r--r--	arch/arm/mm/flush.c	4
-rw-r--r--	arch/arm/mm/highmem.c	2
-rw-r--r--	arch/arm/mm/nommu.c	2
-rw-r--r--	arch/arm/mm/proc-arm1020.S	11
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	11
-rw-r--r--	arch/arm/mm/proc-arm1022.S	11
-rw-r--r--	arch/arm/mm/proc-arm1026.S	11
-rw-r--r--	arch/arm/mm/proc-arm920.S	11
-rw-r--r--	arch/arm/mm/proc-arm922.S	11
-rw-r--r--	arch/arm/mm/proc-arm925.S	11
-rw-r--r--	arch/arm/mm/proc-arm926.S	11
-rw-r--r--	arch/arm/mm/proc-arm940.S	9
-rw-r--r--	arch/arm/mm/proc-arm946.S	11
-rw-r--r--	arch/arm/mm/proc-feroceon.S	15
-rw-r--r--	arch/arm/mm/proc-mohawk.S	11
-rw-r--r--	arch/arm/mm/proc-syms.c	3
-rw-r--r--	arch/arm/mm/proc-xsc3.S	11
-rw-r--r--	arch/arm/mm/proc-xscale.S	13
28 files changed, 213 insertions(+), 153 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 5a375e5fef21..bc90364a96c7 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		memcpy(ptr, buf->safe, size);
 
 		/*
-		 * DMA buffers must have the same cache properties
-		 * as if they were really used for DMA - which means
-		 * data must be written back to RAM.  Note that
-		 * we don't use dmac_flush_range() here for the
-		 * bidirectional case because we know the cache
-		 * lines will be coherent with the data written.
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
 		 */
-		dmac_clean_range(ptr, ptr + size);
-		outer_clean_range(__pa(ptr), __pa(ptr) + size);
+		__cpuc_flush_kernel_dcache_area(ptr, size);
 	}
 	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
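Note on the dmabounce change above: after copying out of the bounce buffer, the old code cleaned the kernel mapping to RAM (an inner clean followed by an outer clean by physical address), while the new code performs a single flush of the kernel mapping so the data also becomes coherent with any user mappings of a page cache page. A minimal C model of the before/after; the cache calls here are hypothetical stand-ins, not the kernel primitives themselves:

#include <string.h>
#include <stddef.h>

/* Hypothetical stand-ins for dmac_clean_range(), outer_clean_range()
 * and the new area-based flush in the kernel. */
static void clean_inner(const void *s, const void *e) { (void)s; (void)e; }
static void clean_outer(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void flush_kernel_mapping(void *p, size_t n) { (void)p; (void)n; }

static void copy_back(void *ptr, const void *safe, size_t size)
{
	memcpy(ptr, safe, size);
#ifdef OLD_BEHAVIOUR
	/* before: write the data back to RAM, as if it had really been DMA'd */
	clean_inner(ptr, (const char *)ptr + size);
	clean_outer((unsigned long)ptr, (unsigned long)ptr + size);
#else
	/* after: make the kernel mapping coherent with user mappings */
	flush_kernel_mapping(ptr, size);
#endif
}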
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 73eceb87e588..730aefcfbee3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
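The cacheflush.h hunks above rename both the MULTI_CACHE function-table slot and the single-cache __glue() alias, and give the hook an explicit size parameter. A minimal sketch (not kernel code) of how the table-based dispatch resolves, assuming a 4096-byte page:

#include <stddef.h>
#include <stdio.h>

/* Mirrors the MULTI_CACHE case: one table of cache methods per CPU type. */
struct cpu_cache_fns {
	void (*flush_kern_dcache_area)(void *, size_t);
	/* other hooks elided */
};

static void demo_flush(void *addr, size_t size)
{
	printf("flush %zu bytes at %p\n", size, addr);
}

static struct cpu_cache_fns cpu_cache = {
	.flush_kern_dcache_area = demo_flush,
};

#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area

int main(void)
{
	static char page[4096];
	/* callers now pass an explicit size instead of an implied page */
	__cpuc_flush_dcache_area(page, sizeof(page));
	return 0;
}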
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index b63a8f7b95cf..a89444a3c016 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
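The conversion shown here for cache-fa.S is the pattern repeated in the per-CPU assembly files later in the patch: the entry now receives the region size in r1, so the end address becomes addr + size ("add r1, r0, r1") instead of the fixed addr + PAGE_SZ. A C model of the loop (a sketch only; the helpers are stand-ins for the coprocessor instructions, and the line size is an assumption):

#include <stddef.h>

#define CACHE_DLINESIZE 32	/* assumption: 32-byte D-cache lines */

/* Stand-ins for "mcr p15, 0, r0, c7, c14, 1" and the write-buffer drain. */
static void clean_and_invalidate_dline(void *addr) { (void)addr; }
static void drain_write_buffer(void) { }

void flush_kern_dcache_area_model(char *addr, size_t size)
{
	char *end = addr + size;	/* was: addr + PAGE_SZ */

	for (; addr < end; addr += CACHE_DLINESIZE)
		clean_and_invalidate_dline(addr);
	drain_write_buffer();
}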
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 747f9a9021bb..cb8fc6573b1b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_INV_LINE_PA, 1);
+			writel(start, base + L2X0_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
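The cache-l2x0.c rework above moves the locking out of the per-write helper and into the range functions, then bounds IRQ-off latency by issuing line operations in 4096-byte blocks and briefly dropping the spinlock between blocks so pending interrupts can run. The idiom in isolation, as a sketch with stand-ins for the spinlock and the MMIO line operation:

#include <stddef.h>

#define CACHE_LINE_SIZE	32UL
#define BLOCK_SIZE	4096UL

/* Stand-ins for spin_lock_irqsave()/spin_unlock_irqrestore() and for
 * the cache_wait() + writel() pair that issues one line operation. */
static void lock(void) { }
static void unlock(void) { }
static void issue_line_op(unsigned long pa) { (void)pa; }

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static void op_range(unsigned long start, unsigned long end)
{
	lock();
	while (start < end) {
		unsigned long blk_end = start + min_ul(end - start, BLOCK_SIZE);

		/* issue at most one block's worth of line operations... */
		while (start < blk_end) {
			issue_line_op(start);
			start += CACHE_LINE_SIZE;
		}

		/* ...then release the lock briefly so interrupts can run */
		if (blk_end < end) {
			unlock();
			lock();
		}
	}
	unlock();
}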
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 8a4abebc478a..2a482731ea36 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 3668611cb400..5c7da3e372e9 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
+	.long	v4_flush_kern_dcache_area
 	.long	v4_dma_inv_range
 	.long	v4_dma_clean_range
 	.long	v4_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2ebc1b3bf856..3dbedf1ec0e7 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */
 
 /*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
+	.long	v4wb_flush_kern_dcache_area
 	.long	v4wb_dma_inv_range
 	.long	v4wb_dma_clean_range
 	.long	v4wb_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index c54fa2cc40e6..b3b7410270b4 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 	/* fallthrough */
 
 /*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_flush_user_cache_range
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
-	.long	v4wt_flush_kern_dcache_page
+	.long	v4wt_flush_kern_dcache_area
 	.long	v4wt_dma_inv_range
 	.long	v4wt_dma_clean_range
 	.long	v4wt_dma_flush_range
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 295e25dd6381..4ba0a24ce6f5 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
 
 /*
- *	v6_flush_kern_dcache_page(kaddr)
+ *	v6_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v6_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_flush_user_cache_range
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
-	.long	v6_flush_kern_dcache_page
+	.long	v6_flush_kern_dcache_area
 	.long	v6_dma_inv_range
 	.long	v6_dma_clean_range
 	.long	v6_dma_flush_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index e1bd9759617f..9073db849fb4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 
 /*
- *	v7_flush_kern_dcache_page(kaddr)
+ *	v7_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
 
 /*
  * v7_dma_inv_range(start,end)
@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_flush_user_cache_range
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
-	.long	v7_flush_kern_dcache_page
+	.long	v7_flush_kern_dcache_area
 	.long	v7_dma_inv_range
 	.long	v7_dma_clean_range
 	.long	v7_dma_flush_range
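Unlike the fixed CACHE_DLINESIZE used by the older cores, v7_flush_kern_dcache_area reads the line size at run time via the dcache_line_size macro. A sketch of the equivalent computation in C (ARMv7 only; assumes the Cache Type Register layout, where DminLine in bits [19:16] is log2 of the words per D-cache line):

/* builds only for ARM targets */
static inline unsigned int v7_dcache_line_size(void)
{
	unsigned int ctr;

	/* read CTR, the Cache Type Register */
	__asm__ volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	/* 4 bytes per word, shifted by log2(words per line) */
	return 4U << ((ctr >> 16) & 0xf);
}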
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 329594e760cd..6f3a4b7a3b82 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (addr)
 #endif
-		__cpuc_flush_dcache_page(addr);
+		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * in this mapping of the page.  FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 30f82fb5918c..2be1ec7c1b41 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_page((void *)vaddr);
+		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 900811cc9130..374a8311bc84 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)
 
 void flush_dcache_page(struct page *page)
 {
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d9fb4b98c49f..8012e24282b2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	add	r0, r0, #CACHE_DLINESIZE
@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
 	.long	arm1020_flush_user_cache_range
 	.long	arm1020_coherent_kern_range
 	.long	arm1020_coherent_user_range
-	.long	arm1020_flush_kern_dcache_page
+	.long	arm1020_flush_kern_dcache_area
 	.long	arm1020_dma_inv_range
 	.long	arm1020_dma_clean_range
 	.long	arm1020_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 7453b75dcea5..41fe25d234f5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
 	.long	arm1020e_flush_user_cache_range
 	.long	arm1020e_coherent_kern_range
 	.long	arm1020e_coherent_user_range
-	.long	arm1020e_flush_kern_dcache_page
+	.long	arm1020e_flush_kern_dcache_area
 	.long	arm1020e_dma_inv_range
 	.long	arm1020e_dma_clean_range
 	.long	arm1020e_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8eb72d75a8b6..20a5b1b31a70 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
 	.long	arm1022_flush_user_cache_range
 	.long	arm1022_coherent_kern_range
 	.long	arm1022_coherent_user_range
-	.long	arm1022_flush_kern_dcache_page
+	.long	arm1022_flush_kern_dcache_area
 	.long	arm1022_dma_inv_range
 	.long	arm1022_dma_clean_range
 	.long	arm1022_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3b59f0d67139..96aedb10fcc4 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
 	.long	arm1026_flush_user_cache_range
 	.long	arm1026_coherent_kern_range
 	.long	arm1026_coherent_user_range
-	.long	arm1026_flush_kern_dcache_page
+	.long	arm1026_flush_kern_dcache_area
 	.long	arm1026_dma_inv_range
 	.long	arm1026_dma_clean_range
 	.long	arm1026_dma_flush_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2b7c197cc58d..471669e2d7cb 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm920_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
 	.long	arm920_flush_user_cache_range
 	.long	arm920_coherent_kern_range
 	.long	arm920_coherent_user_range
-	.long	arm920_flush_kern_dcache_page
+	.long	arm920_flush_kern_dcache_area
 	.long	arm920_dma_inv_range
 	.long	arm920_dma_clean_range
 	.long	arm920_dma_flush_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 06a1aa4e3398..ee111b00fa41 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm922_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
 	.long	arm922_flush_user_cache_range
 	.long	arm922_coherent_kern_range
 	.long	arm922_coherent_user_range
-	.long	arm922_flush_kern_dcache_page
+	.long	arm922_flush_kern_dcache_area
 	.long	arm922_dma_inv_range
 	.long	arm922_dma_clean_range
 	.long	arm922_dma_flush_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index cb53435a85ae..8deb5bde58e4 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm925_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
 	.long	arm925_flush_user_cache_range
 	.long	arm925_coherent_kern_range
 	.long	arm925_coherent_user_range
-	.long	arm925_flush_kern_dcache_page
+	.long	arm925_flush_kern_dcache_area
 	.long	arm925_dma_inv_range
 	.long	arm925_dma_clean_range
 	.long	arm925_dma_flush_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1c4848704bb3..64db6e275a44 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm926_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
 	.long	arm926_flush_user_cache_range
 	.long	arm926_coherent_kern_range
 	.long	arm926_coherent_user_range
-	.long	arm926_flush_kern_dcache_page
+	.long	arm926_flush_kern_dcache_area
 	.long	arm926_dma_inv_range
 	.long	arm926_dma_clean_range
 	.long	arm926_dma_flush_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 5b0f8464c8f2..8196b9f401fb 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
 	/* FALLTHROUGH */
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
 	mov	ip, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
 	.long	arm940_flush_user_cache_range
 	.long	arm940_coherent_kern_range
 	.long	arm940_coherent_user_range
-	.long	arm940_flush_kern_dcache_page
+	.long	arm940_flush_kern_dcache_area
 	.long	arm940_dma_inv_range
 	.long	arm940_dma_clean_range
 	.long	arm940_dma_flush_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 40c0449a139b..9a951239c86c 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  * (same as arm926)
  */
-ENTRY(arm946_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
 	.long	arm946_flush_user_cache_range
 	.long	arm946_coherent_kern_range
 	.long	arm946_coherent_user_range
-	.long	arm946_flush_kern_dcache_page
+	.long	arm946_flush_kern_dcache_area
 	.long	arm946_dma_inv_range
 	.long	arm946_dma_clean_range
 	.long	arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d0d7795200fc..dbc39383e66a 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
 	.align	5
-ENTRY(feroceon_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
 	mov	pc, lr
 
 	.align	5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
 	mrs	r2, cpsr
 	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
 	orr	r3, r2, #PSR_I_BIT
@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_flush_kern_dcache_page
+	.long	feroceon_flush_kern_dcache_area
 	.long	feroceon_dma_inv_range
 	.long	feroceon_dma_clean_range
 	.long	feroceon_dma_flush_range
@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_range_flush_kern_dcache_page
+	.long	feroceon_range_flush_kern_dcache_area
 	.long	feroceon_range_dma_inv_range
 	.long	feroceon_range_dma_clean_range
 	.long	feroceon_range_dma_flush_range
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 52b5fd74fbb3..9674d36cc97d 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(mohawk_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
 	.long	mohawk_flush_user_cache_range
 	.long	mohawk_coherent_kern_range
 	.long	mohawk_coherent_user_range
-	.long	mohawk_flush_kern_dcache_page
+	.long	mohawk_flush_kern_dcache_area
 	.long	mohawk_dma_inv_range
 	.long	mohawk_dma_clean_range
 	.long	mohawk_dma_flush_range
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index ac5c80062b70..3e6210b4d6d4 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
-EXPORT_SYMBOL(dmac_inv_range);	/* because of flush_ioremap_region() */
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index fab134e29826..96456f548798 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache.
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xsc3_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_flush_user_cache_range
 	.long	xsc3_coherent_kern_range
 	.long	xsc3_coherent_user_range
-	.long	xsc3_flush_kern_dcache_page
+	.long	xsc3_flush_kern_dcache_area
 	.long	xsc3_dma_inv_range
 	.long	xsc3_dma_clean_range
 	.long	xsc3_dma_flush_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index f056c283682d..93df47265f2d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xscale_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHELINESIZE
@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_inv_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_flush_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range