 arch/arm/common/dmabounce.c           |  21
 arch/arm/kernel/setup.c               |   3
 arch/arm/mm/Kconfig                   |   7
 arch/arm/mm/Makefile                  |   2
 arch/arm/mm/cache-l2x0.c              | 104
 arch/arm/mm/consistent.c              |  17
 arch/arm/mm/context.c                 |  12
 arch/arm/mm/proc-v6.S                 |  14
 arch/arm/mm/tlb-v6.S                  |   4
 include/asm-arm/cacheflush.h          |  49
 include/asm-arm/dma-mapping.h         |   2
 include/asm-arm/domain.h              |   1
 include/asm-arm/hardware/cache-l2x0.h |  56
 include/asm-arm/system.h              |  62
 include/asm-arm/tlbflush.h            |  50
 15 files changed, 338 insertions(+), 66 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 2e635b814c14..2362c498f52e 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -281,10 +281,14 @@ map_single(struct device *dev, void *ptr, size_t size,
 		ptr = buf->safe;
 
 		dma_addr = buf->safe_dma_addr;
+	} else {
+		/*
+		 * We don't need to sync the DMA buffer since
+		 * it was allocated via the coherent allocators.
+		 */
+		consistent_sync(ptr, size, dir);
 	}
 
-	consistent_sync(ptr, size, dir);
-
 	return dma_addr;
 }
 
@@ -317,12 +321,12 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		DO_STATS ( device_info->bounce_count++ );
 
 		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			unsigned long ptr;
+			void *ptr = buf->ptr;
 
 			dev_dbg(dev,
 				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, buf->ptr, size);
-			memcpy(buf->ptr, buf->safe, size);
+				__func__, buf->safe, ptr, size);
+			memcpy(ptr, buf->safe, size);
 
 			/*
 			 * DMA buffers must have the same cache properties
@@ -332,8 +336,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 			 * bidirectional case because we know the cache
 			 * lines will be coherent with the data written.
 			 */
-			ptr = (unsigned long)buf->ptr;
 			dmac_clean_range(ptr, ptr + size);
+			outer_clean_range(__pa(ptr), __pa(ptr) + size);
 		}
 		free_safe_buffer(device_info, buf);
 	}
@@ -397,7 +401,10 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		default:
 			BUG();
 		}
-		consistent_sync(buf->safe, size, dir);
+		/*
+		 * No need to sync the safe buffer - it was allocated
+		 * via the coherent allocators.
+		 */
 	} else {
 		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
 	}
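
The net effect of the dmabounce changes above is that cache maintenance only
happens on memory the CPU actually touched: bounced copies come from the
coherent allocator and need no sync, while everything else is now maintained
at both cache levels. A hedged sketch (not the kernel's own code) of the
inner-then-outer pattern these hunks adopt - L1 operations take virtual
addresses, L2X0 operations take physical addresses via __pa():

#include <linux/types.h>
#include <asm/cacheflush.h>

/* write back [ptr, ptr + size) ahead of a device read;
 * 'ptr' and 'size' are hypothetical */
static void sync_range_for_device(void *ptr, size_t size)
{
	/* clean dirty inner (L1) lines, by virtual address */
	dmac_clean_range(ptr, ptr + size);
	/* mirror the clean on the outer (L2) cache, by physical address */
	outer_clean_range(__pa(ptr), __pa(ptr) + size);
}
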
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bbab134cd82d..243aea458057 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -88,6 +88,9 @@ struct cpu_user_fns cpu_user;
 #ifdef MULTI_CACHE
 struct cpu_cache_fns cpu_cache;
 #endif
+#ifdef CONFIG_OUTER_CACHE
+struct outer_cache_fns outer_cache;
+#endif
 
 struct stack {
 	u32 irq[3];
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index aade2f72c920..af3fa9d622ff 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -609,3 +609,10 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config OUTER_CACHE
+	bool
+	default n
+
+config CACHE_L2X0
+	bool
+	select OUTER_CACHE
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d2f5672ecf62..2f8b95947774 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -66,3 +66,5 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
 obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+
+obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
new file mode 100644
index 000000000000..08a36f1b35d2
--- /dev/null
+++ b/arch/arm/mm/cache-l2x0.c
@@ -0,0 +1,104 @@
+/*
+ * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#define CACHE_LINE_SIZE		32
+
+static void __iomem *l2x0_base;
+
+static inline void sync_writel(unsigned long val, unsigned long reg,
+			       unsigned long complete_mask)
+{
+	writel(val, l2x0_base + reg);
+	/* wait for the operation to complete */
+	while (readl(l2x0_base + reg) & complete_mask)
+		;
+}
+
+static inline void cache_sync(void)
+{
+	sync_writel(0, L2X0_CACHE_SYNC, 1);
+}
+
+static inline void l2x0_inv_all(void)
+{
+	/* invalidate all ways */
+	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	cache_sync();
+}
+
+static void l2x0_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_clean_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+{
+	__u32 aux;
+
+	l2x0_base = base;
+
+	/* disable L2X0 */
+	writel(0, l2x0_base + L2X0_CTRL);
+
+	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	aux &= aux_mask;
+	aux |= aux_val;
+	writel(aux, l2x0_base + L2X0_AUX_CTRL);
+
+	l2x0_inv_all();
+
+	/* enable L2X0 */
+	writel(1, l2x0_base + L2X0_CTRL);
+
+	outer_cache.inv_range = l2x0_inv_range;
+	outer_cache.clean_range = l2x0_clean_range;
+	outer_cache.flush_range = l2x0_flush_range;
+
+	printk(KERN_INFO "L2X0 cache controller enabled\n");
+}
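
l2x0_init() is meant to be called once from platform code after the
controller's registers are mapped; the aux_val/aux_mask pair overrides
selected AUX_CTRL bits while preserving the rest (aux = (aux & mask) | val).
A hedged sketch of such a call site: MYBOARD_L220_BASE is a hypothetical
physical address, and val 0 with an all-ones mask keeps the controller's
existing auxiliary settings.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/hardware/cache-l2x0.h>

static int __init myboard_l2_init(void)
{
	/* map one page of L220 register space (base is hypothetical) */
	void __iomem *base = ioremap(MYBOARD_L220_BASE, 4096);

	if (!base)
		return -ENOMEM;

	/* keep the hardware's AUX_CTRL defaults */
	l2x0_init(base, 0x00000000, 0xffffffff);
	return 0;
}
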
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 6a9c362fef5e..1f9f94f9af4b 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -205,9 +205,10 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * kernel direct-mapped region for device DMA.
 	 */
 	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		dmac_flush_range(kaddr, kaddr + size);
+		void *ptr = page_address(page);
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
 
 	/*
@@ -480,20 +481,24 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(void *vaddr, size_t size, int direction)
+void consistent_sync(const void *start, size_t size, int direction)
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end   = start + size;
+	const void *end = start + size;
+
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
 		dmac_inv_range(start, end);
+		outer_inv_range(__pa(start), __pa(end));
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
 		dmac_clean_range(start, end);
+		outer_clean_range(__pa(start), __pa(end));
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
 		dmac_flush_range(start, end);
+		outer_flush_range(__pa(start), __pa(end));
 		break;
 	default:
 		BUG();
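
With the outer_* calls added, each direction now pairs the L1 operation with
the matching L2 operation on the same physical range. A hedged sketch of what
that means for a streaming driver going through the standard dma_sync_*
wrappers (which, per the comment above, funnel into consistent_sync() on
these platforms); 'dev', 'handle' and 'len' are hypothetical:

#include <linux/dma-mapping.h>

static void rx_turnaround(struct device *dev, dma_addr_t handle, size_t len)
{
	/* device filled the buffer: inner + outer invalidate (FROM_DEVICE) */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU parses the received data ... */

	/* hand the buffer back for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
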
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 79e800202424..9da43a0fdcdf 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -19,7 +19,8 @@ unsigned int cpu_last_asid = { 1 << ASID_BITS };
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID.
+ * always allocate an ASID.  The ASID 0 is reserved for the TTBR
+ * register changing sequence.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -38,8 +39,15 @@ void __new_context(struct mm_struct *mm)
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0)
+	if ((asid & ~ASID_MASK) == 0) {
+		asid = ++cpu_last_asid;
+		/* set the reserved ASID before flushing the TLB */
+		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
+		    :
+		    : "r" (0));
+		isb();
 		flush_tlb_all();
+	}
 
 	mm->context.id = asid;
 }
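
The rollover test works because an ASID value is a version counter in the
high bits plus a hardware ASID in the low ASID_BITS; when the increment
carries out of the low bits, a new version begins and all TLB entries tagged
under the old version must go. A hedged worked example of the arithmetic,
assuming ASID_BITS == 8 (so ~ASID_MASK == 0xff):

#define ASID_BITS	8
#define ASID_MASK	(~0U << ASID_BITS)

static unsigned int next_asid(unsigned int last_asid)
{
	unsigned int asid = last_asid + 1;	/* e.g. 0x1ff + 1 = 0x200 */

	if ((asid & ~ASID_MASK) == 0) {
		/*
		 * Low bits carried out: bump past hardware ASID 0, which
		 * is now reserved for the TTBR changing sequence, giving
		 * 0x201 (version 2, hardware ASID 1).
		 */
		asid += 1;
	}
	return asid;
}
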
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index f27d9eb64803..eb42e5b94863 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -33,6 +33,12 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_RGN_WBWA
+#else
+#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
+#endif
+
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
 
@@ -95,9 +101,7 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-#ifdef CONFIG_SMP
-	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -206,9 +210,7 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-#ifdef CONFIG_SMP
-	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index fd6adde39091..20f84bbaa9bb 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -53,6 +53,8 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	mov	pc, lr
 
 /*
@@ -80,7 +82,9 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
+	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 	.section ".text.init", #alloc, #execinstr
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 5f531ea03059..afad32c76e6c 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -185,9 +185,15 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_page)(void *);
 
-	void (*dma_inv_range)(unsigned long, unsigned long);
-	void (*dma_clean_range)(unsigned long, unsigned long);
-	void (*dma_flush_range)(unsigned long, unsigned long);
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_flush_range)(const void *, const void *);
+};
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
 };
 
 /*
@@ -240,9 +246,40 @@ extern void __cpuc_flush_dcache_page(void *);
 #define dmac_clean_range	__glue(_CACHE,_dma_clean_range)
 #define dmac_flush_range	__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(unsigned long, unsigned long);
-extern void dmac_clean_range(unsigned long, unsigned long);
-extern void dmac_flush_range(unsigned long, unsigned long);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
+extern void dmac_flush_range(const void *, const void *);
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
 
 #endif
 
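
Because the no-CONFIG_OUTER_CACHE stubs are empty inlines and the real
wrappers test the function pointer first, generic code can call the outer_*
helpers unconditionally. A hedged sketch of how another outer-cache
implementation would plug into the same hooks, mirroring what l2x0_init()
does; the my_* names are hypothetical:

static void my_outer_inv_range(unsigned long start, unsigned long end)
{
	/* hardware-specific invalidate by physical address range */
}

static void __init my_outer_cache_init(void)
{
	outer_cache.inv_range = my_outer_inv_range;
	/* .clean_range and .flush_range would be wired up the same way */
}
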
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 9bc46b486afb..a1d574cdcc14 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -17,7 +17,7 @@
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(void *kaddr, size_t size, int rw);
+extern void consistent_sync(const void *kaddr, size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
diff --git a/include/asm-arm/domain.h b/include/asm-arm/domain.h
index 4c2885abbe6c..3c12a7625304 100644
--- a/include/asm-arm/domain.h
+++ b/include/asm-arm/domain.h
@@ -57,6 +57,7 @@
 	__asm__ __volatile__(					\
 	"mcr	p15, 0, %0, c3, c0	@ set domain"		\
 	  : : "r" (x));						\
+	isb();							\
 	} while (0)
 
 #define modify_domain(dom,type)					\
diff --git a/include/asm-arm/hardware/cache-l2x0.h b/include/asm-arm/hardware/cache-l2x0.h
new file mode 100644
index 000000000000..54029a740396
--- /dev/null
+++ b/include/asm-arm/hardware/cache-l2x0.h
@@ -0,0 +1,56 @@
+/*
+ * include/asm-arm/hardware/cache-l2x0.h
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARM_HARDWARE_L2X0_H
+#define __ASM_ARM_HARDWARE_L2X0_H
+
+#define L2X0_CACHE_ID			0x000
+#define L2X0_CACHE_TYPE			0x004
+#define L2X0_CTRL			0x100
+#define L2X0_AUX_CTRL			0x104
+#define L2X0_EVENT_CNT_CTRL		0x200
+#define L2X0_EVENT_CNT1_CFG		0x204
+#define L2X0_EVENT_CNT0_CFG		0x208
+#define L2X0_EVENT_CNT1_VAL		0x20C
+#define L2X0_EVENT_CNT0_VAL		0x210
+#define L2X0_INTR_MASK			0x214
+#define L2X0_MASKED_INTR_STAT		0x218
+#define L2X0_RAW_INTR_STAT		0x21C
+#define L2X0_INTR_CLEAR			0x220
+#define L2X0_CACHE_SYNC			0x730
+#define L2X0_INV_LINE_PA		0x770
+#define L2X0_INV_WAY			0x77C
+#define L2X0_CLEAN_LINE_PA		0x7B0
+#define L2X0_CLEAN_LINE_IDX		0x7B8
+#define L2X0_CLEAN_WAY			0x7BC
+#define L2X0_CLEAN_INV_LINE_PA		0x7F0
+#define L2X0_CLEAN_INV_LINE_IDX		0x7F8
+#define L2X0_CLEAN_INV_WAY		0x7FC
+#define L2X0_LOCKDOWN_WAY_D		0x900
+#define L2X0_LOCKDOWN_WAY_I		0x904
+#define L2X0_TEST_OPERATION		0xF00
+#define L2X0_LINE_DATA			0xF10
+#define L2X0_LINE_TAG			0xF30
+#define L2X0_DEBUG_CTRL			0xF40
+
+#ifndef __ASSEMBLY__
+extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+#endif
+
+#endif
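
The register map also allows simple identification queries before init. A
hedged sketch reading L2X0_CACHE_ID from an already-mapped base; the field
layout used here (implementer in bits [31:24], part number in bits [9:6]) is
an assumption to be checked against the L210/L220 TRMs:

#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/hardware/cache-l2x0.h>

static void l2_identify(void __iomem *base)
{
	u32 id = readl(base + L2X0_CACHE_ID);

	printk(KERN_INFO "L2 cache ID 0x%08x: implementer 0x%02x, part 0x%x\n",
	       id, (id >> 24) & 0xff, (id >> 6) & 0xf);
}
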
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index aa223fc546af..f4386906b200 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -140,6 +140,40 @@ static inline int cpu_is_xsc3(void)
 #define	cpu_is_xscale()	1
 #endif
 
+#define UDBG_UNDEFINED	(1 << 0)
+#define UDBG_SYSCALL	(1 << 1)
+#define UDBG_BADABORT	(1 << 2)
+#define UDBG_SEGV	(1 << 3)
+#define UDBG_BUS	(1 << 4)
+
+extern unsigned int user_debug;
+
+#if __LINUX_ARM_ARCH__ >= 4
+#define vectors_high()	(cr_alignment & CR_V)
+#else
+#define vectors_high()	(0)
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 6
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+				    : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+				    : : "r" (0) : "memory")
+#else
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+				    : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#endif
+#define mb() dmb()
+#define rmb() mb()
+#define wmb() mb()
+#define read_barrier_depends() do { } while(0)
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -154,6 +188,7 @@ static inline void set_cr(unsigned int val)
 {
 	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
 	  : : "r" (val) : "cc");
+	isb();
 }
 
 #ifndef CONFIG_SMP
@@ -176,34 +211,9 @@ static inline void set_copro_access(unsigned int val)
 {
 	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
 	  : : "r" (val) : "cc");
+	isb();
 }
 
-#define UDBG_UNDEFINED	(1 << 0)
-#define UDBG_SYSCALL	(1 << 1)
-#define UDBG_BADABORT	(1 << 2)
-#define UDBG_SEGV	(1 << 3)
-#define UDBG_BUS	(1 << 4)
-
-extern unsigned int user_debug;
-
-#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high()	(cr_alignment & CR_V)
-#else
-#define vectors_high()	(0)
-#endif
-
-#if __LINUX_ARM_ARCH__ >= 6
-#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-				    : : "r" (0) : "memory")
-#else
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#endif
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
-
 /*
  * switch_mm() may do a full cache flush over the context switch,
  * so enable interrupts over the context switch to avoid high
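
Splitting the old mb() into isb()/dsb()/dmb() lets callers pick the cheapest
barrier that is architecturally sufficient: dsb() waits for outstanding
memory and maintenance operations, while isb() flushes the pipeline so a
just-written CP15 register takes effect, which is why set_cr(),
set_copro_access() and set_domain() now end with isb(). A hedged sketch of
the same idiom, modelled on the context.c hunk above:

static inline void write_context_id(unsigned int id)
{
	/* write the CONTEXTIDR register ... */
	asm volatile("mcr p15, 0, %0, c13, c0, 1" : : "r" (id));
	/* ... then ensure following instructions see the new value */
	isb();
}
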
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
index cd10a0b5f8ae..08c6991dc9c9 100644
--- a/include/asm-arm/tlbflush.h
+++ b/include/asm-arm/tlbflush.h
@@ -247,7 +247,7 @@ static inline void local_flush_tlb_all(void)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr	p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_FULL))
 		asm("mcr	p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
@@ -257,6 +257,15 @@ static inline void local_flush_tlb_all(void)
 		asm("mcr	p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
 	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
 		asm("mcr	p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr	p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -266,7 +275,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr	p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_FULL))
@@ -285,6 +294,14 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		asm("mcr	p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
 	if (tlb_flag(TLB_V6_I_ASID))
 		asm("mcr	p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr	p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void
@@ -296,7 +313,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr	p15, 0, %0, c7, c10, 4" : : "r" (zero));
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_PAGE))
@@ -317,6 +334,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr	p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr	p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr	p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -327,7 +352,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	kaddr &= PAGE_MASK;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr	p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_PAGE))
 		asm("mcr	p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
@@ -347,11 +372,14 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr	p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
 
-	/* The ARM ARM states that the completion of a TLB maintenance
-	 * operation is only guaranteed by a DSB instruction
-	 */
-	if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
-		asm("mcr	p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr	p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 /*
@@ -369,15 +397,13 @@ static inline void flush_pmd_entry(pmd_t *pmd)
  */
 static inline void flush_pmd_entry(pmd_t *pmd)
 {
-	const unsigned int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_DCLEAN))
 		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
 			: : "r" (pmd) : "cc");
 	if (tlb_flag(TLB_WB))
-		asm("mcr	p15, 0, %0, c7, c10, 4	@ flush_pmd"
-			: : "r" (zero) : "cc");
+		dsb();
 }
 
 static inline void clean_pmd_entry(pmd_t *pmd)