Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig         7
-rw-r--r--   arch/arm/mm/Makefile        2
-rw-r--r--   arch/arm/mm/cache-l2x0.c  104
-rw-r--r--   arch/arm/mm/consistent.c   17
-rw-r--r--   arch/arm/mm/context.c      12
-rw-r--r--   arch/arm/mm/proc-v6.S      14
-rw-r--r--   arch/arm/mm/tlb-v6.S        4
7 files changed, 146 insertions, 14 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index aade2f72c920..af3fa9d622ff 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -609,3 +609,10 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config OUTER_CACHE
+	bool
+	default n
+
+config CACHE_L2X0
+	bool
+	select OUTER_CACHE
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d2f5672ecf62..2f8b95947774 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -66,3 +66,5 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
 obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+
+obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
new file mode 100644
index 000000000000..08a36f1b35d2
--- /dev/null
+++ b/arch/arm/mm/cache-l2x0.c
@@ -0,0 +1,104 @@
+/*
+ * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#define CACHE_LINE_SIZE		32
+
+static void __iomem *l2x0_base;
+
+static inline void sync_writel(unsigned long val, unsigned long reg,
+			       unsigned long complete_mask)
+{
+	writel(val, l2x0_base + reg);
+	/* wait for the operation to complete */
+	while (readl(l2x0_base + reg) & complete_mask)
+		;
+}
+
+static inline void cache_sync(void)
+{
+	sync_writel(0, L2X0_CACHE_SYNC, 1);
+}
+
+static inline void l2x0_inv_all(void)
+{
+	/* invalidate all ways */
+	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	cache_sync();
+}
+
+static void l2x0_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_clean_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+{
+	__u32 aux;
+
+	l2x0_base = base;
+
+	/* disable L2X0 */
+	writel(0, l2x0_base + L2X0_CTRL);
+
+	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	aux &= aux_mask;
+	aux |= aux_val;
+	writel(aux, l2x0_base + L2X0_AUX_CTRL);
+
+	l2x0_inv_all();
+
+	/* enable L2X0 */
+	writel(1, l2x0_base + L2X0_CTRL);
+
+	outer_cache.inv_range = l2x0_inv_range;
+	outer_cache.clean_range = l2x0_clean_range;
+	outer_cache.flush_range = l2x0_flush_range;
+
+	printk(KERN_INFO "L2X0 cache controller enabled\n");
+}
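
Note: a platform enables this driver by selecting CACHE_L2X0 and calling l2x0_init() from its early init code. A minimal sketch, assuming a hypothetical controller base address and illustrative aux values (none of this is part of the patch; real platforms take the way-size/associativity fields from the L2x0 TRM):

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/hardware/cache-l2x0.h>

static int __init board_l2x0_init(void)
{
	/* HYPOTHETICAL physical base; each platform maps its own L210/L220 */
	void __iomem *base = ioremap(0x10210000, 4096);

	if (!base)
		return -ENOMEM;
	/*
	 * aux_val = 0 with aux_mask = 0xffffffff leaves the RTL-configured
	 * auxiliary control register unchanged; pass real values to adjust
	 * latency or way configuration.
	 */
	l2x0_init(base, 0x00000000, 0xffffffff);
	return 0;
}
core_initcall(board_l2x0_init);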
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 6a9c362fef5e..1f9f94f9af4b 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -205,9 +205,10 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * kernel direct-mapped region for device DMA.
 	 */
 	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		dmac_flush_range(kaddr, kaddr + size);
+		void *ptr = page_address(page);
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
 
 	/*
@@ -480,20 +481,24 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(void *vaddr, size_t size, int direction)
+void consistent_sync(const void *start, size_t size, int direction)
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
+	const void *end = start + size;
+
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
 		dmac_inv_range(start, end);
+		outer_inv_range(__pa(start), __pa(end));
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
 		dmac_clean_range(start, end);
+		outer_clean_range(__pa(start), __pa(end));
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
 		dmac_flush_range(start, end);
+		outer_flush_range(__pa(start), __pa(end));
 		break;
 	default:
 		BUG();
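
Note: the outer_inv_range/outer_clean_range/outer_flush_range calls added above are not defined in this file; in this series they are presumably thin wrappers in asm/cacheflush.h that indirect through the outer_cache function table which cache-l2x0.c populates, along these lines (a sketch, not part of this hunk):

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

extern struct outer_cache_fns outer_cache;

/* no-op unless an outer cache driver has registered itself */
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

When CONFIG_OUTER_CACHE is not set, these wrappers are assumed to compile away to empty inlines, which is why the callers need no #ifdefs.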
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 79e800202424..9da43a0fdcdf 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -19,7 +19,8 @@ unsigned int cpu_last_asid = { 1 << ASID_BITS };
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID.
+ * always allocate an ASID. The ASID 0 is reserved for the TTBR
+ * register changing sequence.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -38,8 +39,15 @@ void __new_context(struct mm_struct *mm)
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0)
+	if ((asid & ~ASID_MASK) == 0) {
+		asid = ++cpu_last_asid;
+		/* set the reserved ASID before flushing the TLB */
+		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
+		    :
+		    : "r" (0));
+		isb();
 		flush_tlb_all();
+	}
 
 	mm->context.id = asid;
 }
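
Note: the mcr above writes the ARMv6 Context ID register (CP15 c13, c0, 1), whose low 8 bits carry the ASID. Parking it at the reserved value 0 before the TLB flush means speculative fetches during the switch cannot allocate TLB entries tagged with a freshly reallocated ASID. A C-level restatement of the same operation (illustrative helper, not part of this patch):

/* hypothetical helper: write the ARMv6 Context ID register */
static inline void set_context_id(unsigned int cid)
{
	asm volatile("mcr	p15, 0, %0, c13, c0, 1" : : "r" (cid));
	isb();	/* make the new context ID visible before continuing */
}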
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index f27d9eb64803..eb42e5b94863 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -33,6 +33,12 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_RGN_WBWA
+#else
+#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
+#endif
+
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
 
@@ -95,9 +101,7 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-#ifdef CONFIG_SMP
-	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -206,9 +210,7 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-#ifdef CONFIG_SMP
-	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
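
Note: folding the previously SMP-only TTBR region bits into TTB_FLAGS means UP builds now also mark hardware page-table walks outer write-back/write-allocate, which is what allows the walks to hit in an L2X0. Restated in C, with the TTB_RGN_WBWA and TTB_S values assumed from the same #define table whose WT/WB entries appear in the hunk context above:

/* TTBR low-bit encodings on ARMv6 (values assumed from proc-v6.S) */
#define TTB_S		(1 << 1)	/* page-table walks shared (SMP) */
#define TTB_RGN_WBWA	(1 << 3)	/* outer write-back, write-allocate */
#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_RGN_WBWA
#else
#define TTB_FLAGS	(TTB_RGN_WBWA | TTB_S)
#endif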
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index fd6adde39091..20f84bbaa9bb 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -53,6 +53,8 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	mov	pc, lr
 
 /*
@@ -80,7 +82,9 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
+	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 	.section ".text.init", #alloc, #execinstr
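
Note: the CP15 operations appended to both TLB paths correspond to the generic barrier macros used elsewhere in the series (isb() is already called from context.c above). On ARMv6 they are assumed to expand roughly as below; the real definitions live in the asm headers of this era:

/* drain write buffer / data synchronization barrier */
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
/* prefetch flush, ARMv6's instruction synchronization barrier */
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")

The added BTAC/BTB flush keeps stale branch-predictor entries from surviving a TLB range operation.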