Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        11
-rw-r--r--  arch/arm/mm/Makefile        2
-rw-r--r--  arch/arm/mm/cache-l2x0.c  104
-rw-r--r--  arch/arm/mm/consistent.c   17
-rw-r--r--  arch/arm/mm/context.c      12
-rw-r--r--  arch/arm/mm/proc-v6.S      22
-rw-r--r--  arch/arm/mm/proc-xsc3.S   151
-rw-r--r--  arch/arm/mm/tlb-v6.S        4
8 files changed, 228 insertions, 95 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index aade2f72c920..da8f043dc2cc 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -171,8 +171,8 @@ config CPU_ARM925T
 # ARM926T
 config CPU_ARM926T
 	bool "Support ARM926T processor"
-	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261
-	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261
+	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263
+	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
@@ -609,3 +609,10 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config OUTER_CACHE
+	bool
+	default n
+
+config CACHE_L2X0
+	bool
+	select OUTER_CACHE
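
Note: CACHE_L2X0 has no prompt, so a platform with an L210/L220 has to select it from its own Kconfig entry; OUTER_CACHE then comes in via the select above. A minimal sketch, using a made-up machine symbol:

    config MACH_EXAMPLE_EB
    	bool "Hypothetical board with an L220 outer cache"
    	select CACHE_L2X0
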
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d2f5672ecf62..2f8b95947774 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -66,3 +66,5 @@ obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
 obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+
+obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
new file mode 100644
index 000000000000..08a36f1b35d2
--- /dev/null
+++ b/arch/arm/mm/cache-l2x0.c
@@ -0,0 +1,104 @@
+/*
+ * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
+ *
+ * Copyright (C) 2007 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#define CACHE_LINE_SIZE		32
+
+static void __iomem *l2x0_base;
+
+static inline void sync_writel(unsigned long val, unsigned long reg,
+			       unsigned long complete_mask)
+{
+	writel(val, l2x0_base + reg);
+	/* wait for the operation to complete */
+	while (readl(l2x0_base + reg) & complete_mask)
+		;
+}
+
+static inline void cache_sync(void)
+{
+	sync_writel(0, L2X0_CACHE_SYNC, 1);
+}
+
+static inline void l2x0_inv_all(void)
+{
+	/* invalidate all ways */
+	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	cache_sync();
+}
+
+static void l2x0_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_clean_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	cache_sync();
+}
+
+static void l2x0_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(CACHE_LINE_SIZE - 1);
+	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	cache_sync();
+}
+
+void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+{
+	__u32 aux;
+
+	l2x0_base = base;
+
+	/* disable L2X0 */
+	writel(0, l2x0_base + L2X0_CTRL);
+
+	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	aux &= aux_mask;
+	aux |= aux_val;
+	writel(aux, l2x0_base + L2X0_AUX_CTRL);
+
+	l2x0_inv_all();
+
+	/* enable L2X0 */
+	writel(1, l2x0_base + L2X0_CTRL);
+
+	outer_cache.inv_range = l2x0_inv_range;
+	outer_cache.clean_range = l2x0_clean_range;
+	outer_cache.flush_range = l2x0_flush_range;
+
+	printk(KERN_INFO "L2X0 cache controller enabled\n");
+}
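
Note: nothing in this file probes the controller; platform code is expected to map the L2X0 and call l2x0_init() during boot. A sketch of such a board hook, where the physical base address is a placeholder (passing aux_val=0 with aux_mask=0xffffffff leaves the auxiliary control register exactly as the bootloader configured it):

    /* hypothetical board code; 0x1f002000 is a placeholder base address */
    static int __init example_l2_setup(void)
    {
    	void __iomem *base = ioremap(0x1f002000, 4096);

    	if (!base)
    		return -ENOMEM;

    	l2x0_init(base, 0x00000000, 0xffffffff);
    	return 0;
    }
    arch_initcall(example_l2_setup);
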
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 6a9c362fef5e..1f9f94f9af4b 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -205,9 +205,10 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * kernel direct-mapped region for device DMA.
 	 */
 	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		dmac_flush_range(kaddr, kaddr + size);
+		void *ptr = page_address(page);
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
 
 	/*
@@ -480,20 +481,24 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(void *vaddr, size_t size, int direction)
+void consistent_sync(const void *start, size_t size, int direction)
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
+	const void *end = start + size;
+
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
 		dmac_inv_range(start, end);
+		outer_inv_range(__pa(start), __pa(end));
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
 		dmac_clean_range(start, end);
+		outer_clean_range(__pa(start), __pa(end));
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
 		dmac_flush_range(start, end);
+		outer_flush_range(__pa(start), __pa(end));
 		break;
 	default:
 		BUG();
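
Note: the outer_inv_range()/outer_clean_range()/outer_flush_range() helpers used above take physical addresses (hence the __pa() conversions) and presumably reduce to no-ops until something like l2x0_init() has filled in outer_cache. A sketch of the glue this patch assumes, following the names used here:

    /* assumed definitions, roughly what <asm/cacheflush.h> would carry */
    struct outer_cache_fns {
    	void (*inv_range)(unsigned long start, unsigned long end);
    	void (*clean_range)(unsigned long start, unsigned long end);
    	void (*flush_range)(unsigned long start, unsigned long end);
    };

    extern struct outer_cache_fns outer_cache;

    static inline void outer_inv_range(unsigned long start, unsigned long end)
    {
    	if (outer_cache.inv_range)
    		outer_cache.inv_range(start, end);
    }
    /* outer_clean_range() and outer_flush_range() follow the same pattern */
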
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 79e800202424..9da43a0fdcdf 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -19,7 +19,8 @@ unsigned int cpu_last_asid = { 1 << ASID_BITS };
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID.
+ * always allocate an ASID. The ASID 0 is reserved for the TTBR
+ * register changing sequence.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -38,8 +39,15 @@ void __new_context(struct mm_struct *mm)
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0)
+	if ((asid & ~ASID_MASK) == 0) {
+		asid = ++cpu_last_asid;
+		/* set the reserved ASID before flushing the TLB */
+		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
+		    :
+		    : "r" (0));
+		isb();
 		flush_tlb_all();
+	}
 
 	mm->context.id = asid;
 }
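
Note: the ordering in the rollover path is the point of the change: CONTEXTIDR is moved to the reserved ASID 0, and the write is completed with an isb(), before flush_tlb_all(), so that speculative fetches between the flush and the next context switch cannot repopulate the TLB under a live ASID. Written as a helper, the open-coded mcr amounts to something like:

    /* sketch: the CONTEXTIDR write the patch open-codes, as a C helper */
    static inline void set_context_id(unsigned int asid)
    {
    	asm("mcr	p15, 0, %0, c13, c0, 1	@ set CONTEXTIDR"
    	    : : "r" (asid));
    	isb();
    }
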
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 7b1843befb9c..eb42e5b94863 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -14,10 +14,13 @@
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/elf.h>
-#include <asm/hardware/arm_scu.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 
+#ifdef CONFIG_SMP
+#include <asm/hardware/arm_scu.h>
+#endif
+
 #include "proc-macros.S"
 
 #define D_CACHE_LINE_SIZE	32
@@ -30,6 +33,12 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_RGN_WBWA
+#else
+#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
+#endif
+
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
 
35 44
@@ -92,9 +101,7 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-#ifdef CONFIG_SMP
-	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -183,8 +190,7 @@ __v6_setup:
 	/* Set up the SCU on core 0 only */
 	mrc	p15, 0, r0, c0, c0, 5	@ CPU core number
 	ands	r0, r0, #15
-	moveq	r0, #0x10000000 @ SCU_BASE
-	orreq	r0, r0, #0x00100000
+	ldreq	r0, =SCU_BASE
 	ldreq	r5, [r0, #SCU_CTRL]
 	orreq	r5, r5, #1
 	streq	r5, [r0, #SCU_CTRL]
@@ -204,9 +210,7 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-#ifdef CONFIG_SMP
-	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
-#endif
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
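
Note: TTB_FLAGS folds the former CONFIG_SMP special case into one macro: on SMP the page-table walks are marked shareable and outer write-back/write-allocate so they snoop correctly and can hit in an outer cache. As an illustration of the TTBR attribute bits involved (assuming TTB_RGN_WBWA is (1 << 3), consistent with the WT (2 << 3) and WB (3 << 3) values in the context above, and TTB_S being bit 1 per the ARMv6 architecture):

    /* illustration only: ARMv6 TTBR0/TTBR1 attribute bits used by TTB_FLAGS */
    #define TTB_S		(1 << 1)	/* shareable page table walks */
    #define TTB_RGN_WBWA	(1 << 3)	/* outer write-back, write-allocate */
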
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 94a58455f346..d95921a2ab99 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -5,23 +5,23 @@
  * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
  *
  * Copyright 2004 (C) Intel Corp.
- * Copyright 2005 (c) MontaVista Software, Inc.
+ * Copyright 2005 (C) MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is an
- * extension to Intel's original XScale core that adds the following
+ * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
+ * an extension to Intel's original XScale core that adds the following
  * features:
  *
  * - ARMv6 Supersections
  * - Low Locality Reference pages (replaces mini-cache)
  * - 36-bit addressing
  * - L2 cache
- * - Cache-coherency if chipset supports it
+ * - Cache coherency if chipset supports it
  *
- * Based on orignal XScale code by Nicolas Pitre
+ * Based on original XScale code by Nicolas Pitre.
  */
 
 #include <linux/linkage.h>
26 26
27#include <linux/linkage.h> 27#include <linux/linkage.h>
@@ -42,12 +42,12 @@
 #define MAX_AREA_SIZE	32768
 
 /*
- * The cache line size of the I and D cache.
+ * The cache line size of the L1 I, L1 D and unified L2 cache.
  */
 #define CACHELINESIZE	32
 
 /*
- * The size of the data cache.
+ * The size of the L1 D cache.
  */
 #define CACHESIZE	32768
 
@@ -57,9 +57,9 @@
 #define L2_CACHE_ENABLE	1
 
 /*
- * This macro is used to wait for a CP15 write and is needed
- * when we have to ensure that the last operation to the co-pro
- * was completed before continuing with operation.
+ * This macro is used to wait for a CP15 write and is needed when we
+ * have to ensure that the last operation to the coprocessor was
+ * completed before continuing with operation.
  */
 	.macro	cpwait_ret, lr, rd
 	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
@@ -68,13 +68,13 @@
 	.endm
 
 /*
- * This macro cleans & invalidates the entire xsc3 dcache by set & way.
+ * This macro cleans and invalidates the entire L1 D cache.
  */
 
 	.macro  clean_d_cache rd, rs
 	mov	\rd, #0x1f00
 	orr	\rd, \rd, #0x00e0
-1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/inv set/way
+1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
 	adds	\rd, \rd, #0x40000000
 	bcc	1b
 	subs	\rd, \rd, #0x20
@@ -119,15 +119,15 @@ ENTRY(cpu_xsc3_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
-	bic	r1, r1, #0x0086			@ ........B....CA.
 	bic	r1, r1, #0x3900			@ ..VIZ..S........
+	bic	r1, r1, #0x0086			@ ........B....CA.
 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
-	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
+	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
 	bic	r1, r1, #0x0001			@ ...............M
 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
 	@ CAUTION: MMU turned off from this point.  We count on the pipeline
 	@ already containing those two last instructions to survive.
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	mov	pc, r0
 
 /*
132 132
133/* 133/*
@@ -139,14 +139,12 @@ ENTRY(cpu_xsc3_reset)
  *
  * XScale supports clock switching, but using idle mode support
  * allows external hardware to react to system state changes.
-
- MMG: Come back to this one.
  */
 	.align	5
 
 ENTRY(cpu_xsc3_do_idle)
 	mov	r0, #1
-	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
+	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
 	mov	pc, lr
 
 /* ================================= CACHE ================================ */
@@ -171,9 +169,9 @@ ENTRY(xsc3_flush_kern_cache_all)
 __flush_whole_cache:
 	clean_d_cache r0, r1
 	tst	r2, #VM_EXEC
-	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
-	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
@@ -194,21 +192,21 @@ ENTRY(xsc3_flush_user_cache_range)
 	bhs	__flush_whole_cache
 
 1:	tst	r2, #VM_EXEC
-	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
-	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate D cache line
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
+	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	tst	r2, #VM_EXEC
-	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
-	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
+	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
  * coherent_kern_range(start, end)
  *
- * Ensure coherency between the Icache and the Dcache in the
+ * Ensure coherency between the I cache and the D cache in the
  * region described by start.  If you have non-snooping
  * Harvard caches, you need to implement this function.
  *
@@ -222,34 +220,34 @@ ENTRY(xsc3_coherent_kern_range)
 /* FALLTHROUGH */
 ENTRY(xsc3_coherent_user_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
  * flush_kern_dcache_page(void *page)
  *
  * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
+ * the I cache.
  *
  * - addr - page aligned address
  */
 ENTRY(xsc3_flush_kern_dcache_page)
 	add	r1, r0, #PAGE_SZ
-1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/Invalidate D Cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 /*
@@ -266,17 +264,17 @@ ENTRY(xsc3_flush_kern_dcache_page)
 ENTRY(xsc3_dma_inv_range)
 	tst	r0, #CACHELINESIZE - 1
 	bic	r0, r0, #CACHELINESIZE - 1
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
-	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 line
 	tst	r1, #CACHELINESIZE - 1
-	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D entry
-	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 D entry
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D entry
-	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
+	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 line
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
+	mcr	p15, 1, r0, c7, c7, 1		@ invalidate L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 /*
@@ -289,12 +287,12 @@ ENTRY(xsc3_dma_inv_range)
  */
 ENTRY(xsc3_dma_clean_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
-	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 /*
@@ -307,13 +305,13 @@ ENTRY(xsc3_dma_clean_range)
  */
 ENTRY(xsc3_dma_flush_range)
 	bic	r0, r0, #CACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate L1 D cache line
-	mcr	p15, 1, r0, c7, c11, 1		@ Clean L2 D cache line
-	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
+	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 line
+	mcr	p15, 1, r0, c7, c7, 1		@ invalidate L2 line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
 	blo	1b
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 ENTRY(xsc3_cache_fns)
@@ -328,7 +326,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_dma_flush_range
 
 ENTRY(cpu_xsc3_dcache_clean_area)
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	add	r0, r0, #CACHELINESIZE
 	subs	r1, r1, #CACHELINESIZE
 	bhi	1b
@@ -346,14 +344,14 @@ ENTRY(cpu_xsc3_dcache_clean_area)
 	.align	5
 ENTRY(cpu_xsc3_switch_mm)
 	clean_d_cache r1, r2
-	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
+	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
 #ifdef L2_CACHE_ENABLE
 	orr	r0, r0, #0x18			@ cache the page table in L2
 #endif
 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	cpwait_ret lr, ip
 
 /*
@@ -366,34 +364,34 @@ ENTRY(cpu_xsc3_switch_mm)
 ENTRY(cpu_xsc3_set_pte_ext)
 	str	r1, [r0], #-2048		@ linux version
 
-	bic	r2, r1, #0xff0			@ Keep C, B bits
+	bic	r2, r1, #0xff0			@ keep C, B bits
 	orr	r2, r2, #PTE_TYPE_EXT		@ extended page
-	tst	r1, #L_PTE_SHARED		@ Shared?
+	tst	r1, #L_PTE_SHARED		@ shared?
 	orrne	r2, r2, #0x200
 
 	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
 
-	tst	r3, #L_PTE_USER			@ User?
+	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
+	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
 						@ combined with user -> user r/w
 
 #if L2_CACHE_ENABLE
-	@ If its cacheable it needs to be in L2 also.
+	@ If it's cacheable, it needs to be in L2 also.
 	eor	ip, r1, #L_PTE_CACHEABLE
 	tst	ip, #L_PTE_CACHEABLE
 	orreq	r2, r2, #PTE_EXT_TEX(0x5)
 #endif
 
-	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
+	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
 	movne	r2, #0				@ no -> fault
 
 	str	r2, [r0]			@ hardware version
 	mov	ip, #0
-	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line mcr
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
+	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
 	mov	pc, lr
 
 	.ltorg
@@ -406,17 +404,18 @@ ENTRY(cpu_xsc3_set_pte_ext)
 __xsc3_setup:
 	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r0
-	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
-	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
-	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
-	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
+	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
+	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
+	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 #if L2_CACHE_ENABLE
 	orr	r4, r4, #0x18			@ cache the page table in L2
 #endif
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
-	mov	r0, #1				@ Allow access to CP0 and CP13
-	orr	r0, r0, #1 << 13		@ Its undefined whether this
-	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
+
+	mov	r0, #0				@ don't allow CP access
+	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register
+
 	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
 	and	r0, r0, #2			@ preserve bit P bit setting
 #if L2_CACHE_ENABLE
@@ -427,9 +426,9 @@ __xsc3_setup:
 	adr	r5, xsc3_crval
 	ldmia	r5, {r5, r6}
 	mrc	p15, 0, r0, c1, c0, 0		@ get control register
-	bic	r0, r0, r5			@ .... .... .... ..A.
-	orr	r0, r0, r6			@ .... .... .... .C.M
-	orr	r0, r0, #0x00000800		@ ..VI Z..S .... ....
+	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
+	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
+						@ ...I Z..S .... .... (uc)
 #if L2_CACHE_ENABLE
 	orr	r0, r0, #0x04000000		@ L2 enable
 #endif
@@ -439,7 +438,7 @@ __xsc3_setup:
 
 	.type	xsc3_crval, #object
 xsc3_crval:
-	crval	clear=0x04003b02, mmuset=0x00003105, ucset=0x00001100
+	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900
 
 	__INITDATA
 
@@ -474,7 +473,7 @@ cpu_elf_name:
 
 	.type	cpu_xsc3_name, #object
 cpu_xsc3_name:
-	.asciz	"XScale-Core3"
+	.asciz	"XScale-V3 based processor"
 	.size	cpu_xsc3_name, . - cpu_xsc3_name
 
 	.align
@@ -490,7 +489,7 @@ __xsc3_proc_info:
 		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	.long   PMD_TYPE_SECT | \
+	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
 	b	__xsc3_setup
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index fd6adde39091..20f84bbaa9bb 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -53,6 +53,8 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, ip, c7, c5, 6	@ flush BTAC/BTB
+	mcr	p15, 0, ip, c7, c10, 4	@ data synchronization barrier
 	mov	pc, lr
 
 /*
@@ -80,7 +82,9 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, r2, c7, c5, 6	@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4	@ data synchronization barrier
+	mcr	p15, 0, r2, c7, c5, 4	@ prefetch flush
 	mov	pc, lr
 
 	.section ".text.init", #alloc, #execinstr
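
Note: both paths now end with the same tail: flush the branch predictor, then a data synchronization barrier (and, on the kernel-range path, a prefetch flush acting as an instruction barrier), so no stale branch or translation state survives the TLB operations. A hedged sketch of the equivalent sequence written from C:

    /* sketch of the equivalent barrier tail in C inline assembly */
    static inline void v6_tlb_op_barrier(void)
    {
    	unsigned long zero = 0;

    	asm volatile(
    	"mcr	p15, 0, %0, c7, c5, 6\n\t"	/* flush BTAC/BTB */
    	"mcr	p15, 0, %0, c7, c10, 4\n\t"	/* data synchronization barrier */
    	"mcr	p15, 0, %0, c7, c5, 4"		/* prefetch flush */
    	: : "r" (zero) : "memory");
    }
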