author | Catalin Marinas <catalin.marinas@arm.com> | 2012-03-05 06:49:28 -0500
---|---|---
committer | Catalin Marinas <catalin.marinas@arm.com> | 2012-09-17 08:42:00 -0400
commit | f1a0c4aa0937975b53991842a494f741d7769b02 | (patch)
tree | 9b344c5267cb982b14a2372a0a20714f5b36d61a | /arch/arm64/mm/cache.S
parent | 9cce7a435f89c9e60f244d44da2cf1cf4ed094ac | (diff)
arm64: Cache maintenance routines
The patch adds functionality required for cache maintenance. The AArch64
architecture mandates non-aliasing VIPT or PIPT D-cache and VIPT (may
have aliases) or ASID-tagged VIVT I-cache. Cache maintenance operations
are automatically broadcast in hardware between CPUs.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64/mm/cache.S')
-rw-r--r-- | arch/arm64/mm/cache.S | 168
1 file changed, 168 insertions, 0 deletions
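The patch adds five entry points that the rest of the kernel calls from C. As a rough sketch of the C-level interfaces (argument layout inferred from the register usage in the assembly below, with x0 carrying the first argument and x1 the second; the authoritative declarations live in the arm64 cacheflush header, not here):

```c
#include <stddef.h>

/*
 * Hedged sketch of the C-visible interfaces implemented by cache.S;
 * illustrative prototypes, not the kernel's authoritative declarations.
 */
void __flush_dcache_all(void);		/* clean + invalidate whole D-cache by set/way */
void flush_cache_all(void);		/* __flush_dcache_all(), then "ic ialluis"     */
void flush_icache_range(unsigned long start, unsigned long end);
void __flush_cache_user_range(unsigned long start, unsigned long end);
void __flush_dcache_area(void *kaddr, size_t size);	/* clean + invalidate by VA */
```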
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
new file mode 100644
index 000000000000..abe69b80cf7f
--- /dev/null
+++ b/arch/arm64/mm/cache.S
@@ -0,0 +1,168 @@
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 *	__flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: x0-x7, x9-x11
 */
ENTRY(__flush_dcache_all)
	dsb	sy				// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1			// read clidr
	and	x3, x0, #0x7000000		// extract loc from clidr
	lsr	x3, x3, #23			// left align loc bit field
	cbz	x3, finished			// if loc is 0, then no need to clean
	mov	x10, #0				// start clean at cache level 0
loop1:
	add	x2, x10, x10, lsr #1		// work out 3x current cache level
	lsr	x1, x0, x2			// extract cache type bits from clidr
	and	x1, x1, #7			// mask off the bits for the current cache only
	cmp	x1, #2				// see what cache we have at this level
	b.lt	skip				// skip if no cache, or just i-cache
	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10			// select current cache level in csselr
	isb					// isb to sync the new csselr & ccsidr
	mrs	x1, ccsidr_el1			// read the new ccsidr
	restore_irqs x9
	and	x2, x1, #7			// extract the length of the cache lines
	add	x2, x2, #4			// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3		// find the maximum way number (ways - 1)
	clz	x5, x4				// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13		// extract the maximum set number (sets - 1)
loop2:
	mov	x9, x4				// create working copy of max way size
loop3:
	lsl	x6, x9, x5
	orr	x11, x10, x6			// factor way and cache number into x11
	lsl	x6, x7, x2
	orr	x11, x11, x6			// factor index number into x11
	dc	cisw, x11			// clean & invalidate by set/way
	subs	x9, x9, #1			// decrement the way
	b.ge	loop3
	subs	x7, x7, #1			// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2			// increment cache number
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0				// switch back to cache level 0
	msr	csselr_el1, x10			// select current cache level in csselr
	dsb	sy
	isb
	ret
ENDPROC(__flush_dcache_all)
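loop1 above walks the cache levels reported by CLIDR_EL1, and loop2/loop3 visit every set and way at each level, packing the level, set and way into a single register for `dc cisw`. A hedged C sketch of that operand layout (field positions follow the architectural DC CISW encoding; the helper name and parameters are invented for illustration):

```c
#include <stdint.h>

/*
 * Illustration of the set/way operand composed in loop3 above.
 * line_shift = (CCSIDR_EL1 & 7) + 4, i.e. log2(line size in bytes);
 * way_shift places the way index at the top of the 32-bit operand
 * (the assembly derives it with clz on the maximum way number).
 */
static uint64_t dc_cisw_operand(uint64_t level, uint64_t set, uint64_t way,
				unsigned int line_shift, unsigned int way_shift)
{
	return (way << way_shift) |	/* way index in the top bits       */
	       (set << line_shift) |	/* set index above the line offset */
	       (level << 1);		/* cache level in bits [3:1]       */
}
```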

/*
 *	flush_cache_all()
 *
 *	Flush the entire cache system.  The data cache flush is now achieved
 *	using atomic clean / invalidates working outwards from L1 cache. This
 *	is done using Set/Way based cache maintenance instructions.  The
 *	instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(flush_cache_all)
	mov	x12, lr
	bl	__flush_dcache_all
	mov	x0, #0
	ic	ialluis				// I+BTB cache invalidate
	ret	x12
ENDPROC(flush_cache_all)

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, dc	cvau, x4	)		// clean D line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	sy

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, ic	ivau, x4	)		// invalidate I line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
9:						// ignore any faulting cache operation
	dsb	sy
	isb
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
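As a hedged usage sketch (not part of this patch): a typical caller writes new instructions to memory and then runs flush_icache_range() over that range before executing it. The helper and its names below are purely illustrative:

```c
#include <stdint.h>

extern void flush_icache_range(unsigned long start, unsigned long end);

/* Illustrative helper: patch one instruction and make it visible to the I-side. */
static void patch_one_insn(uint32_t *addr, uint32_t insn)
{
	*addr = insn;					/* write via the D-cache        */
	flush_icache_range((unsigned long)addr,		/* clean D to PoU, invalidate I */
			   (unsigned long)addr + sizeof(insn));
}
```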

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that the data held in the area [kaddr, kaddr + size) is
 *	written back to memory (cleaned) and invalidated from the D-cache.
 *
 *	- kaddr   - kernel start address of the region
 *	- size    - size of the region in bytes
 */
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0			// clean & invalidate D line / unified line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__flush_dcache_area)
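Finally, a hedged sketch of how __flush_dcache_area() might be called; the prototype follows the register usage above (x0 = kernel address, x1 = size) and the buffer name is illustrative:

```c
#include <stddef.h>

extern void __flush_dcache_area(void *addr, size_t len);

/* Illustrative: push a buffer out of the D-cache (clean + invalidate by VA)
 * before, say, handing it to a non-coherent device. */
static void flush_buffer_to_poc(void *buf, size_t len)
{
	__flush_dcache_area(buf, len);
}
```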