path: root/arch/arm64/mm
author    Catalin Marinas <catalin.marinas@arm.com>    2012-03-05 06:49:28 -0500
committer Catalin Marinas <catalin.marinas@arm.com>    2012-09-17 08:42:00 -0400
commit    f1a0c4aa0937975b53991842a494f741d7769b02 (patch)
tree      9b344c5267cb982b14a2372a0a20714f5b36d61a /arch/arm64/mm
parent    9cce7a435f89c9e60f244d44da2cf1cf4ed094ac (diff)
arm64: Cache maintenance routines
The patch adds functionality required for cache maintenance. The AArch64
architecture mandates non-aliasing VIPT or PIPT D-cache and VIPT (may have
aliases) or ASID-tagged VIVT I-cache. Cache maintenance operations are
automatically broadcast in hardware between CPUs.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S  168
-rw-r--r--  arch/arm64/mm/flush.c  135
2 files changed, 303 insertions, 0 deletions
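
As a quick illustration of how the new flush_icache_range() interface is meant
to be used, here is a minimal sketch (not part of the patch; patch_and_sync(),
the buffer and the instruction value are hypothetical). A caller that writes
instructions into memory must make the D and I caches coherent before the code
is executed:

/*
 * Hypothetical caller: after storing new instructions through the data
 * side, clean the D-cache to the point of unification and invalidate
 * the corresponding I-cache lines before the code is executed.
 */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void patch_and_sync(u32 *insns, int nr_insns, u32 new_insn)
{
        int i;

        for (i = 0; i < nr_insns; i++)
                insns[i] = new_insn;            /* writes land in the D-cache */

        /* Clean D-cache to PoU and invalidate I-cache for this range. */
        flush_icache_range((unsigned long)insns,
                           (unsigned long)(insns + nr_insns));
}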
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
new file mode 100644
index 000000000000..abe69b80cf7f
--- /dev/null
+++ b/arch/arm64/mm/cache.S
@@ -0,0 +1,168 @@
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 *	__flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: x0-x7, x9-x11
 */
ENTRY(__flush_dcache_all)
	dsb	sy			// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1		// read clidr
	and	x3, x0, #0x7000000	// extract loc from clidr
	lsr	x3, x3, #23		// left align loc bit field
	cbz	x3, finished		// if loc is 0, then no need to clean
	mov	x10, #0			// start clean at cache level 0
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask of the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	skip			// skip if no cache, or just i-cache
	save_and_disable_irqs x9	// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr & ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	restore_irqs x9
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3	// extract the maximum way number
	clz	x5, x4			// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13	// extract the maximum set (index) number
loop2:
	mov	x9, x4			// create working copy of max way size
loop3:
	lsl	x6, x9, x5
	orr	x11, x10, x6		// factor way and cache number into x11
	lsl	x6, x7, x2
	orr	x11, x11, x6		// factor index number into x11
	dc	cisw, x11		// clean & invalidate by set/way
	subs	x9, x9, #1		// decrement the way
	b.ge	loop3
	subs	x7, x7, #1		// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0			// switch back to cache level 0
	msr	csselr_el1, x10		// select current cache level in csselr
	dsb	sy
	isb
	ret
ENDPROC(__flush_dcache_all)

/*
 *	flush_cache_all()
 *
 *	Flush the entire cache system.  The data cache flush is now achieved
 *	using atomic clean / invalidates working outwards from L1 cache. This
 *	is done using Set/Way based cache maintenance instructions.  The
 *	instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(flush_cache_all)
	mov	x12, lr
	bl	__flush_dcache_all
	mov	x0, #0
	ic	ialluis			// I+BTB cache invalidate
	ret	x12
ENDPROC(flush_cache_all)

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to a
 *	memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, dc	cvau, x4 )		// clean D line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	sy

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, ic	ivau, x4 )		// invalidate I line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
9:					// ignore any faulting cache operation
	dsb	sy
	isb
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that the data held in the page kaddr is written back to the
 *	page in question.
 *
 *	- kaddr	- kernel address
 *	- size	- size in question
 */
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		// clean & invalidate D line / unified line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__flush_dcache_area)
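
For readers unfamiliar with set/way maintenance, the loops in __flush_dcache_all
above pack the cache level, way number and set number into the operand of the
dc cisw instruction. The C fragment below is only an illustration of that
packing; the helper name and parameters are invented, and line_shift / way_shift
correspond to the values held in x2 and x5 in the assembly:

/*
 * Illustration only: build the "dc cisw" operand for one (level, way, set)
 * triple.  line_shift is log2(line size in bytes) taken from CCSIDR_EL1,
 * and way_shift is the bit position that left-aligns the way number, as
 * computed with clz in the assembly above.
 */
#include <stdint.h>

static uint64_t dc_cisw_operand(unsigned int level, unsigned int way,
                                unsigned int set, unsigned int line_shift,
                                unsigned int way_shift)
{
        uint64_t val;

        val  = (uint64_t)level << 1;            /* bits [3:1]: cache level          */
        val |= (uint64_t)way << way_shift;      /* way number, left-aligned         */
        val |= (uint64_t)set << line_shift;     /* set number above the line offset */
        return val;
}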
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
new file mode 100644
index 000000000000..c144adb1682f
--- /dev/null
+++ b/arch/arm64/mm/flush.c
@@ -0,0 +1,135 @@
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

void flush_cache_mm(struct mm_struct *mm)
{
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
		      unsigned long pfn)
{
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space.  Really, we want to allow our "user space" model to handle
 * this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct page *page)
{
	__flush_dcache_area(page_address(page), PAGE_SIZE);
}

void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	unsigned long pfn;
	struct page *page;

	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_page(page);
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping of this
 * page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty cache
	 * lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping_mapped(mapping)) {
		__flush_dcache_page(page);
		__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	} else {
		clear_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
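
Finally, a hedged sketch of how the flush_dcache_page() hook above is typically
exercised; fill_page_and_flush() is hypothetical, and kmap_atomic() /
kunmap_atomic() are generic kernel helpers, not part of this patch. Code that
dirties a page-cache page through its kernel mapping is expected to call
flush_dcache_page() so that user mappings (and the I-cache, for executable
mappings) observe the new data:

/*
 * Hypothetical driver-style caller: write to a page through the kernel
 * mapping, then let the arch code decide whether the D-cache (and
 * I-cache) need flushing for this page.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void fill_page_and_flush(struct page *page, int val)
{
        void *kaddr = kmap_atomic(page);

        memset(kaddr, val, PAGE_SIZE);          /* dirty the kernel-mapping lines */
        kunmap_atomic(kaddr);

        flush_dcache_page(page);                /* keep other mappings coherent */
}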