author		Catalin Marinas <catalin.marinas@arm.com>	2012-03-05 06:49:28 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2012-09-17 08:42:00 -0400
commit		f1a0c4aa0937975b53991842a494f741d7769b02 (patch)
tree		9b344c5267cb982b14a2372a0a20714f5b36d61a /arch/arm64/include/asm/cacheflush.h
parent		9cce7a435f89c9e60f244d44da2cf1cf4ed094ac (diff)
arm64: Cache maintenance routines
The patch adds functionality required for cache maintenance. The AArch64
architecture mandates non-aliasing VIPT or PIPT D-cache and VIPT (may have
aliases) or ASID-tagged VIVT I-cache. Cache maintenance operations are
automatically broadcast in hardware between CPUs.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
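For executable regions, coherency between the two caches is achieved by
cleaning the D-cache to the point of unification (PoU) and then invalidating
the I-cache, with barriers ordering the steps. As a hedged sketch (the real
loop lives in arch/arm64/mm/cache.S; "addr" here is an assumed
cache-line-aligned address), the per-line sequence behind flush_icache_range()
looks like:

	asm volatile("dc	cvau, %0\n\t"	/* clean D-cache line to PoU */
		     "dsb	ish\n\t"	/* order clean before invalidate */
		     "ic	ivau, %0\n\t"	/* invalidate I-cache line to PoU */
		     "dsb	ish\n\t"	/* complete the invalidate */
		     "isb"			/* resync the instruction stream */
		     : : "r" (addr) : "memory");

The hardware broadcast noted above is what makes this sufficient on SMP: the
by-VA and Inner Shareable maintenance operations are propagated to the other
CPUs in the Inner Shareable domain without software IPIs.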
Diffstat (limited to 'arch/arm64/include/asm/cacheflush.h')
-rw-r--r--	arch/arm64/include/asm/cacheflush.h	| 148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
new file mode 100644
index 000000000000..aa3132ab7f29
--- /dev/null
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -0,0 +1,148 @@
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean		PG_arch_1
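
/*
 * Illustrative sketch (not part of the original patch): the flag lives in
 * page->flags and is manipulated with the standard bit operations, e.g.
 *
 *	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *		__flush_dcache_area(page_address(page), PAGE_SIZE);
 *
 * so the D-cache is only cleaned the first time a page is mapped into user
 * space; later faults find the bit already set and skip the maintenance.
 */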

/*
 * MM Cache Management
 * ===================
 *
 * These methods are implemented in arch/arm64/mm/cache.S.
 *
 * Start addresses are inclusive and end addresses are exclusive; start
 * addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information. Please note that
 * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 * VIPT or ASID-tagged VIVT I-cache.
 *
 *	flush_cache_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __flush_cache_user_range(unsigned long start, unsigned long end);
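
/*
 * Usage sketch (illustrative, not part of the original patch): anything
 * that writes instructions through a kernel mapping, e.g. a module
 * loader or a JIT, must make them visible to instruction fetch before
 * jumping to them:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 *
 * which cleans the D-cache to the point of unification and invalidates
 * the corresponding I-cache lines.
 */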

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
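
/*
 * Sketch of the writing side (hedged; the real implementation lives in
 * arch/arm64/mm/flush.c): copy_to_user_page() is what ptrace uses to
 * plant breakpoints, so after the copy it must make the new bytes
 * visible to instruction fetch when the target VMA is executable:
 *
 *	memcpy(dst, src, len);
 *	if (vma->vm_flags & VM_EXEC)
 *		flush_icache_range((unsigned long)dst,
 *				   (unsigned long)dst + len);
 */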

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
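
/*
 * Deferral sketch (illustrative, not the actual arch/arm64/mm/flush.c
 * code): for a mapped page-cache page the kernel can simply record that
 * its mapping may now be dirty,
 *
 *	clear_bit(PG_dcache_clean, &page->flags);
 *
 * and leave the actual maintenance to update_mmu_cache(), which performs
 * it via the test_and_set_bit() pattern shown above when the page is next
 * mapped into user space.
 */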

static inline void __flush_icache_all(void)
{
	/* invalidate all I-caches to the PoU, Inner Shareable domain */
	asm("ic	ialluis");
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma, page, addr, len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here. In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page) do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/*
	 * set_pte_at() called from vmap_pte_range() does not
	 * have a DSB after cleaning the cache line.
	 */
	dsb();
}
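
/*
 * Illustrative ordering (not part of the original patch) of why the
 * dsb() above is needed; the generic vmap path is roughly:
 *
 *	vmap_pte_range()
 *		set_pte_at(&init_mm, addr, pte, ...);	// PTE stores, no barrier
 *	flush_cache_vmap(start, end);			// dsb() makes the new
 *							// PTEs visible to the
 *							// table walker before
 *							// the mapping is used
 */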

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

#endif