author     Victor Kamensky <victor.kamensky@linaro.org>  2014-04-28 23:20:52 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2014-05-25 18:48:45 -0400
commit     72e6ae285a1dbff553734985bedadf409d99c02d (patch)
tree       e431dde4c5ddcba69a1c668e9ff9d60491a736c6 /arch/arm/mm/flush.c
parent     166aaf396654b533f536f2cf84d7558eb42f1c9f (diff)
ARM: 8043/1: uprobes need icache flush after xol write
After an instruction is written into the xol area on the ARMv7
architecture, the code needs to flush both the dcache and the
icache to bring them in sync for the affected addresses. A
'flush_dcache_page(page)' call alone is not enough: a stale
instruction may still sit in the icache for the given xol area
slot address.
Introduce arch_uprobe_copy_ixol, a weak function that by default
calls the uprobes copy_to_page function and then flush_dcache_page,
and provide an ARM definition that handles the xol slot copy in an
ARM-specific way.
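For reference, the generic weak default described above amounts to
roughly the following sketch (reconstructed from this description,
not quoted verbatim from kernel/events/uprobes.c; copy_to_page is
the uprobes-internal helper, and the comment wording is illustrative):

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                                  void *src, unsigned long len)
{
        /* Initialize the xol slot with the probed instruction. */
        copy_to_page(page, vaddr, src, len);

        /*
         * Sufficient on architectures whose icache is coherent with
         * (or transparent to) the dcache; ARM overrides this.
         */
        flush_dcache_page(page);
}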
The flush_uprobe_xol_access function shares its implementation with
flush_ptrace_access and takes care of flushing an instruction written
to a userland address across the variety of cache types found on ARM
CPUs. Because flush_uprobe_xol_access has no vma available,
flush_ptrace_access was split into two parts: one that derives the
set of conditions from the vma, and a common part that receives those
conditions as flags.
Note that the ARM cache flush functions need the kernel address
through which the instruction write happened, so instead of using
the uprobes copy_to_page function, the code now explicitly maps the
page and does the memcpy itself.
Note that arch_uprobe_copy_ixol, like copy_to_user_page, brackets
the operation with preempt_disable/preempt_enable.
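Put together, the ARM override described in the last two notes would
look roughly like this (a sketch reconstructed from the description
above, not a verbatim quote of the arch/arm/kernel/uprobes.c code;
the exact placement of the preempt markers there may differ, since
kmap_atomic itself also disables preemption):

void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                           void *src, unsigned long len)
{
        void *kaddr = kmap_atomic(page);
        void *dst = kaddr + (vaddr & ~PAGE_MASK);

        preempt_disable();

        /* Write the instruction through a kernel mapping... */
        memcpy(dst, src, len);

        /* ...so the flush routine has the kernel address it needs. */
        flush_uprobe_xol_access(page, vaddr, dst, len);

        preempt_enable();
        kunmap_atomic(kaddr);
}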
Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/flush.c')
-rw-r--r--  arch/arm/mm/flush.c | 33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 3387e60e4ea3..43d54f5b26b9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 #define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
 #endif
 
+#define FLAG_PA_IS_EXEC 1
+#define FLAG_PA_CORE_IN_MM 2
+
 static void flush_ptrace_access_other(void *args)
 {
         __flush_icache_all();
 }
 
-static
-void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                         unsigned long uaddr, void *kaddr, unsigned long len)
+static inline
+void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
+                           unsigned long len, unsigned int flags)
 {
         if (cache_is_vivt()) {
-                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+                if (flags & FLAG_PA_CORE_IN_MM) {
                         unsigned long addr = (unsigned long)kaddr;
                         __cpuc_coherent_kern_range(addr, addr + len);
                 }
@@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
         }
 
         /* VIPT non-aliasing D-cache */
-        if (vma->vm_flags & VM_EXEC) {
+        if (flags & FLAG_PA_IS_EXEC) {
                 unsigned long addr = (unsigned long)kaddr;
                 if (icache_is_vipt_aliasing())
                         flush_icache_alias(page_to_pfn(page), uaddr, len);
@@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
         }
 }
 
+static
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+                         unsigned long uaddr, void *kaddr, unsigned long len)
+{
+        unsigned int flags = 0;
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+                flags |= FLAG_PA_CORE_IN_MM;
+        if (vma->vm_flags & VM_EXEC)
+                flags |= FLAG_PA_IS_EXEC;
+        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
+}
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+                             void *kaddr, unsigned long len)
+{
+        unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
+
+        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
+}
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user