author    Victor Kamensky <victor.kamensky@linaro.org>  2014-04-28 23:20:52 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2014-05-25 18:48:45 -0400
commit    72e6ae285a1dbff553734985bedadf409d99c02d
tree      e431dde4c5ddcba69a1c668e9ff9d60491a736c6
parent    166aaf396654b533f536f2cf84d7558eb42f1c9f
ARM: 8043/1: uprobes need icache flush after xol write
After an instruction is written into the xol area, code on the ARMv7 architecture needs to flush the dcache and icache to sync them up for the given set of addresses. A bare 'flush_dcache_page(page)' call is not enough: a stale instruction can still sit in the icache for the xol area slot's address.

Introduce the weak function arch_uprobe_copy_ixol(), whose default implementation calls the uprobes copy_to_page() function and then flush_dcache_page(), and define an ARM version that handles the xol slot copy in an ARM-specific way.

The new flush_uprobe_xol_access() function shares its implementation with flush_ptrace_access() and takes care of flushing an instruction written to a userland address across the variety of cache types found on ARM CPUs. Because flush_uprobe_xol_access() does not have a vma around, flush_ptrace_access() was split into two parts: one that derives the set of conditions from the vma, and a common part that receives those conditions as flags.

Note that the ARM cache flush functions need the kernel address through which the instruction write happened, so instead of using the uprobes copy_to_page() function the ARM code explicitly maps the page and does the memcpy itself.

Note also that arch_uprobe_copy_ixol(), in the same way as copy_to_user_page(), brackets the copy with preempt_disable()/preempt_enable().

Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
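The override relies on weak linkage: the generic arch_uprobe_copy_ixol() in kernel/events/uprobes.c is defined __weak, so the strong definition added in arch/arm/kernel/uprobes.c replaces it at link time with no change to the caller. A minimal standalone sketch of that pattern, compilable with plain GCC; the copy_ixol name and the printouts are illustrative only, not kernel API:

	#include <stdio.h>

	/* Weak default, standing in for the generic arch_uprobe_copy_ixol(). */
	__attribute__((weak)) void copy_ixol(void)
	{
		printf("generic: copy_to_page() + flush_dcache_page()\n");
	}

	int main(void)
	{
		/*
		 * With only this file linked, the weak default runs.  If
		 * another object file supplies a strong copy_ixol() -- as
		 * arch/arm/kernel/uprobes.c does for the real function --
		 * the linker picks that definition instead.
		 */
		copy_ixol();
		return 0;
	}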
 arch/arm/include/asm/cacheflush.h |  2
 arch/arm/kernel/uprobes.c         | 20
 arch/arm/mm/flush.c               | 33
 include/linux/uprobes.h           |  3
 kernel/events/uprobes.c           | 25
 5 files changed, 70 insertions(+), 13 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 00af9fe435e6..fd43f7f55b70 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+			     void *kaddr, unsigned long len);
 #endif
diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c
index f9bacee973bf..56adf9c1fde0 100644
--- a/arch/arm/kernel/uprobes.c
+++ b/arch/arm/kernel/uprobes.c
@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return 0;
 }
 
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+			   void *src, unsigned long len)
+{
+	void *xol_page_kaddr = kmap_atomic(page);
+	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+	preempt_disable();
+
+	/* Initialize the slot */
+	memcpy(dst, src, len);
+
+	/* flush caches (dcache/icache) */
+	flush_uprobe_xol_access(page, vaddr, dst, len);
+
+	preempt_enable();
+
+	kunmap_atomic(xol_page_kaddr);
+}
+
+
 int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
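The 'vaddr & ~PAGE_MASK' expression in the new function above extracts the slot's byte offset within its page, which is then added to the kernel mapping returned by kmap_atomic(). A standalone sketch of that arithmetic, assuming 4 KiB pages and an arbitrary example address:

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assuming 4 KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long vaddr = 0xb6f01a84UL;	/* arbitrary xol slot address */

		/* & PAGE_MASK keeps the page base; & ~PAGE_MASK keeps the offset */
		printf("page base: %#lx\n", vaddr & PAGE_MASK);		/* 0xb6f01000 */
		printf("offset:    %#lx\n", vaddr & ~PAGE_MASK);	/* 0xa84 */
		return 0;
	}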
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 3387e60e4ea3..43d54f5b26b9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 #define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
 #endif
 
+#define FLAG_PA_IS_EXEC 1
+#define FLAG_PA_CORE_IN_MM 2
+
 static void flush_ptrace_access_other(void *args)
 {
 	__flush_icache_all();
 }
 
-static
-void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr, unsigned long len)
+static inline
+void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
+			   unsigned long len, unsigned int flags)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+		if (flags & FLAG_PA_CORE_IN_MM) {
 			unsigned long addr = (unsigned long)kaddr;
 			__cpuc_coherent_kern_range(addr, addr + len);
 		}
@@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing D-cache */
-	if (vma->vm_flags & VM_EXEC) {
+	if (flags & FLAG_PA_IS_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
 		if (icache_is_vipt_aliasing())
 			flush_icache_alias(page_to_pfn(page), uaddr, len);
@@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 }
 
+static
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr, unsigned long len)
+{
+	unsigned int flags = 0;
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+		flags |= FLAG_PA_CORE_IN_MM;
+	if (vma->vm_flags & VM_EXEC)
+		flags |= FLAG_PA_IS_EXEC;
+	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
+}
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+			     void *kaddr, unsigned long len)
+{
+	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
+
+	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
+}
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index edff2b97b864..c52f827ba6ce 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -32,6 +32,7 @@ struct vm_area_struct;
 struct mm_struct;
 struct inode;
 struct notifier_block;
+struct page;
 
 #define UPROBE_HANDLER_REMOVE		1
 #define UPROBE_HANDLER_MASK		1
@@ -127,6 +128,8 @@ extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned l
 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
 extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+					 void *src, unsigned long len);
 #else /* !CONFIG_UPROBES */
 struct uprobes_state {
 };
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 04709b66369d..4968213c63fa 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1296,14 +1296,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 	if (unlikely(!xol_vaddr))
 		return 0;
 
-	/* Initialize the slot */
-	copy_to_page(area->page, xol_vaddr,
-			&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
-	/*
-	 * We probably need flush_icache_user_range() but it needs vma.
-	 * This should work on supported architectures too.
-	 */
-	flush_dcache_page(area->page);
+	arch_uprobe_copy_ixol(area->page, xol_vaddr,
+			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 
 	return xol_vaddr;
 }
@@ -1346,6 +1340,21 @@ static void xol_free_insn_slot(struct task_struct *tsk)
 	}
 }
 
+void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+				  void *src, unsigned long len)
+{
+	/* Initialize the slot */
+	copy_to_page(page, vaddr, src, len);
+
+	/*
+	 * We probably need flush_icache_user_range() but it needs vma.
+	 * This should work on most of architectures by default. If
+	 * architecture needs to do something different it can define
+	 * its own version of the function.
+	 */
+	flush_dcache_page(page);
+}
+
 /**
  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
  * @regs: Reflects the saved state of the task after it has hit a breakpoint