diff options
author | Oleg Nesterov <oleg@redhat.com> | 2013-03-24 13:58:04 -0400 |
---|---|---|
committer | Oleg Nesterov <oleg@redhat.com> | 2013-04-04 07:57:05 -0400 |
commit | 5669ccee21d87622f30a724b3fe0d04ec5b0afae (patch) | |
tree | b8eda48d364795c9ccb07c9df9f29fbf9fa7199e /kernel/events | |
parent | 98763a1bb1515f8a8d7f1d9ae42604e19872364b (diff) |
uprobes: Introduce copy_to_page()
Extract the kmap_atomic/memcpy/kunmap_atomic code from
xol_get_insn_slot() into the new simple helper, copy_to_page().
It will have more users soon.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/uprobes.c | 14 |
1 file changed, 8 insertions, 6 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 093866547fe3..b8255eaca190 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -194,6 +194,13 @@ static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, in | |||
194 | kunmap_atomic(kaddr); | 194 | kunmap_atomic(kaddr); |
195 | } | 195 | } |
196 | 196 | ||
197 | static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) | ||
198 | { | ||
199 | void *kaddr = kmap_atomic(page); | ||
200 | memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len); | ||
201 | kunmap_atomic(kaddr); | ||
202 | } | ||
203 | |||
197 | static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) | 204 | static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) |
198 | { | 205 | { |
199 | uprobe_opcode_t old_opcode; | 206 | uprobe_opcode_t old_opcode; |
@@ -1227,9 +1234,7 @@ static unsigned long xol_take_insn_slot(struct xol_area *area) | |||
1227 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe) | 1234 | static unsigned long xol_get_insn_slot(struct uprobe *uprobe) |
1228 | { | 1235 | { |
1229 | struct xol_area *area; | 1236 | struct xol_area *area; |
1230 | unsigned long offset; | ||
1231 | unsigned long xol_vaddr; | 1237 | unsigned long xol_vaddr; |
1232 | void *vaddr; | ||
1233 | 1238 | ||
1234 | area = get_xol_area(); | 1239 | area = get_xol_area(); |
1235 | if (!area) | 1240 | if (!area) |
@@ -1240,10 +1245,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) | |||
1240 | return 0; | 1245 | return 0; |
1241 | 1246 | ||
1242 | /* Initialize the slot */ | 1247 | /* Initialize the slot */ |
1243 | offset = xol_vaddr & ~PAGE_MASK; | 1248 | copy_to_page(area->page, xol_vaddr, uprobe->arch.insn, MAX_UINSN_BYTES); |
1244 | vaddr = kmap_atomic(area->page); | ||
1245 | memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES); | ||
1246 | kunmap_atomic(vaddr); | ||
1247 | /* | 1249 | /* |
1248 | * We probably need flush_icache_user_range() but it needs vma. | 1250 | * We probably need flush_icache_user_range() but it needs vma. |
1249 | * This should work on supported architectures too. | 1251 | * This should work on supported architectures too. |