author     Srikar Dronamraju <srikar@linux.vnet.ibm.com>    2012-02-22 04:16:02 -0500
committer  Ingo Molnar <mingo@elte.hu>                      2012-02-22 05:26:09 -0500
commit     3ff54efdfaace9e9b2b7c1959a865be6b91de96c
tree       6218c7b40bd80704d059d5f3fa434974e675b070
parent     96379f60075c75b261328aa7830ef8aa158247ac
uprobes/core: Move insn to arch specific structure
A few cleanups suggested by Ingo Molnar:
- Rename struct uprobe_arch_info to struct arch_uprobe.
- Move insn from struct uprobe to struct arch_uprobe.
- Make the arch-specific uprobe functions accept struct arch_uprobe
  instead of struct uprobe.
- Move struct uprobe from include/linux/uprobes.h to kernel/events/uprobes.c.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@linux.vnet.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Josh Stone <jistone@redhat.com>
Link: http://lkml.kernel.org/r/20120222091602.15880.40249.sendpatchset@srdronam.in.ibm.com
[ Made various small improvements ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/include/asm/uprobes.h |  6
-rw-r--r--  arch/x86/kernel/uprobes.c      | 60
-rw-r--r--  include/linux/uprobes.h        | 23
-rw-r--r--  kernel/events/uprobes.c        | 38
4 files changed, 61 insertions, 66 deletions
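
For orientation before the hunks themselves, the net effect of the move can be sketched as a small stand-alone C snippet. This is not the kernel header: u8/u16 are spelled out with <stdint.h> types, MAX_UINSN_BYTES is given a placeholder value, and the bookkeeping fields of struct uprobe are trimmed to a representative pair.

/* Condensed sketch of the layout after this patch (placeholder value for
 * MAX_UINSN_BYTES, abbreviated field list -- not the kernel definitions). */
#include <stdint.h>

#define MAX_UINSN_BYTES	16	/* placeholder for the sketch */

/* The copied instruction and the x86 fixup state now live together ... */
struct arch_uprobe {
	uint16_t	fixups;
	uint8_t		insn[MAX_UINSN_BYTES];
#ifdef CONFIG_X86_64
	unsigned long	rip_rela_target_address;
#endif
};

/* ... and struct uprobe, now private to kernel/events/uprobes.c, embeds the
 * arch part instead of carrying insn[] itself. */
struct uprobe_sketch {
	long			offset;	/* rb_node, refcount, consumers, ... omitted */
	int			flags;
	struct arch_uprobe	arch;
};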
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 072df3902636..f7ce310a429d 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -31,13 +31,13 @@ typedef u8 uprobe_opcode_t;
 #define UPROBES_BKPT_INSN		0xcc
 #define UPROBES_BKPT_INSN_SIZE		1
 
-struct uprobe_arch_info {
+struct arch_uprobe {
 	u16			fixups;
+	u8			insn[MAX_UINSN_BYTES];
 #ifdef CONFIG_X86_64
 	unsigned long		rip_rela_target_address;
 #endif
 };
 
-struct uprobe;
-extern int arch_uprobes_analyze_insn(struct mm_struct *mm, struct uprobe *uprobe);
+extern int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *arch_uprobe);
 #endif	/* _ASM_UPROBES_H */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 13d616d6519b..04dfcef2d028 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -200,9 +200,9 @@ static bool is_prefix_bad(struct insn *insn)
 	return false;
 }
 
-static int validate_insn_32bits(struct uprobe *uprobe, struct insn *insn)
+static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
 {
-	insn_init(insn, uprobe->insn, false);
+	insn_init(insn, auprobe->insn, false);
 
 	/* Skip good instruction prefixes; reject "bad" ones. */
 	insn_get_opcode(insn);
@@ -222,11 +222,11 @@ static int validate_insn_32bits(struct uprobe *uprobe, struct insn *insn)
 
 /*
  * Figure out which fixups post_xol() will need to perform, and annotate
- * uprobe->arch_info.fixups accordingly. To start with,
- * uprobe->arch_info.fixups is either zero or it reflects rip-related
+ * arch_uprobe->fixups accordingly. To start with,
+ * arch_uprobe->fixups is either zero or it reflects rip-related
  * fixups.
  */
-static void prepare_fixups(struct uprobe *uprobe, struct insn *insn)
+static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
 {
 	bool fix_ip = true, fix_call = false;	/* defaults */
 	int reg;
@@ -269,17 +269,17 @@ static void prepare_fixups(struct uprobe *uprobe, struct insn *insn)
 		break;
 	}
 	if (fix_ip)
-		uprobe->arch_info.fixups |= UPROBES_FIX_IP;
+		auprobe->fixups |= UPROBES_FIX_IP;
 	if (fix_call)
-		uprobe->arch_info.fixups |= UPROBES_FIX_CALL;
+		auprobe->fixups |= UPROBES_FIX_CALL;
 }
 
 #ifdef CONFIG_X86_64
 /*
- * If uprobe->insn doesn't use rip-relative addressing, return
+ * If arch_uprobe->insn doesn't use rip-relative addressing, return
  * immediately. Otherwise, rewrite the instruction so that it accesses
  * its memory operand indirectly through a scratch register. Set
- * uprobe->arch_info.fixups and uprobe->arch_info.rip_rela_target_address
+ * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
  * accordingly. (The contents of the scratch register will be saved
  * before we single-step the modified instruction, and restored
  * afterward.)
@@ -297,7 +297,7 @@ static void prepare_fixups(struct uprobe *uprobe, struct insn *insn)
  * - There's never a SIB byte.
  * - The displacement is always 4 bytes.
  */
-static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
+static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
 {
 	u8 *cursor;
 	u8 reg;
@@ -305,7 +305,7 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, stru
 	if (mm->context.ia32_compat)
 		return;
 
-	uprobe->arch_info.rip_rela_target_address = 0x0;
+	auprobe->rip_rela_target_address = 0x0;
 	if (!insn_rip_relative(insn))
 		return;
 
@@ -315,7 +315,7 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, stru
 	 * we want to encode rax/rcx, not r8/r9.
 	 */
 	if (insn->rex_prefix.nbytes) {
-		cursor = uprobe->insn + insn_offset_rex_prefix(insn);
+		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
 		*cursor &= 0xfe;	/* Clearing REX.B bit */
 	}
 
@@ -324,7 +324,7 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, stru
 	 * displacement. Beyond the displacement, for some instructions,
 	 * is the immediate operand.
 	 */
-	cursor = uprobe->insn + insn_offset_modrm(insn);
+	cursor = auprobe->insn + insn_offset_modrm(insn);
 	insn_get_length(insn);
 
 	/*
@@ -341,18 +341,18 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, stru
 		 * is NOT the register operand, so we use %rcx (register
 		 * #1) for the scratch register.
 		 */
-		uprobe->arch_info.fixups = UPROBES_FIX_RIP_CX;
+		auprobe->fixups = UPROBES_FIX_RIP_CX;
 		/* Change modrm from 00 000 101 to 00 000 001. */
 		*cursor = 0x1;
 	} else {
 		/* Use %rax (register #0) for the scratch register. */
-		uprobe->arch_info.fixups = UPROBES_FIX_RIP_AX;
+		auprobe->fixups = UPROBES_FIX_RIP_AX;
 		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
 		*cursor = (reg << 3);
 	}
 
 	/* Target address = address of next instruction + (signed) offset */
-	uprobe->arch_info.rip_rela_target_address = (long)insn->length + insn->displacement.value;
+	auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;
 
 	/* Displacement field is gone; slide immediate field (if any) over. */
 	if (insn->immediate.nbytes) {
@@ -362,9 +362,9 @@ static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, stru
 	return;
 }
 
-static int validate_insn_64bits(struct uprobe *uprobe, struct insn *insn)
+static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
 {
-	insn_init(insn, uprobe->insn, true);
+	insn_init(insn, auprobe->insn, true);
 
 	/* Skip good instruction prefixes; reject "bad" ones. */
 	insn_get_opcode(insn);
@@ -381,42 +381,42 @@ static int validate_insn_64bits(struct uprobe *uprobe, struct insn *insn)
 	return -ENOTSUPP;
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
+static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
 {
 	if (mm->context.ia32_compat)
-		return validate_insn_32bits(uprobe, insn);
-	return validate_insn_64bits(uprobe, insn);
+		return validate_insn_32bits(auprobe, insn);
+	return validate_insn_64bits(auprobe, insn);
 }
 #else /* 32-bit: */
-static void handle_riprel_insn(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
+static void handle_riprel_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
 {
 	/* No RIP-relative addressing on 32-bit */
 }
 
-static int validate_insn_bits(struct mm_struct *mm, struct uprobe *uprobe, struct insn *insn)
+static int validate_insn_bits(struct mm_struct *mm, struct arch_uprobe *auprobe, struct insn *insn)
 {
-	return validate_insn_32bits(uprobe, insn);
+	return validate_insn_32bits(auprobe, insn);
 }
 #endif /* CONFIG_X86_64 */
 
 /**
  * arch_uprobes_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
- * @uprobe: the probepoint information.
+ * @arch_uprobe: the probepoint information.
  * Return 0 on success or a -ve number on error.
  */
-int arch_uprobes_analyze_insn(struct mm_struct *mm, struct uprobe *uprobe)
+int arch_uprobes_analyze_insn(struct mm_struct *mm, struct arch_uprobe *auprobe)
 {
 	int ret;
 	struct insn insn;
 
-	uprobe->arch_info.fixups = 0;
-	ret = validate_insn_bits(mm, uprobe, &insn);
+	auprobe->fixups = 0;
+	ret = validate_insn_bits(mm, auprobe, &insn);
 	if (ret != 0)
 		return ret;
 
-	handle_riprel_insn(mm, uprobe, &insn);
-	prepare_fixups(uprobe, &insn);
+	handle_riprel_insn(mm, auprobe, &insn);
+	prepare_fixups(auprobe, &insn);
 
 	return 0;
 }
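
The block comments in handle_riprel_insn() above describe the ModRM rewrite in the abstract. The snippet below works the same byte manipulation through one hypothetical instruction in user space: mov 0x12345678(%rip),%rcx (48 8b 0d 78 56 34 12). Its reg field is 001, so it falls into the %rax-scratch branch shown in the hunk (ModRM 00 xxx 101 becomes 00 xxx 000, the displacement is dropped, and its value is remembered separately). This is a sketch of the technique, not kernel code; it assumes a little-endian host, and the REX.B clearing shown in the diff is a no-op here because the prefix is 0x48.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* mov 0x12345678(%rip),%rcx : REX.W, opcode 8b, ModRM 0x0d, disp32 */
	uint8_t insn[] = { 0x48, 0x8b, 0x0d, 0x78, 0x56, 0x34, 0x12 };
	size_t len = sizeof(insn);
	size_t modrm = 2;			/* ModRM follows REX prefix + opcode */
	uint8_t reg = (insn[modrm] >> 3) & 0x7;	/* 001 == %rcx */
	int32_t disp;

	/* Target address = address of next instruction + (signed) offset,
	 * i.e. what the patch stores in rip_rela_target_address. */
	memcpy(&disp, &insn[modrm + 1], sizeof(disp));
	long rip_rela_target_address = (long)len + disp;

	/* Use %rax as the scratch register: change ModRM 00 xxx 101 -> 00 xxx 000. */
	insn[modrm] = (uint8_t)(reg << 3);

	/* Displacement field is gone; there is no immediate to slide over here,
	 * so the rewritten instruction is just the first three bytes:
	 * 48 8b 08 == mov (%rax),%rcx. */
	printf("rewritten: %02x %02x %02x, target offset = %#lx\n",
	       insn[0], insn[1], insn[modrm], rip_rela_target_address);
	return 0;
}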
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index fd45b70750d4..9c6be62787ed 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -29,12 +29,6 @@
 struct vm_area_struct;
 #ifdef CONFIG_ARCH_SUPPORTS_UPROBES
 #include <asm/uprobes.h>
-#else
-
-typedef u8 uprobe_opcode_t;
-struct uprobe_arch_info {};
-
-#define MAX_UINSN_BYTES 4
 #endif
 
 /* flags that denote/change uprobes behaviour */
@@ -56,22 +50,9 @@ struct uprobe_consumer {
 	struct uprobe_consumer *next;
 };
 
-struct uprobe {
-	struct rb_node		rb_node;	/* node in the rb tree */
-	atomic_t		ref;
-	struct rw_semaphore	consumer_rwsem;
-	struct list_head	pending_list;
-	struct uprobe_arch_info	arch_info;
-	struct uprobe_consumer	*consumers;
-	struct inode		*inode;		/* Also hold a ref to inode */
-	loff_t			offset;
-	int			flags;
-	u8			insn[MAX_UINSN_BYTES];
-};
-
 #ifdef CONFIG_UPROBES
-extern int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr);
-extern int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr, bool verify);
+extern int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr);
+extern int __weak set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify);
 extern bool __weak is_bkpt_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *consumer);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ee496ad95db3..13f1b5909af4 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -65,6 +65,18 @@ struct vma_info {
 	loff_t vaddr;
 };
 
+struct uprobe {
+	struct rb_node		rb_node;	/* node in the rb tree */
+	atomic_t		ref;
+	struct rw_semaphore	consumer_rwsem;
+	struct list_head	pending_list;
+	struct uprobe_consumer	*consumers;
+	struct inode		*inode;		/* Also hold a ref to inode */
+	loff_t			offset;
+	int			flags;
+	struct arch_uprobe	arch;
+};
+
 /*
  * valid_vma: Verify if the specified vma is an executable vma
  * Relax restrictions while unregistering: vm_flags might have
@@ -180,7 +192,7 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 /*
  * write_opcode - write the opcode at a given virtual address.
  * @mm: the probed process address space.
- * @uprobe: the breakpointing information.
+ * @arch_uprobe: the breakpointing information.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
@@ -190,13 +202,14 @@ bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
-static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
+static int write_opcode(struct mm_struct *mm, struct arch_uprobe *auprobe,
 			unsigned long vaddr, uprobe_opcode_t opcode)
 {
 	struct page *old_page, *new_page;
 	struct address_space *mapping;
 	void *vaddr_old, *vaddr_new;
 	struct vm_area_struct *vma;
+	struct uprobe *uprobe;
 	loff_t addr;
 	int ret;
 
@@ -216,6 +229,7 @@ static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
 	if (!valid_vma(vma, is_bkpt_insn(&opcode)))
 		goto put_out;
 
+	uprobe = container_of(auprobe, struct uprobe, arch);
 	mapping = uprobe->inode->i_mapping;
 	if (mapping != vma->vm_file->f_mapping)
 		goto put_out;
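
Note the subtlety in the hunk above: write_opcode() now receives only the struct arch_uprobe, yet it still needs the inode kept in the generic struct uprobe, so it steps back to the containing object with container_of(). Below is a stand-alone user-space sketch of that pattern; the structure names are invented for the example and the macro is a simplified form of the kernel's.

#include <stdio.h>
#include <stddef.h>

/* Simplified container_of(): recover the outer structure from a pointer to
 * one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct arch_part {			/* stands in for struct arch_uprobe */
	unsigned short fixups;
};

struct probe {				/* stands in for struct uprobe */
	long offset;
	struct arch_part arch;		/* embedded, like uprobe->arch */
};

/* Like write_opcode(): handed only the arch part, step back to the owner. */
static void takes_arch_part(struct arch_part *ap)
{
	struct probe *p = container_of(ap, struct probe, arch);

	printf("owner offset = %ld, fixups = %u\n", p->offset, (unsigned)ap->fixups);
}

int main(void)
{
	struct probe p = { .offset = 42, .arch = { .fixups = 3 } };

	takes_arch_part(&p.arch);	/* prints: owner offset = 42, fixups = 3 */
	return 0;
}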
@@ -326,7 +340,7 @@ static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
-int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr)
+int __weak set_bkpt(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr)
 {
 	int result;
 
@@ -337,7 +351,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long v
 	if (result)
 		return result;
 
-	return write_opcode(mm, uprobe, vaddr, UPROBES_BKPT_INSN);
+	return write_opcode(mm, auprobe, vaddr, UPROBES_BKPT_INSN);
 }
 
 /**
@@ -351,7 +365,7 @@ int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe, unsigned long v
 * Return 0 (success) or a negative errno.
 */
 int __weak
-set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr, bool verify)
+set_orig_insn(struct mm_struct *mm, struct arch_uprobe *auprobe, unsigned long vaddr, bool verify)
 {
 	if (verify) {
 		int result;
@@ -363,7 +377,7 @@ set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe, unsigned long vaddr,
 		if (result != 1)
 			return result;
 	}
-	return write_opcode(mm, uprobe, vaddr, *(uprobe_opcode_t *)uprobe->insn);
+	return write_opcode(mm, auprobe, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -593,13 +607,13 @@ static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned
 
 	/* Instruction at the page-boundary; copy bytes in second page */
 	if (nbytes < bytes) {
-		if (__copy_insn(mapping, vma, uprobe->insn + nbytes,
+		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
 				bytes - nbytes, uprobe->offset + nbytes))
 			return -ENOMEM;
 
 		bytes = nbytes;
 	}
-	return __copy_insn(mapping, vma, uprobe->insn, bytes, uprobe->offset);
+	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
 static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
@@ -625,23 +639,23 @@ static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
 		if (ret)
 			return ret;
 
-		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->insn))
+		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
 			return -EEXIST;
 
-		ret = arch_uprobes_analyze_insn(mm, uprobe);
+		ret = arch_uprobes_analyze_insn(mm, &uprobe->arch);
 		if (ret)
 			return ret;
 
 		uprobe->flags |= UPROBES_COPY_INSN;
 	}
-	ret = set_bkpt(mm, uprobe, addr);
+	ret = set_bkpt(mm, &uprobe->arch, addr);
 
 	return ret;
 }
 
 static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe, loff_t vaddr)
 {
-	set_orig_insn(mm, uprobe, (unsigned long)vaddr, true);
+	set_orig_insn(mm, &uprobe->arch, (unsigned long)vaddr, true);
 }
 
 static void delete_uprobe(struct uprobe *uprobe)