author      Oleg Nesterov <oleg@redhat.com>    2014-03-31 09:16:22 -0400
committer   Oleg Nesterov <oleg@redhat.com>    2014-04-17 15:58:16 -0400
commit      ddb69f276c4af8bb47ad4f24a72f72ddf58c228a (patch)
tree        6d4daad064b45ec6be22199a84ffef9eccda25b5
parent      8a6b173287bb94b3ef8360119020e856afb1c934 (diff)
uprobes/x86: Fold prepare_fixups() into arch_uprobe_analyze_insn()
No functional changes, preparation.
Shift the code from prepare_fixups() to arch_uprobe_analyze_insn()
with the following modifications:
- Do not call insn_get_opcode() again, it was already called
by validate_insn_bits().
- Move "case 0xea" up. This way "case 0xff" can fall through
to default case.
- change "case 0xff" to use the nested "switch (MODRM_REG)",
this way the code looks a bit simpler.
- Make the comments look consistent.
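
For reference, the reshuffled fixup selection reads as below when pulled
out into a standalone sketch (not the kernel code itself: FIX_* stand in
for the UPROBE_FIX_* flags, and the opcode byte and ModRM reg field are
passed in directly instead of being decoded from a struct insn). It shows
how the nested switch lets the indirect-call cases fall through into the
indirect-jmp cases:

/*
 * Standalone sketch of the new fixup selection; FIX_* are local
 * stand-ins for the UPROBE_FIX_* flags.
 */
#include <stdbool.h>
#include <stdio.h>

#define FIX_IP          0x01    /* correct ip after single-stepping out of line */
#define FIX_CALL        0x02    /* adjust the pushed return address */
#define FIX_SETF        0x04    /* popf needs special flags handling */

static unsigned int select_fixups(unsigned char opcode, unsigned char modrm_reg)
{
        bool fix_ip = true, fix_call = false;
        unsigned int fixups = 0;

        switch (opcode) {
        case 0x9d:              /* popf */
                fixups |= FIX_SETF;
                break;
        case 0xc3: case 0xcb:   /* ret or lret -- ip is correct */
        case 0xc2: case 0xca:
                fix_ip = false;
                break;
        case 0xe8:              /* call relative - fix return addr */
                fix_call = true;
                break;
        case 0x9a:              /* call absolute - fix return addr, not ip */
                fix_call = true;
                fix_ip = false;
                break;
        case 0xea:              /* jmp absolute -- ip is correct */
                fix_ip = false;
                break;
        case 0xff:
                switch (modrm_reg) {
                case 2: case 3: /* call or lcall, indirect: falls through */
                        fix_call = true;
                case 4: case 5: /* jmp or ljmp, indirect */
                        fix_ip = false;
                }
                break;
        default:
                break;
        }

        if (fix_ip)
                fixups |= FIX_IP;
        if (fix_call)
                fixups |= FIX_CALL;
        return fixups;
}

int main(void)
{
        /* ff /2, indirect call: return address needs fixing, ip is correct */
        printf("ff /2 -> %#x\n", select_fixups(0xff, 2));      /* 0x2 (FIX_CALL) */
        /* ff /4, indirect jmp: ip is correct, nothing to fix */
        printf("ff /4 -> %#x\n", select_fixups(0xff, 4));      /* 0 */
        /* e8, relative call: both the return address and ip need fixing */
        printf("e8    -> %#x\n", select_fixups(0xe8, 0));      /* 0x3 */
        return 0;
}

Running it prints 0x2 (FIX_CALL) for ff /2, 0 for ff /4, and 0x3 for e8,
matching the fixups the real code selects.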
While at it, kill the initialization of rip_rela_target_address and
->fixups; we can rely on kzalloc(). We will be adding new members to
arch_uprobe, so it is better to assume that everything is zero by
default (a minimal illustration follows below).
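
The zero-by-default assumption is safe because the uprobe object that
embeds arch_uprobe is allocated with kzalloc(). A minimal userspace
analogue (calloc() standing in for kzalloc(); arch_uprobe_like is a
made-up stand-in, not the real struct):

/*
 * Freshly allocated zeroed memory makes explicit "x->field = 0"
 * assignments redundant, which is what the patch relies on.
 */
#include <assert.h>
#include <stdlib.h>

struct arch_uprobe_like {
        unsigned long rip_rela_target_address;
        unsigned short fixups;
};

int main(void)
{
        struct arch_uprobe_like *au = calloc(1, sizeof(*au));

        assert(au != NULL);
        /* no "au->fixups = 0;" or "au->rip_rela_target_address = 0;" needed */
        assert(au->fixups == 0);
        assert(au->rip_rela_target_address == 0);
        free(au);
        return 0;
}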
TODO: clean up/fix the mess in the validate_insn_bits() paths:
- validate_insn_64bits() and validate_insn_32bits() should be
unified.
- "ifdef" is not used consistently; if good_insns_64 depends
on CONFIG_X86_64, then probably good_insns_32 should depend
on CONFIG_X86_32/EMULATION
- the usage of mm->context.ia32_compat looks wrong if the task
is TIF_X32.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
-rw-r--r--   arch/x86/kernel/uprobes.c   110
1 file changed, 47 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 2ed845928b5f..098e56ec7954 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -53,7 +53,7 @@
 #define OPCODE1(insn)   ((insn)->opcode.bytes[0])
 #define OPCODE2(insn)   ((insn)->opcode.bytes[1])
 #define OPCODE3(insn)   ((insn)->opcode.bytes[2])
-#define MODRM_REG(insn) X86_MODRM_REG(insn->modrm.value)
+#define MODRM_REG(insn) X86_MODRM_REG((insn)->modrm.value)
 
 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
         (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
@@ -229,63 +229,6 @@ static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
         return -ENOTSUPP;
 }
 
-/*
- * Figure out which fixups arch_uprobe_post_xol() will need to perform, and
- * annotate arch_uprobe->fixups accordingly. To start with,
- * arch_uprobe->fixups is either zero or it reflects rip-related fixups.
- */
-static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
-{
-        bool fix_ip = true, fix_call = false;  /* defaults */
-        int reg;
-
-        insn_get_opcode(insn);  /* should be a nop */
-
-        switch (OPCODE1(insn)) {
-        case 0x9d:
-                /* popf */
-                auprobe->fixups |= UPROBE_FIX_SETF;
-                break;
-        case 0xc3:              /* ret/lret */
-        case 0xcb:
-        case 0xc2:
-        case 0xca:
-                /* ip is correct */
-                fix_ip = false;
-                break;
-        case 0xe8:              /* call relative - Fix return addr */
-                fix_call = true;
-                break;
-        case 0x9a:              /* call absolute - Fix return addr, not ip */
-                fix_call = true;
-                fix_ip = false;
-                break;
-        case 0xff:
-                insn_get_modrm(insn);
-                reg = MODRM_REG(insn);
-                if (reg == 2 || reg == 3) {
-                        /* call or lcall, indirect */
-                        /* Fix return addr; ip is correct. */
-                        fix_call = true;
-                        fix_ip = false;
-                } else if (reg == 4 || reg == 5) {
-                        /* jmp or ljmp, indirect */
-                        /* ip is correct. */
-                        fix_ip = false;
-                }
-                break;
-        case 0xea:              /* jmp absolute -- ip is correct */
-                fix_ip = false;
-                break;
-        default:
-                break;
-        }
-        if (fix_ip)
-                auprobe->fixups |= UPROBE_FIX_IP;
-        if (fix_call)
-                auprobe->fixups |= UPROBE_FIX_CALL;
-}
-
 #ifdef CONFIG_X86_64
 /*
  * If arch_uprobe->insn doesn't use rip-relative addressing, return
@@ -318,7 +261,6 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct ins
         if (mm->context.ia32_compat)
                 return;
 
-        auprobe->rip_rela_target_address = 0x0;
         if (!insn_rip_relative(insn))
                 return;
 
@@ -421,16 +363,58 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
  */
 int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
 {
-        int ret;
         struct insn insn;
+        bool fix_ip = true, fix_call = false;
+        int ret;
 
-        auprobe->fixups = 0;
         ret = validate_insn_bits(auprobe, mm, &insn);
-        if (ret != 0)
+        if (ret)
                 return ret;
 
+        /*
+         * Figure out which fixups arch_uprobe_post_xol() will need to perform,
+         * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
+         * is either zero or it reflects rip-related fixups.
+         */
         handle_riprel_insn(auprobe, mm, &insn);
-        prepare_fixups(auprobe, &insn);
+
+        switch (OPCODE1(&insn)) {
+        case 0x9d:              /* popf */
+                auprobe->fixups |= UPROBE_FIX_SETF;
+                break;
+        case 0xc3:              /* ret or lret -- ip is correct */
+        case 0xcb:
+        case 0xc2:
+        case 0xca:
+                fix_ip = false;
+                break;
+        case 0xe8:              /* call relative - Fix return addr */
+                fix_call = true;
+                break;
+        case 0x9a:              /* call absolute - Fix return addr, not ip */
+                fix_call = true;
+                fix_ip = false;
+                break;
+        case 0xea:              /* jmp absolute -- ip is correct */
+                fix_ip = false;
+                break;
+        case 0xff:
+                insn_get_modrm(&insn);
+                switch (MODRM_REG(&insn)) {
+                case 2: case 3:         /* call or lcall, indirect */
+                        fix_call = true;
+                case 4: case 5:         /* jmp or ljmp, indirect */
+                        fix_ip = false;
+                }
+                break;
+        default:
+                break;
+        }
+
+        if (fix_ip)
+                auprobe->fixups |= UPROBE_FIX_IP;
+        if (fix_call)
+                auprobe->fixups |= UPROBE_FIX_CALL;
 
         return 0;
 }
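
For a concrete feel of what the nested "switch (MODRM_REG)" keys on, the
snippet below extracts the same ModRM reg bits that X86_MODRM_REG()
selects; the byte values are standard x86 encodings, and the helper is a
hypothetical stand-in for the kernel's instruction decoder:

/*
 * For a 0xff opcode the ModRM "reg" field selects the operation,
 * e.g. /2 is an indirect near call and /4 is an indirect near jmp.
 */
#include <stdio.h>

static unsigned int modrm_reg(unsigned char modrm)
{
        return (modrm >> 3) & 0x7;      /* the bits X86_MODRM_REG() extracts */
}

int main(void)
{
        /* ff d0 = call *%rax: reg field 2 -> UPROBE_FIX_CALL, ip is correct */
        printf("ff d0 -> /%u (indirect call)\n", modrm_reg(0xd0));
        /* ff e0 = jmp *%rax: reg field 4 -> no fixups needed */
        printf("ff e0 -> /%u (indirect jmp)\n", modrm_reg(0xe0));
        return 0;
}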