Diffstat (limited to 'arch/x86/kernel/kprobes.c')
-rw-r--r--  arch/x86/kernel/kprobes.c | 154
1 file changed, 128 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 770ebfb349e9..f1a6244d7d93 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -230,9 +230,6 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 	return 0;
 }
 
-/* Dummy buffers for kallsyms_lookup */
-static char __dummy_buf[KSYM_NAME_LEN];
-
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
@@ -241,7 +238,7 @@ static int __kprobes can_probe(unsigned long paddr)
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
 
-	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
+	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
 		return 0;
 
 	/* Decode instructions */
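The two hunks above swap kallsyms_lookup() for kallsyms_lookup_size_offset(), which only reports a symbol's size and offset and never copies its name, so the static KSYM_NAME_LEN scratch buffer can go away. A minimal usage sketch, assuming a hypothetical helper name addr_in_symbol():

#include <linux/types.h>
#include <linux/kallsyms.h>

/* Sketch: does addr fall inside a resolvable kernel symbol? */
static bool addr_in_symbol(unsigned long addr)
{
	unsigned long size, offset;

	/* No name buffer needed; only size and offset are filled in */
	if (!kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;
	return offset < size;
}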
@@ -406,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -415,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
 	if (is_IF_modifier(p->ainsn.insn))
@@ -589,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		preempt_enable_no_resched();
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
 			return 1;
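The __get_cpu_var() to __this_cpu_read()/__this_cpu_write() conversions in these hunks replace an lvalue macro with explicit accessors; on x86 the accessors compile to a single %gs-relative move instead of first computing this CPU's copy of the variable's address. A side-by-side sketch, with demo_kprobe as a hypothetical stand-in for current_kprobe:

#include <linux/percpu.h>
#include <linux/kprobes.h>

static DEFINE_PER_CPU(struct kprobe *, demo_kprobe);	/* hypothetical */

static void demo_set_and_clear(struct kprobe *p)
{
	/* Old lvalue idiom, as removed by this patch:
	 *	__get_cpu_var(demo_kprobe) = p;
	 */
	__this_cpu_write(demo_kprobe, p);	/* new accessor idiom */
	if (__this_cpu_read(demo_kprobe) == p)
		__this_cpu_write(demo_kprobe, NULL);
}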
@@ -762,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -1129,7 +1126,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
 	*(unsigned long *)addr = val;
 }
 
-void __kprobes kprobes_optinsn_template_holder(void)
+static void __used __kprobes kprobes_optinsn_template_holder(void)
 {
 	asm volatile (
 			".global optprobe_template_entry\n"
@@ -1186,8 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 					 struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long flags;
 
-	preempt_disable();
+	/* This is possible if op is under delayed unoptimizing */
+	if (kprobe_disabled(&op->kp))
+		return;
+
+	local_irq_save(flags);
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
@@ -1201,12 +1203,12 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
 		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
 		regs->orig_ax = ~0UL;
 
-		__get_cpu_var(current_kprobe) = &op->kp;
+		__this_cpu_write(current_kprobe, &op->kp);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
-		__get_cpu_var(current_kprobe) = NULL;
+		__this_cpu_write(current_kprobe, NULL);
 	}
-	preempt_enable_no_resched();
+	local_irq_restore(flags);
 }
 
 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
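Two fixes meet in optimized_callback() above. First, a probe being unoptimized lazily can still be hit through the not-yet-removed jump, so a disabled probe must return early. Second, local_irq_save() replaces preempt_disable() so the pre-handler runs with interrupts off, matching the int3 exception path that unoptimized probes take. The resulting guard pattern, reduced to a hedged sketch (demo_callback is hypothetical):

#include <linux/kprobes.h>
#include <linux/irqflags.h>

static void demo_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	unsigned long flags;

	if (kprobe_disabled(&op->kp))	/* delayed-unoptimize race */
		return;

	local_irq_save(flags);		/* match the int3 path: irqs off */
	/* ... dispatch the pre-handler here ... */
	local_irq_restore(flags);
}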
@@ -1221,7 +1223,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
 	}
 	/* Check whether the address range is reserved */
 	if (ftrace_text_reserved(src, src + len - 1) ||
-	    alternatives_text_reserved(src, src + len - 1))
+	    alternatives_text_reserved(src, src + len - 1) ||
+	    jump_label_text_reserved(src, src + len - 1))
 		return -EBUSY;
 
 	return len;
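copy_optimized_instructions() relocates the probed instructions, so it must refuse any byte range that another runtime patcher may rewrite underneath it; this hunk adds jump labels to the existing ftrace and alternatives reservations. The composed check, as a standalone sketch with a hypothetical wrapper name:

#include <linux/types.h>
#include <linux/ftrace.h>
#include <linux/jump_label.h>
#include <asm/alternative.h>

/* Sketch: a text range is off-limits if any runtime patcher reserved it */
static bool demo_text_reserved(u8 *start, u8 *end)	/* hypothetical */
{
	return ftrace_text_reserved(start, end) ||
	       alternatives_text_reserved(start, end) ||
	       jump_label_text_reserved(start, end);
}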
@@ -1269,11 +1272,17 @@ static int __kprobes can_optimize(unsigned long paddr)
 	unsigned long addr, size = 0, offset = 0;
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	/* Dummy buffers for lookup_symbol_attrs */
-	static char __dummy_buf[KSYM_NAME_LEN];
 
 	/* Lookup symbol including addr */
-	if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
+		return 0;
+
+	/*
+	 * Do not optimize in the entry code due to the unstable
+	 * stack handling.
+	 */
+	if ((paddr >= (unsigned long)__entry_text_start) &&
+	    (paddr <  (unsigned long)__entry_text_end))
 		return 0;
 
 	/* Check there is enough space for a relative jump. */
@@ -1405,10 +1414,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 	return 0;
 }
 
-/* Replace a breakpoint (int3) with a relative jump. */
-int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
+#define MAX_OPTIMIZE_PROBES 256
+static struct text_poke_param *jump_poke_params;
+static struct jump_poke_buffer {
+	u8 buf[RELATIVEJUMP_SIZE];
+} *jump_poke_bufs;
+
+static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+					    u8 *insn_buf,
+					    struct optimized_kprobe *op)
 {
-	unsigned char jmp_code[RELATIVEJUMP_SIZE];
 	s32 rel = (s32)((long)op->optinsn.insn -
 			((long)op->kp.addr + RELATIVEJUMP_SIZE));
 
@@ -1416,16 +1431,79 @@ int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
 	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
 	       RELATIVE_ADDR_SIZE);
 
-	jmp_code[0] = RELATIVEJUMP_OPCODE;
-	*(s32 *)(&jmp_code[1]) = rel;
+	insn_buf[0] = RELATIVEJUMP_OPCODE;
+	*(s32 *)(&insn_buf[1]) = rel;
+
+	tprm->addr = op->kp.addr;
+	tprm->opcode = insn_buf;
+	tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Replace breakpoints (int3) with relative jumps.
+ * Caller must call with locking kprobe_mutex and text_mutex.
+ */
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+	struct optimized_kprobe *op, *tmp;
+	int c = 0;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		WARN_ON(kprobe_disabled(&op->kp));
+		/* Setup param */
+		setup_optimize_kprobe(&jump_poke_params[c],
+				      jump_poke_bufs[c].buf, op);
+		list_del_init(&op->list);
+		if (++c >= MAX_OPTIMIZE_PROBES)
+			break;
+	}
 
 	/*
 	 * text_poke_smp doesn't support NMI/MCE code modifying.
 	 * However, since kprobes itself also doesn't support NMI/MCE
 	 * code probing, it's not a problem.
 	 */
-	text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
-	return 0;
+	text_poke_smp_batch(jump_poke_params, c);
+}
+
+static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
+					      u8 *insn_buf,
+					      struct optimized_kprobe *op)
+{
+	/* Set int3 to first byte for kprobes */
+	insn_buf[0] = BREAKPOINT_INSTRUCTION;
+	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+	tprm->addr = op->kp.addr;
+	tprm->opcode = insn_buf;
+	tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must call with locking kprobe_mutex.
+ */
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+				    struct list_head *done_list)
+{
+	struct optimized_kprobe *op, *tmp;
+	int c = 0;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		/* Setup param */
+		setup_unoptimize_kprobe(&jump_poke_params[c],
+					jump_poke_bufs[c].buf, op);
+		list_move(&op->list, done_list);
+		if (++c >= MAX_OPTIMIZE_PROBES)
+			break;
+	}
+
+	/*
+	 * text_poke_smp doesn't support NMI/MCE code modifying.
+	 * However, since kprobes itself also doesn't support NMI/MCE
+	 * code probing, it's not a problem.
+	 */
+	text_poke_smp_batch(jump_poke_params, c);
 }
 
 /* Replace a relative jump with a breakpoint (int3). */
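This hunk is the core of the patch: instead of one stop_machine()-based text_poke_smp() call per probe, the parameters are accumulated into a preallocated array and flushed with a single text_poke_smp_batch() call, so (un)optimizing a list of probes pays for one CPU rendezvous instead of one per probe. The accumulate-then-flush shape, reduced to a hedged sketch (demo_* names are hypothetical; the real code caps each batch at MAX_OPTIMIZE_PROBES and lets the caller resubmit the remainder):

#include <asm/alternative.h>

#define DEMO_BATCH	256		/* mirrors MAX_OPTIMIZE_PROBES */

static struct text_poke_param demo_params[DEMO_BATCH];	/* hypothetical */

static void demo_flush(int nr)
{
	/* One stop_machine() rendezvous patches all nr sites at once */
	if (nr > 0)
		text_poke_smp_batch(demo_params, nr);
}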
@@ -1457,11 +1535,35 @@ static int __kprobes setup_detour_execution(struct kprobe *p,
 	}
 	return 0;
 }
+
+static int __kprobes init_poke_params(void)
+{
+	/* Allocate code buffer and parameter array */
+	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
+				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+	if (!jump_poke_bufs)
+		return -ENOMEM;
+
+	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
+				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+	if (!jump_poke_params) {
+		kfree(jump_poke_bufs);
+		jump_poke_bufs = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+#else /* !CONFIG_OPTPROBES */
+static int __kprobes init_poke_params(void)
+{
+	return 0;
+}
+#endif
 
 int __init arch_init_kprobes(void)
 {
-	return 0;
+	return init_poke_params();
 }
 
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
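One design note on init_poke_params(): the two kmalloc() calls multiply sizeof() by MAX_OPTIMIZE_PROBES by hand, which is safe here because the constant is small and fixed; for sizes that could grow, the overflow-checked kcalloc() form expresses the same allocation more defensively. A hedged, drop-in alternative body (demo_init_poke_params is a hypothetical name; it assumes the jump_poke_bufs/jump_poke_params globals from the patch):

#include <linux/slab.h>

static int demo_init_poke_params(void)	/* hypothetical */
{
	/* Same allocations, with overflow checking folded into the helper */
	jump_poke_bufs = kcalloc(MAX_OPTIMIZE_PROBES,
				 sizeof(struct jump_poke_buffer), GFP_KERNEL);
	if (!jump_poke_bufs)
		return -ENOMEM;

	jump_poke_params = kcalloc(MAX_OPTIMIZE_PROBES,
				   sizeof(struct text_poke_param), GFP_KERNEL);
	if (!jump_poke_params) {
		kfree(jump_poke_bufs);
		jump_poke_bufs = NULL;
		return -ENOMEM;
	}
	return 0;
}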