path: root/arch/x86/kernel/kprobes.c
author	Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2010-12-03 04:54:28 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-12-06 11:59:31 -0500
commit	cd7ebe2298ff1c3112232878678ce5fe6be8a15b (patch)
tree	7bac7adf40ce2141e779b7d99b2784279c2dc45c /arch/x86/kernel/kprobes.c
parent	7deb18dcf0478940ac979de002db1ed8ba6531dc (diff)
kprobes: Use text_poke_smp_batch for optimizing
Use text_poke_smp_batch() in the optimization path to reduce the number
of stop_machine() calls. If there are more probes to optimize than
MAX_OPTIMIZE_PROBES (= 256), kprobes optimizes the first
MAX_OPTIMIZE_PROBES probes and kicks the optimizer again for the
remaining ones.

Changes in v5:
- Use kick_kprobe_optimizer() instead of calling
  schedule_delayed_work() directly.
- Reschedule the optimizer outside of the kprobe mutex lock.

Changes in v2:
- Allocate the code buffer and parameters in arch_init_kprobes()
  instead of using static arrays.
- Merge the previous max-optimization-limit patch into this patch, so
  this patch introduces an upper limit on how many probes are
  optimized at once.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: 2nddept-manager@sdl.hitachi.co.jp
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <20101203095428.2961.8994.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
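Below, a minimal C sketch of the pass structure this changelog describes. It is illustrative only: kprobe_optimizer_pass() is a hypothetical name, and the real list and locking plumbing live in kernel/kprobes.c rather than in this patch; only arch_optimize_kprobes(), kick_kprobe_optimizer(), kprobe_mutex, and text_mutex are names the patch itself relies on.

/*
 * Illustrative sketch (hypothetical function): one optimizer pass.
 * arch_optimize_kprobes() consumes at most MAX_OPTIMIZE_PROBES entries
 * from the list and patches them in a single text_poke_smp_batch()
 * round; any leftovers cause the optimizer to re-arm itself.
 */
static void kprobe_optimizer_pass(void)
{
	mutex_lock(&kprobe_mutex);
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);  /* handles up to 256 probes */
	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);

	/* v5 change: reschedule outside the kprobe mutex if work remains */
	if (!list_empty(&optimizing_list))
		kick_kprobe_optimizer();
}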
Diffstat (limited to 'arch/x86/kernel/kprobes.c')
-rw-r--r--	arch/x86/kernel/kprobes.c	69
1 file changed, 61 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index da51dc8e77cb..25a8af76feb5 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1405,10 +1405,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 	return 0;
 }
 
-/* Replace a breakpoint (int3) with a relative jump. */
-int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
+#define MAX_OPTIMIZE_PROBES 256
+static struct text_poke_param *jump_poke_params;
+static struct jump_poke_buffer {
+	u8 buf[RELATIVEJUMP_SIZE];
+} *jump_poke_bufs;
+
+static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+					    u8 *insn_buf,
+					    struct optimized_kprobe *op)
 {
-	unsigned char jmp_code[RELATIVEJUMP_SIZE];
 	s32 rel = (s32)((long)op->optinsn.insn -
 			((long)op->kp.addr + RELATIVEJUMP_SIZE));
 
@@ -1416,16 +1422,39 @@ int __kprobes arch_optimize_kprobe(struct optimized_kprobe *op)
 	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
 	       RELATIVE_ADDR_SIZE);
 
-	jmp_code[0] = RELATIVEJUMP_OPCODE;
-	*(s32 *)(&jmp_code[1]) = rel;
+	insn_buf[0] = RELATIVEJUMP_OPCODE;
+	*(s32 *)(&insn_buf[1]) = rel;
+
+	tprm->addr = op->kp.addr;
+	tprm->opcode = insn_buf;
+	tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Replace breakpoints (int3) with relative jumps.
+ * Caller must call with locking kprobe_mutex and text_mutex.
+ */
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+	struct optimized_kprobe *op, *tmp;
+	int c = 0;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		WARN_ON(kprobe_disabled(&op->kp));
+		/* Setup param */
+		setup_optimize_kprobe(&jump_poke_params[c],
+				      jump_poke_bufs[c].buf, op);
+		list_del_init(&op->list);
+		if (++c >= MAX_OPTIMIZE_PROBES)
+			break;
+	}
 
 	/*
 	 * text_poke_smp doesn't support NMI/MCE code modifying.
 	 * However, since kprobes itself also doesn't support NMI/MCE
 	 * code probing, it's not a problem.
 	 */
-	text_poke_smp(op->kp.addr, jmp_code, RELATIVEJUMP_SIZE);
-	return 0;
+	text_poke_smp_batch(jump_poke_params, c);
 }
 
 /* Replace a relative jump with a breakpoint (int3). */
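For reference, the batch interface this hunk switches to was introduced by the parent commit (7deb18dcf047) in arch/x86/include/asm/alternative.h. A sketch of the relevant declarations from that era of the tree (treat the exact field layout as an assumption):

/* Each parameter describes one patch site for the batched poke. */
struct text_poke_param {
	void *addr;		/* address of the instruction to patch */
	const void *opcode;	/* replacement bytes */
	size_t len;		/* number of bytes to patch */
};

/* Patches all n sites under a single stop_machine() round. */
extern void text_poke_smp_batch(struct text_poke_param *params, int n);

So one call covering c <= MAX_OPTIMIZE_PROBES sites replaces the c separate text_poke_smp() calls, and hence c separate stop_machine() rounds, that the old per-probe path would have issued.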
@@ -1457,11 +1486,35 @@ static int __kprobes setup_detour_execution(struct kprobe *p,
 	}
 	return 0;
 }
+
+static int __kprobes init_poke_params(void)
+{
+	/* Allocate code buffer and parameter array */
+	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
+				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+	if (!jump_poke_bufs)
+		return -ENOMEM;
+
+	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
+				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+	if (!jump_poke_params) {
+		kfree(jump_poke_bufs);
+		jump_poke_bufs = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+#else	/* !CONFIG_OPTPROBES */
+static int __kprobes init_poke_params(void)
+{
+	return 0;
+}
 #endif
 
 int __init arch_init_kprobes(void)
 {
-	return 0;
+	return init_poke_params();
 }
 
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
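A back-of-the-envelope footprint for the preallocation added above, assuming RELATIVEJUMP_SIZE = 5 and a 24-byte struct text_poke_param on x86-64 (both sizes are assumptions, not stated in the patch):

	jump_poke_bufs:   256 * 5 bytes  = 1280 bytes
	jump_poke_params: 256 * 24 bytes = 6144 bytes
	total:            about 7.25 KiB, allocated once in arch_init_kprobes()

This trades a small, fixed allocation for bounding both the latency of a single stop_machine() batch and the memory needed per optimizer pass.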