author		Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2012-06-05 06:28:32 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2012-07-31 10:29:58 -0400
commit		ae6aa16fdc163afe6b04b6c073ad4ddd4663c03b (patch)
tree		0a6055f56f80561c4a92df5ea50628644db73e7e /kernel/kprobes.c
parent		4dc936769e8a6382a4cc12375e8a4daa2b829fda (diff)
kprobes: introduce ftrace based optimization
Introduce function-trace-based kprobes optimization.

With ftrace optimization, a kprobe placed on an mcount calling address
uses ftrace's mcount call instead of a breakpoint. Furthermore, unlike
the current jump-based optimization, this optimization also works on
preemptible kernels. Of course, this feature works only if the probe
is on an mcount call.

If kprobe.break_handler is set, that probe is not optimized with
ftrace (nor put on ftrace). This limitation exists because
break_handler is used only by jprobes, which modify the ip address
(to fetch the function arguments), while the function tracer ignores
a modified ip address.

Changes in v2:
 - Fix ftrace_ops registration to happen right after setting its filter.
 - Unregister ftrace_ops when no kprobe is using it.
 - Remove the notrace dependency from the __kprobes macro.

Link: http://lkml.kernel.org/r/20120605102832.27845.63461.stgit@localhost.localdomain

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
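For orientation, nothing changes for kprobes users: a probe registered at a
function entry (where the mcount call sits) is simply armed through ftrace
rather than a breakpoint. A minimal sketch in the style of
samples/kprobes/kprobe_example.c; the target symbol is illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative target: any function entry carrying an mcount call
 * site will do; "do_fork" is just an example symbol. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("probe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init example_init(void)
{
	/* With KPROBES_CAN_USE_FTRACE, this probe lands on the mcount
	 * call and is armed via ftrace_set_filter_ip() instead of a
	 * software breakpoint. */
	return register_kprobe(&kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");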
Diffstat (limited to 'kernel/kprobes.c')
 -rw-r--r--	kernel/kprobes.c | 105
 1 file changed, 92 insertions(+), 13 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9e47f44f3531..69c16efc315b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -759,6 +759,10 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
 	/* For preparing optimization, jump_label_text_reserved() is called */
 	jump_label_lock();
 	mutex_lock(&text_mutex);
@@ -915,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+	.func = kprobe_ftrace_handler,
+	.flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		return arch_prepare_kprobe(p);
+
+	return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 0, 0);
+	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	kprobe_ftrace_enabled++;
+	if (kprobe_ftrace_enabled == 1) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	kprobe_ftrace_enabled--;
+	if (kprobe_ftrace_enabled == 0) {
+		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 1, 0);
+	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else	/* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p)	do {} while (0)
+#define disarm_kprobe_ftrace(p)	do {} while (0)
+#endif
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
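The arm/disarm pair above keeps a single shared ftrace_ops live for however
many ftrace-based kprobes exist: the ops is registered when
kprobe_ftrace_enabled goes from 0 to 1 and unregistered when it drops back
to 0, while ftrace_set_filter_ip() adds or removes the individual probe
address from the ops' filter. A standalone sketch of that
first-user/last-user idiom (plain userspace C, all names made up):

/*
 * Illustration only, not kernel code: the shared handler is registered
 * when the user count goes 0 -> 1 and unregistered when it returns to 0.
 */
#include <stdio.h>

static int users;	/* plays the role of kprobe_ftrace_enabled */

static void register_shared_handler(void)	{ puts("handler registered"); }
static void unregister_shared_handler(void)	{ puts("handler unregistered"); }

static void arm(void)
{
	if (++users == 1)	/* first user brings the handler online */
		register_shared_handler();
}

static void disarm(void)
{
	if (--users == 0)	/* last user takes the handler offline */
		unregister_shared_handler();
}

int main(void)
{
	arm();		/* registers the shared handler */
	arm();		/* filter-only update in the real code */
	disarm();	/* one user left, handler stays */
	disarm();	/* unregisters */
	return 0;
}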
@@ -929,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
 
@@ -1343,6 +1406,26 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 					       struct module **probed_mod)
 {
 	int ret = 0;
+	unsigned long ftrace_addr;
+
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
+	 */
+	ftrace_addr = ftrace_location((unsigned long)p->addr);
+	if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+		/* Given address is not on the instruction boundary */
+		if ((unsigned long)p->addr != ftrace_addr)
+			return -EILSEQ;
+		/* break_handler (jprobe) can not work with ftrace */
+		if (p->break_handler)
+			return -EINVAL;
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !KPROBES_CAN_USE_FTRACE */
+		return -EINVAL;
+#endif
+	}
 
 	jump_label_lock();
 	preempt_disable();
@@ -1350,7 +1433,6 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
 		goto out;
@@ -1422,7 +1504,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	}
 
 	mutex_lock(&text_mutex);	/* Avoiding text modification */
-	ret = arch_prepare_kprobe(p);
+	ret = prepare_kprobe(p);
 	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
@@ -1480,7 +1562,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 
 	/* Try to disarm and disable this/parent probe */
 	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-		disarm_kprobe(orig_p);
+		disarm_kprobe(orig_p, true);
 		orig_p->flags |= KPROBE_FLAG_DISABLED;
 	}
 }
@@ -2078,10 +2160,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		(kprobe_gone(p) ? "[GONE]" : ""),
 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
-		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
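With this change an ftrace-based probe shows up tagged in the debugfs
listing; an illustrative session (address and symbol are made up):

	# cat /sys/kernel/debug/kprobes/list
	ffffffff81050990  k  do_fork+0x0    [FTRACE]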
@@ -2160,14 +2243,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;
 
 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 
 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2195,15 +2276,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */
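Both loops now go through arm_kprobe()/disarm_kprobe(), which take
text_mutex per probe and only on the non-ftrace path, so the global
lock/unlock around the hash tables is dropped. These two functions back
the global kprobes switch in debugfs, so the change can be exercised with
(illustrative session):

	# echo 0 > /sys/kernel/debug/kprobes/enabled	(calls disarm_all_kprobes())
	# echo 1 > /sys/kernel/debug/kprobes/enabled	(calls arm_all_kprobes())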