author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 13:28:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-01 13:28:49 -0400
commit		7e92daaefa68e5ef1e1732e45231e73adbb724e7 (patch)
tree		8e7f8ac9d82654df4c65939c6682f95510e22977 /kernel/kprobes.c
parent		7a68294278ae714ce2632a54f0f46916dca64f56 (diff)
parent		1d787d37c8ff6612b8151c6dff15bfa7347bcbdf (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf update from Ingo Molnar:
 "Lots of changes in this cycle as well, with hundreds of commits from over 30 contributors. Most of the activity was on the tooling side.

  Higher level changes:

   - New 'perf kvm' analysis tool, from Xiao Guangrong.

   - New 'perf trace' system-wide tracing tool

   - uprobes fixes + cleanups from Oleg Nesterov.

   - Lots of patches to make perf build on Android out of box, from Irina Tirdea

   - Extend ftrace function tracing utility to be more dynamic for its users. It allows for data passing to the callback functions, as well as reading regs as if a breakpoint were to trigger at function entry. The main goal of this patch series was to allow kprobes to use ftrace as an optimized probe point when a probe is placed on an ftrace nop. With lots of help from Masami Hiramatsu, and going through lots of iterations, we finally came up with a good solution. (An illustrative registration sketch follows the shortlog below.)

   - Add cpumask for uncore pmu, use it in 'stat', from Yan, Zheng.

   - Various tracing updates from Steve Rostedt

   - Clean up and improve 'perf sched' performance by eliminating lots of needless calls to libtraceevent.

   - Event group parsing support, from Jiri Olsa

   - UI/gtk refactorings and improvements from Namhyung Kim

   - Add support for non-tracepoint events in perf script python, from Feng Tang

   - Add --symbols to 'script', similar to the one in 'report', from Feng Tang.

  Infrastructure enhancements and fixes:

   - Convert the trace builtins to use the growing evsel/evlist tracepoint infrastructure, removing several open coded constructs like switch-like series of strcmp to dispatch events, etc. Basically what had already been showcased in 'perf sched'.

   - Add evsel constructor for tracepoints, that uses libtraceevent just to parse the /format events file, use it in a new 'perf test' to make sure the libtraceevent format parsing regressions can be more readily caught.

   - Some strange errors were happening in some builds, but not on the next, reported by several people; the problem was that some parser related files, generated during the build, didn't have proper make deps, fix from Eric Sandeen.

   - Introduce struct and cache information about the environment where a perf.data file was captured, from Namhyung Kim.

   - Fix handling of unresolved samples when --symbols is used in 'report', from Feng Tang.

   - Add union member access support to 'probe', from Hyeoncheol Lee.

   - Fixups to die() removal, from Namhyung Kim.

   - Render fixes for the TUI, from Namhyung Kim.

   - Don't enable annotation in non symbolic view, from Namhyung Kim.

   - Fix pipe mode in 'report', from Namhyung Kim.

   - Move related stats code from stat to util/, will be used by the 'stat' kvm tool, from Xiao Guangrong.

   - Remove die()/exit() calls from several tools.

   - Resolve vdso callchains, from Jiri Olsa

   - Don't pass const char pointers to basename, so that we can unconditionally use libgen.h and thus avoid ifdef BIONIC lines, from David Ahern

   - Refactor hist formatting so that it can be reused with the GTK browser, from Namhyung Kim

   - Fix build for another rbtree.c change, from Adrian Hunter.

   - Make 'perf diff' command work with evsel hists, from Jiri Olsa.

   - Use the only field_sep var that is set up: symbol_conf.field_sep, fix from Jiri Olsa.

   - .gitignore compiled python binaries, from Namhyung Kim.

   - Get rid of die() in more libtraceevent places, from Namhyung Kim.

   - Rename libtraceevent 'private' struct member to 'priv' so that it works in C++, from Steven Rostedt

   - Remove lots of exit()/die() calls from tools so that the main perf exit routine can take place, from David Ahern

   - Fix x86 build on x86-64, from David Ahern.

   - {int,str,rb}list fixes from Suzuki K Poulose

   - perf.data header fixes from Namhyung Kim

   - Allow user to indicate objdump path, needed in cross environments, from Maciek Borzecki

   - Fix hardware cache event name generation, fix from Jiri Olsa

   - Add round trip test for sw, hw and cache event names, catching the problem Jiri fixed; after Jiri's patch, the test passes successfully.

   - Clean target should do clean for lib/traceevent too, fix from David Ahern

   - Check the right variable for allocation failure, fix from Namhyung Kim

   - Set up evsel->tp_format regardless of evsel->name being set already, fix from Namhyung Kim

   - Oprofile fixes from Robert Richter.

   - Remove perf_event_attr needless version inflation, from Jiri Olsa

   - Introduce libtraceevent strerror like error reporting facility, from Namhyung Kim

   - Add pmu mappings to perf.data header and use event names from cmd line, from Robert Richter

   - Fix include order for bison/flex-generated C files, from Ben Hutchings

   - Build fixes and documentation corrections from David Ahern

   - Assorted cleanups from Robert Richter

   - Let O= makes handle relative paths, from Steven Rostedt

   - perf script python fixes, from Feng Tang.

   - Initial bash completion support, from Frederic Weisbecker

   - Allow building without libelf, from Namhyung Kim.

   - Support DWARF CFI based unwind to have callchains when %bp based unwinding is not possible, from Jiri Olsa.

   - Symbol resolution fixes; while fixing support for PPC64 files with an .opd ELF section was the end goal, several fixes for code that handles all architectures and cleanups are included, from Cody Schafer.

   - Assorted fixes for Documentation and build in 32 bit, from Robert Richter

   - Cache the libtraceevent event_format associated to each evsel early, so that we avoid relookups, i.e. calling pevent_find_event repeatedly when processing tracepoint events. [ This is to reduce the surface contact with libtraceevents and make clear what it is that the perf tools need from that lib: so far, parsing the common and per event fields. ]

   - Don't stop the build if the audit libraries are not installed, fix from Namhyung Kim.

   - Fix bfd.h/libbfd detection with recent binutils, from Markus Trippelsdorf.

   - Improve warning message when libunwind devel packages not present, from Jiri Olsa"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (282 commits)
  perf trace: Add aliases for some syscalls
  perf probe: Print an enum type variable in "enum variable-name" format when showing accessible variables
  perf tools: Check libaudit availability for perf-trace builtin
  perf hists: Add missing period_* fields when collapsing a hist entry
  perf trace: New tool
  perf evsel: Export the event_format constructor
  perf evsel: Introduce rawptr() method
  perf tools: Use perf_evsel__newtp in the event parser
  perf evsel: The tracepoint constructor should store sys:name
  perf evlist: Introduce set_filter() method
  perf evlist: Renane set_filters method to apply_filters
  perf test: Add test to check we correctly parse and match syscall open parms
  perf evsel: Handle endianity in intval method
  perf evsel: Know if byte swap is needed
  perf tools: Allow handling a NULL cpu_map as meaning "all cpus"
  perf evsel: Improve tracepoint constructor setup
  tools lib traceevent: Fix error path on pevent_parse_event
  perf test: Fix build failure
  trace: Move trace event enable from fs_initcall to core_initcall
  tracing: Add an option for disabling markers
  ...
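Illustrative usage sketch (not part of this merge): with the kprobes-on-ftrace support pulled in below, an ordinary kprobe registered at a function entry will, on a KPROBES_CAN_USE_FTRACE configuration, be detected by register_kprobe() as sitting on an ftrace nop via ftrace_location(), flagged KPROBE_FLAG_FTRACE, and armed through ftrace_set_filter_ip() instead of a software breakpoint; /sys/kernel/debug/kprobes/list then tags it [FTRACE]. The sketch uses only the long-standing kprobes module API; the probed symbol do_sys_open is a hypothetical example, not something this series mandates.

/*
 * Minimal kprobe client module, assuming CONFIG_KPROBES and, for the
 * ftrace-based arming path, a KPROBES_CAN_USE_FTRACE kernel.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let execution continue normally */
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* function entry == ftrace nop site */
	.pre_handler = handler_pre,
};

static int __init kprobe_sketch_init(void)
{
	/*
	 * On an ftrace location this arms via ftrace_set_filter_ip();
	 * otherwise it falls back to the usual breakpoint-based probe.
	 */
	return register_kprobe(&kp);
}

static void __exit kprobe_sketch_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_sketch_init);
module_exit(kprobe_sketch_exit);
MODULE_LICENSE("GPL");

If the probe address is not an ftrace location (or the kernel lacks KPROBES_CAN_USE_FTRACE), registration behaves exactly as it did before this series.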
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	247
1 file changed, 176 insertions, 71 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c62b8546cc90..098f396aa409 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
 
+	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);
 
 	/*
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 4: Free cleaned kprobes after quiesence period */
 	do_free_cleaned_kprobes(&free_list);
 
-	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	mutex_lock(&text_mutex);
+
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		return;
+		goto out;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		return;
+		goto out;
 	}
 
 	init_aggr_kprobe(ap, p);
-	optimize_kprobe(ap);
+	optimize_kprobe(ap);	/* This just kicks optimizer thread */
+
+out:
+	mutex_unlock(&text_mutex);
+	jump_label_unlock();
 }
 
 #ifdef CONFIG_SYSCTL
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+	.func = kprobe_ftrace_handler,
+	.flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		return arch_prepare_kprobe(p);
+
+	return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 0, 0);
+	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	kprobe_ftrace_enabled++;
+	if (kprobe_ftrace_enabled == 1) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	kprobe_ftrace_enabled--;
+	if (kprobe_ftrace_enabled == 0) {
+		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 1, 0);
+	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else	/* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p)	do {} while (0)
+#define disarm_kprobe_ftrace(p)	do {} while (0)
+#endif
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
 
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;
 
-	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
-		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
-			/* Arm the breakpoint again. */
-			__arm_kprobe(ap);
-	}
 	return 0;
 }
 
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	/*
+	 * Get online CPUs to avoid text_mutex deadlock.with stop machine,
+	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap)
-			return -ENOMEM;
+		if (!ap) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap))
 		/* This probe is going to die. Rescue it */
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 		 * free aggr_probe. It will be used next time, or
 		 * freed by unregister_kprobe.
 		 */
-		return ret;
+		goto out;
 
 	/* Prepare optimized instructions if possible. */
 	prepare_optimized_kprobe(ap);
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 
 	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
-	return add_new_kprobe(ap, p);
+	ret = add_new_kprobe(ap, p);
+
+out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	jump_label_unlock();
+
+	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
+	return ret;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -1313,71 +1402,96 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static __kprobes int check_kprobe_address_safe(struct kprobe *p,
+					       struct module **probed_mod)
 {
 	int ret = 0;
-	struct kprobe *old_p;
-	struct module *probed_mod;
-	kprobe_opcode_t *addr;
-
-	addr = kprobe_addr(p);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	p->addr = addr;
+	unsigned long ftrace_addr;
 
-	ret = check_kprobe_rereg(p);
-	if (ret)
-		return ret;
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
+	 */
+	ftrace_addr = ftrace_location((unsigned long)p->addr);
+	if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+		/* Given address is not on the instruction boundary */
+		if ((unsigned long)p->addr != ftrace_addr)
+			return -EILSEQ;
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !KPROBES_CAN_USE_FTRACE */
+		return -EINVAL;
+#endif
+	}
 
 	jump_label_lock();
 	preempt_disable();
+
+	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
-		goto cannot_probe;
+		goto out;
 	}
 
-	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
-	p->flags &= KPROBE_FLAG_DISABLED;
-
-	/*
-	 * Check if are we probing a module.
-	 */
-	probed_mod = __module_text_address((unsigned long) p->addr);
-	if (probed_mod) {
-		/* Return -ENOENT if fail. */
-		ret = -ENOENT;
+	/* Check if are we probing a module */
+	*probed_mod = __module_text_address((unsigned long) p->addr);
+	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(probed_mod)))
-			goto cannot_probe;
+		if (unlikely(!try_module_get(*probed_mod))) {
+			ret = -ENOENT;
+			goto out;
+		}
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
 		 * kprobes in there.
 		 */
-		if (within_module_init((unsigned long)p->addr, probed_mod) &&
-		    probed_mod->state != MODULE_STATE_COMING) {
-			module_put(probed_mod);
-			goto cannot_probe;
+		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
+		    (*probed_mod)->state != MODULE_STATE_COMING) {
+			module_put(*probed_mod);
+			*probed_mod = NULL;
+			ret = -ENOENT;
 		}
-		/* ret will be updated by following code */
 	}
+out:
 	preempt_enable();
 	jump_label_unlock();
 
+	return ret;
+}
+
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	int ret;
+	struct kprobe *old_p;
+	struct module *probed_mod;
+	kprobe_opcode_t *addr;
+
+	/* Adjust probe address from symbol */
+	addr = kprobe_addr(p);
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+	p->addr = addr;
+
+	ret = check_kprobe_rereg(p);
+	if (ret)
+		return ret;
+
+	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+	p->flags &= KPROBE_FLAG_DISABLED;
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
-	mutex_lock(&kprobe_mutex);
 
-	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+	ret = check_kprobe_address_safe(p, &probed_mod);
+	if (ret)
+		return ret;
 
-	get_online_cpus();	/* For avoiding text_mutex deadlock. */
-	mutex_lock(&text_mutex);
+	mutex_lock(&kprobe_mutex);
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -1386,7 +1500,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
-	ret = arch_prepare_kprobe(p);
+	mutex_lock(&text_mutex);	/* Avoiding text modification */
+	ret = prepare_kprobe(p);
+	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
 
@@ -1395,26 +1511,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		__arm_kprobe(p);
+		arm_kprobe(p);
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 
 out:
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
 		module_put(probed_mod);
 
 	return ret;
-
-cannot_probe:
-	preempt_enable();
-	jump_label_unlock();
-	return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
@@ -1451,7 +1559,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 
 		/* Try to disarm and disable this/parent probe */
 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-			disarm_kprobe(orig_p);
+			disarm_kprobe(orig_p, true);
 			orig_p->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
@@ -2049,10 +2157,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		(kprobe_gone(p) ? "[GONE]" : ""),
 		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
-		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2131,14 +2240,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;
 
 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 
 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2166,15 +2273,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */