Diffstat (limited to 'kernel/kprobes.c')
 kernel/kprobes.c | 247 ++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 176 insertions(+), 71 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c62b8546cc90..098f396aa409 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
 
+	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);
 
 	/*
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 4: Free cleaned kprobes after quiesence period */
 	do_free_cleaned_kprobes(&free_list);
 
+	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
-	mutex_unlock(&kprobe_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
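
The hunk above only swaps the nesting order, making kprobe_mutex the outer lock and module_mutex the inner one on both the lock and unlock sides. A minimal userspace sketch of the invariant this enforces (illustrative names, not kernel API): as long as every thread acquires the pair in the same order, the pair cannot deadlock.

	#include <pthread.h>

	static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays kprobe_mutex */
	static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* plays module_mutex */

	static void with_both_locks(void (*work)(void))
	{
		pthread_mutex_lock(&outer);	/* always taken first, on every path */
		pthread_mutex_lock(&inner);	/* always taken second */
		work();
		pthread_mutex_unlock(&inner);	/* released in reverse order */
		pthread_mutex_unlock(&outer);
	}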
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	mutex_lock(&text_mutex);
+
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		return;
+		goto out;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		return;
+		goto out;
 	}
 
 	init_aggr_kprobe(ap, p);
-	optimize_kprobe(ap);
+	optimize_kprobe(ap);	/* This just kicks optimizer thread */
+
+out:
+	mutex_unlock(&text_mutex);
+	jump_label_unlock();
 }
 
 #ifdef CONFIG_SYSCTL
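
try_to_optimize_kprobe() now bails out early for ftrace-based probes: jump optimization works by replacing a breakpoint with a jump, and an ftrace-based probe has no breakpoint to replace. kprobe_ftrace() itself comes from the companion include/linux/kprobes.h change, which is not shown on this page; presumably it is the usual flag-test idiom, along these lines:

	/* Sketch only -- flag value and helper assumed from the header change */
	#define KPROBE_FLAG_FTRACE	8 /* probe is implemented via ftrace */

	static inline int kprobe_ftrace(struct kprobe *p)
	{
		return p->flags & KPROBE_FLAG_FTRACE;
	}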
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+	.func = kprobe_ftrace_handler,
+	.flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		return arch_prepare_kprobe(p);
+
+	return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 0, 0);
+	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	kprobe_ftrace_enabled++;
+	if (kprobe_ftrace_enabled == 1) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	kprobe_ftrace_enabled--;
+	if (kprobe_ftrace_enabled == 0) {
+		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 1, 0);
+	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else	/* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p)	do {} while (0)
+#define disarm_kprobe_ftrace(p)	do {} while (0)
+#endif
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
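
The new block follows the standard ftrace_ops recipe: narrow the ops to a single address with ftrace_set_filter_ip() first, then register the ops once the first user appears, and tear everything down in the reverse order. A compile-level sketch of the same recipe as a standalone module -- the probed symbol and handler body are assumptions, and the four-argument callback matches the FTRACE_OPS_FL_SAVE_REGS signature of this kernel generation:

	#include <linux/ftrace.h>
	#include <linux/kallsyms.h>
	#include <linux/module.h>

	static unsigned long sketch_ip;

	static void notrace trace_hit(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs is complete here because of FTRACE_OPS_FL_SAVE_REGS */
		pr_info("traced call at %pS\n", (void *)ip);
	}

	static struct ftrace_ops sketch_ops = {
		.func	= trace_hit,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};

	static int __init sketch_init(void)
	{
		int ret;

		/* assumed symbol; any ftrace-patched function would do */
		sketch_ip = kallsyms_lookup_name("do_fork");
		if (!sketch_ip)
			return -ENOENT;
		/* filter first, so the handler fires for this address only... */
		ret = ftrace_set_filter_ip(&sketch_ops, sketch_ip, 0, 0);
		if (ret)
			return ret;
		/* ...then enable the ops, as arm_kprobe_ftrace() does above */
		return register_ftrace_function(&sketch_ops);
	}

	static void __exit sketch_exit(void)
	{
		/* reverse order, as disarm_kprobe_ftrace() does above */
		unregister_ftrace_function(&sketch_ops);
		ftrace_set_filter_ip(&sketch_ops, sketch_ip, 1, 0);
	}
	module_init(sketch_init);
	module_exit(sketch_exit);
	MODULE_LICENSE("GPL");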
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
 
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;
 
-	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
-		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
-			/* Arm the breakpoint again. */
-			__arm_kprobe(ap);
-	}
 	return 0;
 }
 
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	/*
+	 * Get online CPUs to avoid text_mutex deadlock.with stop machine,
+	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap)
-			return -ENOMEM;
+		if (!ap) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap))
 		/* This probe is going to die. Rescue it */
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 		 * free aggr_probe. It will be used next time, or
 		 * freed by unregister_kprobe.
 		 */
-		return ret;
+		goto out;
 
 	/* Prepare optimized instructions if possible. */
 	prepare_optimized_kprobe(ap);
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 
 	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
-	return add_new_kprobe(ap, p);
+	ret = add_new_kprobe(ap, p);
+
+out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	jump_label_unlock();
+
+	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
+	return ret;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
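
register_aggr_kprobe() is the path taken when a second kprobe lands on an already-probed address; with this change it takes jump_label_lock, get_online_cpus() and text_mutex itself, and re-arms the aggregate only after dropping them (the logic deleted from add_new_kprobe() above), since arm_kprobe() may now route through ftrace. A usage-level sketch that exercises this path -- handlers and the probed symbol are illustrative:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int h1(struct kprobe *p, struct pt_regs *regs) { return 0; }
	static int h2(struct kprobe *p, struct pt_regs *regs) { return 0; }

	/* same symbol on purpose: the second register_kprobe() call will
	 * aggregate onto the first via register_aggr_kprobe() */
	static struct kprobe kp1 = { .symbol_name = "do_fork", .pre_handler = h1 };
	static struct kprobe kp2 = { .symbol_name = "do_fork", .pre_handler = h2 };

	static int __init aggr_demo_init(void)
	{
		int ret = register_kprobe(&kp1);

		if (ret)
			return ret;
		ret = register_kprobe(&kp2);	/* takes the aggr path */
		if (ret)
			unregister_kprobe(&kp1);
		return ret;
	}

	static void __exit aggr_demo_exit(void)
	{
		unregister_kprobe(&kp2);
		unregister_kprobe(&kp1);
	}
	module_init(aggr_demo_init);
	module_exit(aggr_demo_exit);
	MODULE_LICENSE("GPL");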
@@ -1313,71 +1402,96 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+static __kprobes int check_kprobe_address_safe(struct kprobe *p,
+					       struct module **probed_mod)
 {
 	int ret = 0;
-	struct kprobe *old_p;
-	struct module *probed_mod;
-	kprobe_opcode_t *addr;
-
-	addr = kprobe_addr(p);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	p->addr = addr;
+	unsigned long ftrace_addr;
 
-	ret = check_kprobe_rereg(p);
-	if (ret)
-		return ret;
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
+	 */
+	ftrace_addr = ftrace_location((unsigned long)p->addr);
+	if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+		/* Given address is not on the instruction boundary */
+		if ((unsigned long)p->addr != ftrace_addr)
+			return -EILSEQ;
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !KPROBES_CAN_USE_FTRACE */
+		return -EINVAL;
+#endif
+	}
 
 	jump_label_lock();
 	preempt_disable();
+
+	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
-		goto cannot_probe;
+		goto out;
 	}
 
-	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
-	p->flags &= KPROBE_FLAG_DISABLED;
-
-	/*
-	 * Check if are we probing a module.
-	 */
-	probed_mod = __module_text_address((unsigned long) p->addr);
-	if (probed_mod) {
-		/* Return -ENOENT if fail. */
-		ret = -ENOENT;
+	/* Check if are we probing a module */
+	*probed_mod = __module_text_address((unsigned long) p->addr);
+	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(probed_mod)))
-			goto cannot_probe;
+		if (unlikely(!try_module_get(*probed_mod))) {
+			ret = -ENOENT;
+			goto out;
+		}
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
 		 * kprobes in there.
 		 */
-		if (within_module_init((unsigned long)p->addr, probed_mod) &&
-		    probed_mod->state != MODULE_STATE_COMING) {
-			module_put(probed_mod);
-			goto cannot_probe;
+		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
+		    (*probed_mod)->state != MODULE_STATE_COMING) {
+			module_put(*probed_mod);
+			*probed_mod = NULL;
+			ret = -ENOENT;
 		}
-		/* ret will be updated by following code */
 	}
+out:
 	preempt_enable();
 	jump_label_unlock();
 
+	return ret;
+}
+
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	int ret;
+	struct kprobe *old_p;
+	struct module *probed_mod;
+	kprobe_opcode_t *addr;
+
+	/* Adjust probe address from symbol */
+	addr = kprobe_addr(p);
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
+	p->addr = addr;
+
+	ret = check_kprobe_rereg(p);
+	if (ret)
+		return ret;
+
+	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+	p->flags &= KPROBE_FLAG_DISABLED;
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
-	mutex_lock(&kprobe_mutex);
 
-	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+	ret = check_kprobe_address_safe(p, &probed_mod);
+	if (ret)
+		return ret;
 
-	get_online_cpus(); /* For avoiding text_mutex deadlock. */
-	mutex_lock(&text_mutex);
+	mutex_lock(&kprobe_mutex);
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
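
The -EILSEQ case in the hunk above encodes an instruction-boundary rule: ftrace_location() reports a hit for any address that falls inside a reserved ftrace call site, so an address inside the site but not at its start can be handled neither as an ftrace probe nor as a breakpoint (an int3 there would split the instruction). A self-contained userspace model of that check, assuming x86's 5-byte site for concreteness:

	/* Userspace model only; mock_ftrace_location() mimics the kernel's
	 * range lookup, all names and addresses are illustrative. */
	#include <stdio.h>

	#define MCOUNT_INSN_SIZE 5
	static unsigned long slot_start = 0x1000;	/* pretend foo's nop lives here */

	static unsigned long mock_ftrace_location(unsigned long ip)
	{
		return (ip >= slot_start && ip < slot_start + MCOUNT_INSN_SIZE)
			? slot_start : 0;
	}

	int main(void)
	{
		unsigned long addr = slot_start + 2;	/* points inside the nop */
		unsigned long faddr = mock_ftrace_location(addr);

		if (faddr && addr != faddr)
			printf("-EILSEQ: %#lx splits the ftrace slot at %#lx\n",
			       addr, faddr);
		return 0;
	}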
@@ -1386,7 +1500,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
-	ret = arch_prepare_kprobe(p);
+	mutex_lock(&text_mutex);	/* Avoiding text modification */
+	ret = prepare_kprobe(p);
+	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
 
@@ -1395,26 +1511,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		__arm_kprobe(p);
+		arm_kprobe(p);
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 
 out:
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
 		module_put(probed_mod);
 
 	return ret;
-
-cannot_probe:
-	preempt_enable();
-	jump_label_unlock();
-	return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
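
With the pieces above in place, a probe registered exactly on a function's ftrace site is prepared via prepare_kprobe() -> arch_prepare_kprobe_ftrace() and armed through arm_kprobe_ftrace() instead of an int3 breakpoint, transparently to the caller. A minimal caller-side sketch -- the symbol name and handler body are assumptions:

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
	{
		/* KPROBE_FLAG_FTRACE is set by check_kprobe_address_safe()
		 * when the probe sits exactly on the function's ftrace site */
		pr_info("hit %s via %s\n", p->symbol_name,
			(p->flags & KPROBE_FLAG_FTRACE) ? "ftrace" : "breakpoint");
		return 0;	/* continue into the probed function */
	}

	static struct kprobe demo_kp = {
		.symbol_name	= "do_fork",	/* assumed ftrace-patched symbol */
		.pre_handler	= demo_pre,
	};

	static int __init demo_init(void)
	{
		return register_kprobe(&demo_kp);
	}

	static void __exit demo_exit(void)
	{
		unregister_kprobe(&demo_kp);
	}
	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");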
@@ -1451,7 +1559,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 
 		/* Try to disarm and disable this/parent probe */
 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-			disarm_kprobe(orig_p);
+			disarm_kprobe(orig_p, true);
 			orig_p->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
@@ -2049,10 +2157,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		   (kprobe_gone(p) ? "[GONE]" : ""),
 		   ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
-		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		   (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2131,14 +2240,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;
 
 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 
 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2166,15 +2273,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */