 kernel/kprobes.c | 100
 1 file changed, 75 insertions(+), 25 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da2ccf142358..2d988141ab85 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -978,18 +978,36 @@ static int prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
-	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-	kprobe_ftrace_enabled++;
-	if (kprobe_ftrace_enabled == 1) {
+	if (ret) {
+		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		return ret;
+	}
+
+	if (kprobe_ftrace_enabled == 0) {
 		ret = register_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (ret) {
+			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+			goto err_ftrace;
+		}
 	}
+
+	kprobe_ftrace_enabled++;
+	return ret;
+
+err_ftrace:
+	/*
+	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+	 * empty filter_hash which would undesirably trace all functions.
+	 */
+	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+	return ret;
 }
 
 /* Caller must lock kprobe_mutex */
@@ -1008,22 +1026,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)	do {} while (0)
+#define arm_kprobe_ftrace(p)	(-ENODEV)
 #define disarm_kprobe_ftrace(p) do {} while (0)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		arm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return arm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
@@ -1362,9 +1381,15 @@ out:
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed) {
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			ret = arm_kprobe(ap);
+			if (ret) {
+				ap->flags |= KPROBE_FLAG_DISABLED;
+				list_del_rcu(&p->list);
+				synchronize_sched();
+			}
+		}
 	}
 	return ret;
 }
@@ -1573,8 +1598,14 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arm_kprobe(p);
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_sched();
+			goto out;
+		}
+	}
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
@@ -2116,7 +2147,9 @@ int enable_kprobe(struct kprobe *kp)
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
+		ret = arm_kprobe(p);
+		if (ret)
+			p->flags |= KPROBE_FLAG_DISABLED;
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2440,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
 	.release	= seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
@@ -2428,16 +2462,28 @@ static void arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, head, hlist)
-			if (!kprobe_disabled(p))
-				arm_kprobe(p);
+		/* Arm all kprobes on a best-effort basis */
+		hlist_for_each_entry_rcu(p, head, hlist) {
+			if (!kprobe_disabled(p)) {
+				err = arm_kprobe(p);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
+		}
 	}
 
-	printk(KERN_INFO "Kprobes globally enabled\n");
+	if (errors)
+		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally enabled\n");
 
 already_enabled:
 	mutex_unlock(&kprobe_mutex);
-	return;
+	return ret;
 }
 
 static void disarm_all_kprobes(void)
@@ -2494,6 +2540,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 {
 	char buf[32];
 	size_t buf_size;
+	int ret = 0;
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,7 +2551,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		arm_all_kprobes();
+		ret = arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
@@ -2515,6 +2562,9 @@ static ssize_t write_enabled_file_bool(struct file *file,
 		return -EINVAL;
 	}
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 
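
For reference, a minimal usage sketch of the effect of this change (not part of the patch itself; the module, handler, and target symbol below are hypothetical). With arm_kprobe() now propagating errors, callers such as register_kprobe() and enable_kprobe() report arm failures to their users instead of leaving a silently disarmed probe, so a caller can simply check the return value:

/* Hypothetical test module, assuming a standard kprobes setup. */
#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s\n", p->symbol_name);
	return 0;
}

static struct kprobe sample_kp = {
	.symbol_name	= "kernel_clone",	/* hypothetical target; pick a symbol that exists on your kernel */
	.pre_handler	= sample_pre_handler,
};

static int __init sample_init(void)
{
	int ret;

	ret = register_kprobe(&sample_kp);
	if (ret) {
		/* With this patch, an arm_kprobe()/arm_kprobe_ftrace() failure shows up here. */
		pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static void __exit sample_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");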