author		Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2012-06-05 06:28:26 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2012-07-31 10:29:57 -0400
commit		25764288d8dc4792f0f487baf043ccfee5d8c2ba (patch)
tree		edf03ef2b8f12aee328f4cfadc6eebecdb174061 /kernel/kprobes.c
parent		f7fa6ef0ded995aad68650a877198f70e44b7621 (diff)
kprobes: Move locks into appropriate functions
Break a big critical region into fine-grained pieces on the
kprobe registration path. This helps us solve a circular
locking dependency when introducing ftrace-based kprobes.
Link: http://lkml.kernel.org/r/20120605102826.27845.81689.stgit@localhost.localdomain
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
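
The diff below is easier to follow with the locking shape in mind. Here is a
minimal sketch in userspace C of what "breaking a big critical region into
fine-grained pieces" means, with pthread mutexes standing in for kprobe_mutex,
text_mutex, and the jump label lock; the function and lock names are
illustrative analogies, not kernel code:

/* Hedged sketch: lock-scope narrowing, userspace analogy only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kprobe_lock = PTHREAD_MUTEX_INITIALIZER; /* ~kprobe_mutex */
static pthread_mutex_t text_lock   = PTHREAD_MUTEX_INITIALIZER; /* ~text_mutex  */
static pthread_mutex_t jump_lock   = PTHREAD_MUTEX_INITIALIZER; /* ~jump label  */

static void register_coarse(void)   /* before: one big critical region */
{
	pthread_mutex_lock(&kprobe_lock);
	pthread_mutex_lock(&jump_lock);
	pthread_mutex_lock(&text_lock);
	/* ... lookup, prepare, arm, optimize, all under every lock ... */
	pthread_mutex_unlock(&text_lock);
	pthread_mutex_unlock(&jump_lock);
	pthread_mutex_unlock(&kprobe_lock);
}

static void register_fine(void)      /* after: each lock scoped to its step */
{
	pthread_mutex_lock(&kprobe_lock);   /* serialize registration          */
	/* ... hash-table lookup needs only kprobe_lock ... */
	pthread_mutex_lock(&text_lock);     /* only around instruction prep    */
	/* ... prepare instruction slot ... */
	pthread_mutex_unlock(&text_lock);
	pthread_mutex_lock(&jump_lock);     /* only around reservation check   */
	/* ... check reserved text, optimize ... */
	pthread_mutex_unlock(&jump_lock);
	pthread_mutex_unlock(&kprobe_lock);
}

int main(void)
{
	register_coarse();
	register_fine();
	puts("ok");
	return 0;
}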
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--	kernel/kprobes.c	63
1 file changed, 42 insertions(+), 21 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6137fe32b4b8..9e47f44f3531 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -759,20 +759,28 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	mutex_lock(&text_mutex);
+
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		return;
+		goto out;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		return;
+		goto out;
 	}
 
 	init_aggr_kprobe(ap, p);
-	optimize_kprobe(ap);
+	optimize_kprobe(ap);	/* This just kicks optimizer thread */
+
+out:
+	mutex_unlock(&text_mutex);
+	jump_label_unlock();
 }
 
 #ifdef CONFIG_SYSCTL
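
This hunk converts every early return in try_to_optimize_kprobe() into a jump
to a single out: label, so both locks are released exactly once on every path.
A minimal sketch of that single-exit unlock idiom, in userspace C with
illustrative names (step1/step2 stand in for the alloc/prepare steps):

/* Hedged sketch of the single-exit unlock idiom; not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static bool step1(void) { return true; }  /* stand-in for alloc_aggr_kprobe */
static bool step2(void) { return false; } /* a failing step still unlocks once */

static void do_work(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);

	if (!step1())
		goto out;	/* early failure: fall through to the unlocks */
	if (!step2())
		goto out;
	/* ... success path ... */
out:
	pthread_mutex_unlock(&lock_b);	/* release in reverse acquisition order */
	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	do_work();
	puts("done");
	return 0;
}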
@@ -1144,12 +1152,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;
 
-	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
-		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
-			/* Arm the breakpoint again. */
-			__arm_kprobe(ap);
-	}
 	return 0;
 }
 
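The re-arming block removed here reappears at the end of register_aggr_kprobe()
(see the later hunk), where text_mutex is no longer held; accordingly the moved
code calls arm_kprobe() instead of __arm_kprobe(). A sketch of that
locked/unlocked wrapper convention, assuming, as the diff implies but does not
show, that arm_kprobe() takes text_mutex internally while __arm_kprobe()
expects the caller to hold it (names below are illustrative):

/* Hedged sketch of the locked-wrapper convention; userspace analogy. */
#include <pthread.h>

static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER; /* ~text_mutex */

/* Analogue of __arm_kprobe(): caller must already hold text_lock. */
static void arm_raw(void)
{
	/* ... patch the breakpoint in ... */
}

/* Analogue of arm_kprobe(): safe to call with no locks held. */
static void arm(void)
{
	pthread_mutex_lock(&text_lock);
	arm_raw();
	pthread_mutex_unlock(&text_lock);
}

int main(void)
{
	arm();				/* lock-free caller, as in the moved code */
	pthread_mutex_lock(&text_lock);
	arm_raw();			/* caller already inside the locked region */
	pthread_mutex_unlock(&text_lock);
	return 0;
}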
@@ -1189,11 +1191,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	/*
+	 * Get online CPUs to avoid text_mutex deadlock with stop machine,
+	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap)
-			return -ENOMEM;
+		if (!ap) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap))
 		/* This probe is going to die. Rescue it */
@@ -1213,7 +1226,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	 * free aggr_probe. It will be used next time, or
 	 * freed by unregister_kprobe.
 	 */
-	return ret;
+	goto out;
 
 	/* Prepare optimized instructions if possible. */
 	prepare_optimized_kprobe(ap);
@@ -1228,7 +1241,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 
 	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
-	return add_new_kprobe(ap, p);
+	ret = add_new_kprobe(ap, p);
+
+out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	jump_label_unlock();
+
+	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
+	return ret;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
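
The comment added in the earlier register_aggr_kprobe() hunk notes that
get_online_cpus() is taken before text_mutex because unoptimize_kprobe() can
reach stop_machine(). The underlying point is a consistent acquisition order
across all paths. A hedged userspace sketch of why a fixed order avoids an
ABBA deadlock (lock and function names are illustrative):

/* Hedged sketch: fixed lock ordering prevents ABBA; userspace analogy. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpu_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~cpu hotplug */
static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER; /* ~text_mutex  */

/* Every path takes cpu_lock first, then text_lock. If one path took them
 * in the opposite order, two threads could each hold one lock and wait
 * forever for the other: the ABBA pattern. */
static void registration_path(void)
{
	pthread_mutex_lock(&cpu_lock);
	pthread_mutex_lock(&text_lock);
	/* ... may trigger a stop_machine-like step that needs CPUs pinned ... */
	pthread_mutex_unlock(&text_lock);
	pthread_mutex_unlock(&cpu_lock);
}

static void patching_path(void)
{
	pthread_mutex_lock(&cpu_lock);	/* same order: no cycle possible */
	pthread_mutex_lock(&text_lock);
	pthread_mutex_unlock(&text_lock);
	pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
	registration_path();
	patching_path();
	puts("consistent lock order");
	return 0;
}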
@@ -1387,10 +1413,6 @@ int __kprobes register_kprobe(struct kprobe *p)
 		return ret;
 
 	mutex_lock(&kprobe_mutex);
-	jump_label_lock(); /* needed to call jump_label_text_reserved() */
-
-	get_online_cpus(); /* For avoiding text_mutex deadlock. */
-	mutex_lock(&text_mutex);
 
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
@@ -1399,7 +1421,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
+	mutex_lock(&text_mutex);	/* Avoiding text modification */
 	ret = arch_prepare_kprobe(p);
+	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
 
@@ -1408,15 +1432,12 @@ int __kprobes register_kprobe(struct kprobe *p)
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		__arm_kprobe(p);
+		arm_kprobe(p);
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
 
 out:
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
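
Taken together, the register_kprobe() hunks leave kprobe_mutex covering the
whole registration while the other locks shrink to the steps that need them.
A hedged outline of the resulting lock scopes (an illustrative summary, not
the literal source):

/*
 * register_kprobe()                     lock scopes after this patch (sketch)
 *   mutex_lock(&kprobe_mutex)           table lookup and insertion
 *     mutex_lock(&text_mutex)           only around arch_prepare_kprobe()
 *     mutex_unlock(&text_mutex)
 *     arm_kprobe(p)                     takes text_mutex internally
 *     try_to_optimize_kprobe(p)         takes jump label lock + text_mutex itself
 *   mutex_unlock(&kprobe_mutex)
 */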