aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/kprobes.c
diff options
context:
space:
mode:
authorMasami Hiramatsu <masami.hiramatsu.pt@hitachi.com>2014-04-17 04:17:54 -0400
committerIngo Molnar <mingo@kernel.org>2014-04-24 04:03:01 -0400
commit55479f64756fc508182a05e35e52f01395a50d4d (patch)
tree7b54c148e68e74f38108edde74a2408c1d04fe2c /kernel/kprobes.c
parent7ec8a97a990da8e3ba87175a757731e17f74072e (diff)
kprobes: Allow probe on some kprobe functions
There is no need to prohibit probing on the functions used for preparation, registration, optimization, control, etc. Those can be safely probed because they are not invoked from breakpoint/fault/debug handlers, so there is no chance of causing recursive exceptions. The following functions are now removed from the kprobes blacklist: add_new_kprobe aggr_kprobe_disabled alloc_aggr_kprobe alloc_aggr_kprobe arm_all_kprobes __arm_kprobe arm_kprobe arm_kprobe_ftrace check_kprobe_address_safe collect_garbage_slots collect_garbage_slots collect_one_slot debugfs_kprobe_init __disable_kprobe disable_kprobe disarm_all_kprobes __disarm_kprobe disarm_kprobe disarm_kprobe_ftrace do_free_cleaned_kprobes do_optimize_kprobes do_unoptimize_kprobes enable_kprobe force_unoptimize_kprobe free_aggr_kprobe free_aggr_kprobe __free_insn_slot __get_insn_slot get_optimized_kprobe __get_valid_kprobe init_aggr_kprobe init_aggr_kprobe in_nokprobe_functions kick_kprobe_optimizer kill_kprobe kill_optimized_kprobe kprobe_addr kprobe_optimizer kprobe_queued kprobe_seq_next kprobe_seq_start kprobe_seq_stop kprobes_module_callback kprobes_open optimize_all_kprobes optimize_kprobe prepare_kprobe prepare_optimized_kprobe register_aggr_kprobe register_jprobe register_jprobes register_kprobe register_kprobes register_kretprobe register_kretprobe register_kretprobes register_kretprobes report_probe show_kprobe_addr try_to_optimize_kprobe unoptimize_all_kprobes unoptimize_kprobe unregister_jprobe unregister_jprobes unregister_kprobe __unregister_kprobe_bottom unregister_kprobes __unregister_kprobe_top unregister_kretprobe unregister_kretprobe unregister_kretprobes unregister_kretprobes wait_for_kprobe_optimizer I tested those functions by putting kprobes on all instructions in the functions with the bash script I sent to LKML. 
See: https://lkml.org/lkml/2014/3/27/33 Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Link: http://lkml.kernel.org/r/20140417081753.26341.57889.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: fche@redhat.com Cc: systemtap@sourceware.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--kernel/kprobes.c153
1 files changed, 76 insertions, 77 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5ffc6875d2a7..4db2cc616f50 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -138,13 +138,13 @@ struct kprobe_insn_cache kprobe_insn_slots = {
138 .insn_size = MAX_INSN_SIZE, 138 .insn_size = MAX_INSN_SIZE,
139 .nr_garbage = 0, 139 .nr_garbage = 0,
140}; 140};
141static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c); 141static int collect_garbage_slots(struct kprobe_insn_cache *c);
142 142
143/** 143/**
144 * __get_insn_slot() - Find a slot on an executable page for an instruction. 144 * __get_insn_slot() - Find a slot on an executable page for an instruction.
145 * We allocate an executable page if there's no room on existing ones. 145 * We allocate an executable page if there's no room on existing ones.
146 */ 146 */
147kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) 147kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
148{ 148{
149 struct kprobe_insn_page *kip; 149 struct kprobe_insn_page *kip;
150 kprobe_opcode_t *slot = NULL; 150 kprobe_opcode_t *slot = NULL;
@@ -201,7 +201,7 @@ out:
201} 201}
202 202
203/* Return 1 if all garbages are collected, otherwise 0. */ 203/* Return 1 if all garbages are collected, otherwise 0. */
204static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) 204static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
205{ 205{
206 kip->slot_used[idx] = SLOT_CLEAN; 206 kip->slot_used[idx] = SLOT_CLEAN;
207 kip->nused--; 207 kip->nused--;
@@ -222,7 +222,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
222 return 0; 222 return 0;
223} 223}
224 224
225static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c) 225static int collect_garbage_slots(struct kprobe_insn_cache *c)
226{ 226{
227 struct kprobe_insn_page *kip, *next; 227 struct kprobe_insn_page *kip, *next;
228 228
@@ -244,8 +244,8 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
244 return 0; 244 return 0;
245} 245}
246 246
247void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, 247void __free_insn_slot(struct kprobe_insn_cache *c,
248 kprobe_opcode_t *slot, int dirty) 248 kprobe_opcode_t *slot, int dirty)
249{ 249{
250 struct kprobe_insn_page *kip; 250 struct kprobe_insn_page *kip;
251 251
@@ -361,7 +361,7 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
361} 361}
362 362
363/* Free optimized instructions and optimized_kprobe */ 363/* Free optimized instructions and optimized_kprobe */
364static __kprobes void free_aggr_kprobe(struct kprobe *p) 364static void free_aggr_kprobe(struct kprobe *p)
365{ 365{
366 struct optimized_kprobe *op; 366 struct optimized_kprobe *op;
367 367
@@ -399,7 +399,7 @@ static inline int kprobe_disarmed(struct kprobe *p)
399} 399}
400 400
401/* Return true(!0) if the probe is queued on (un)optimizing lists */ 401/* Return true(!0) if the probe is queued on (un)optimizing lists */
402static int __kprobes kprobe_queued(struct kprobe *p) 402static int kprobe_queued(struct kprobe *p)
403{ 403{
404 struct optimized_kprobe *op; 404 struct optimized_kprobe *op;
405 405
@@ -415,7 +415,7 @@ static int __kprobes kprobe_queued(struct kprobe *p)
415 * Return an optimized kprobe whose optimizing code replaces 415 * Return an optimized kprobe whose optimizing code replaces
416 * instructions including addr (exclude breakpoint). 416 * instructions including addr (exclude breakpoint).
417 */ 417 */
418static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) 418static struct kprobe *get_optimized_kprobe(unsigned long addr)
419{ 419{
420 int i; 420 int i;
421 struct kprobe *p = NULL; 421 struct kprobe *p = NULL;
@@ -447,7 +447,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
447 * Optimize (replace a breakpoint with a jump) kprobes listed on 447 * Optimize (replace a breakpoint with a jump) kprobes listed on
448 * optimizing_list. 448 * optimizing_list.
449 */ 449 */
450static __kprobes void do_optimize_kprobes(void) 450static void do_optimize_kprobes(void)
451{ 451{
452 /* Optimization never be done when disarmed */ 452 /* Optimization never be done when disarmed */
453 if (kprobes_all_disarmed || !kprobes_allow_optimization || 453 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
@@ -475,7 +475,7 @@ static __kprobes void do_optimize_kprobes(void)
475 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 475 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
476 * if need) kprobes listed on unoptimizing_list. 476 * if need) kprobes listed on unoptimizing_list.
477 */ 477 */
478static __kprobes void do_unoptimize_kprobes(void) 478static void do_unoptimize_kprobes(void)
479{ 479{
480 struct optimized_kprobe *op, *tmp; 480 struct optimized_kprobe *op, *tmp;
481 481
@@ -507,7 +507,7 @@ static __kprobes void do_unoptimize_kprobes(void)
507} 507}
508 508
509/* Reclaim all kprobes on the free_list */ 509/* Reclaim all kprobes on the free_list */
510static __kprobes void do_free_cleaned_kprobes(void) 510static void do_free_cleaned_kprobes(void)
511{ 511{
512 struct optimized_kprobe *op, *tmp; 512 struct optimized_kprobe *op, *tmp;
513 513
@@ -519,13 +519,13 @@ static __kprobes void do_free_cleaned_kprobes(void)
519} 519}
520 520
521/* Start optimizer after OPTIMIZE_DELAY passed */ 521/* Start optimizer after OPTIMIZE_DELAY passed */
522static __kprobes void kick_kprobe_optimizer(void) 522static void kick_kprobe_optimizer(void)
523{ 523{
524 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 524 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
525} 525}
526 526
527/* Kprobe jump optimizer */ 527/* Kprobe jump optimizer */
528static __kprobes void kprobe_optimizer(struct work_struct *work) 528static void kprobe_optimizer(struct work_struct *work)
529{ 529{
530 mutex_lock(&kprobe_mutex); 530 mutex_lock(&kprobe_mutex);
531 /* Lock modules while optimizing kprobes */ 531 /* Lock modules while optimizing kprobes */
@@ -561,7 +561,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
561} 561}
562 562
563/* Wait for completing optimization and unoptimization */ 563/* Wait for completing optimization and unoptimization */
564static __kprobes void wait_for_kprobe_optimizer(void) 564static void wait_for_kprobe_optimizer(void)
565{ 565{
566 mutex_lock(&kprobe_mutex); 566 mutex_lock(&kprobe_mutex);
567 567
@@ -580,7 +580,7 @@ static __kprobes void wait_for_kprobe_optimizer(void)
580} 580}
581 581
582/* Optimize kprobe if p is ready to be optimized */ 582/* Optimize kprobe if p is ready to be optimized */
583static __kprobes void optimize_kprobe(struct kprobe *p) 583static void optimize_kprobe(struct kprobe *p)
584{ 584{
585 struct optimized_kprobe *op; 585 struct optimized_kprobe *op;
586 586
@@ -614,7 +614,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
614} 614}
615 615
616/* Short cut to direct unoptimizing */ 616/* Short cut to direct unoptimizing */
617static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) 617static void force_unoptimize_kprobe(struct optimized_kprobe *op)
618{ 618{
619 get_online_cpus(); 619 get_online_cpus();
620 arch_unoptimize_kprobe(op); 620 arch_unoptimize_kprobe(op);
@@ -624,7 +624,7 @@ static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
624} 624}
625 625
626/* Unoptimize a kprobe if p is optimized */ 626/* Unoptimize a kprobe if p is optimized */
627static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force) 627static void unoptimize_kprobe(struct kprobe *p, bool force)
628{ 628{
629 struct optimized_kprobe *op; 629 struct optimized_kprobe *op;
630 630
@@ -684,7 +684,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
684} 684}
685 685
686/* Remove optimized instructions */ 686/* Remove optimized instructions */
687static void __kprobes kill_optimized_kprobe(struct kprobe *p) 687static void kill_optimized_kprobe(struct kprobe *p)
688{ 688{
689 struct optimized_kprobe *op; 689 struct optimized_kprobe *op;
690 690
@@ -710,7 +710,7 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
710} 710}
711 711
712/* Try to prepare optimized instructions */ 712/* Try to prepare optimized instructions */
713static __kprobes void prepare_optimized_kprobe(struct kprobe *p) 713static void prepare_optimized_kprobe(struct kprobe *p)
714{ 714{
715 struct optimized_kprobe *op; 715 struct optimized_kprobe *op;
716 716
@@ -719,7 +719,7 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
719} 719}
720 720
721/* Allocate new optimized_kprobe and try to prepare optimized instructions */ 721/* Allocate new optimized_kprobe and try to prepare optimized instructions */
722static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 722static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
723{ 723{
724 struct optimized_kprobe *op; 724 struct optimized_kprobe *op;
725 725
@@ -734,13 +734,13 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
734 return &op->kp; 734 return &op->kp;
735} 735}
736 736
737static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 737static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
738 738
739/* 739/*
740 * Prepare an optimized_kprobe and optimize it 740 * Prepare an optimized_kprobe and optimize it
741 * NOTE: p must be a normal registered kprobe 741 * NOTE: p must be a normal registered kprobe
742 */ 742 */
743static __kprobes void try_to_optimize_kprobe(struct kprobe *p) 743static void try_to_optimize_kprobe(struct kprobe *p)
744{ 744{
745 struct kprobe *ap; 745 struct kprobe *ap;
746 struct optimized_kprobe *op; 746 struct optimized_kprobe *op;
@@ -774,7 +774,7 @@ out:
774} 774}
775 775
776#ifdef CONFIG_SYSCTL 776#ifdef CONFIG_SYSCTL
777static void __kprobes optimize_all_kprobes(void) 777static void optimize_all_kprobes(void)
778{ 778{
779 struct hlist_head *head; 779 struct hlist_head *head;
780 struct kprobe *p; 780 struct kprobe *p;
@@ -797,7 +797,7 @@ out:
797 mutex_unlock(&kprobe_mutex); 797 mutex_unlock(&kprobe_mutex);
798} 798}
799 799
800static void __kprobes unoptimize_all_kprobes(void) 800static void unoptimize_all_kprobes(void)
801{ 801{
802 struct hlist_head *head; 802 struct hlist_head *head;
803 struct kprobe *p; 803 struct kprobe *p;
@@ -848,7 +848,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
848#endif /* CONFIG_SYSCTL */ 848#endif /* CONFIG_SYSCTL */
849 849
850/* Put a breakpoint for a probe. Must be called with text_mutex locked */ 850/* Put a breakpoint for a probe. Must be called with text_mutex locked */
851static void __kprobes __arm_kprobe(struct kprobe *p) 851static void __arm_kprobe(struct kprobe *p)
852{ 852{
853 struct kprobe *_p; 853 struct kprobe *_p;
854 854
@@ -863,7 +863,7 @@ static void __kprobes __arm_kprobe(struct kprobe *p)
863} 863}
864 864
865/* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 865/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
866static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) 866static void __disarm_kprobe(struct kprobe *p, bool reopt)
867{ 867{
868 struct kprobe *_p; 868 struct kprobe *_p;
869 869
@@ -898,13 +898,13 @@ static void reuse_unused_kprobe(struct kprobe *ap)
898 BUG_ON(kprobe_unused(ap)); 898 BUG_ON(kprobe_unused(ap));
899} 899}
900 900
901static __kprobes void free_aggr_kprobe(struct kprobe *p) 901static void free_aggr_kprobe(struct kprobe *p)
902{ 902{
903 arch_remove_kprobe(p); 903 arch_remove_kprobe(p);
904 kfree(p); 904 kfree(p);
905} 905}
906 906
907static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 907static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
908{ 908{
909 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 909 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
910} 910}
@@ -918,7 +918,7 @@ static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
918static int kprobe_ftrace_enabled; 918static int kprobe_ftrace_enabled;
919 919
920/* Must ensure p->addr is really on ftrace */ 920/* Must ensure p->addr is really on ftrace */
921static int __kprobes prepare_kprobe(struct kprobe *p) 921static int prepare_kprobe(struct kprobe *p)
922{ 922{
923 if (!kprobe_ftrace(p)) 923 if (!kprobe_ftrace(p))
924 return arch_prepare_kprobe(p); 924 return arch_prepare_kprobe(p);
@@ -927,7 +927,7 @@ static int __kprobes prepare_kprobe(struct kprobe *p)
927} 927}
928 928
929/* Caller must lock kprobe_mutex */ 929/* Caller must lock kprobe_mutex */
930static void __kprobes arm_kprobe_ftrace(struct kprobe *p) 930static void arm_kprobe_ftrace(struct kprobe *p)
931{ 931{
932 int ret; 932 int ret;
933 933
@@ -942,7 +942,7 @@ static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
942} 942}
943 943
944/* Caller must lock kprobe_mutex */ 944/* Caller must lock kprobe_mutex */
945static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) 945static void disarm_kprobe_ftrace(struct kprobe *p)
946{ 946{
947 int ret; 947 int ret;
948 948
@@ -962,7 +962,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
962#endif 962#endif
963 963
964/* Arm a kprobe with text_mutex */ 964/* Arm a kprobe with text_mutex */
965static void __kprobes arm_kprobe(struct kprobe *kp) 965static void arm_kprobe(struct kprobe *kp)
966{ 966{
967 if (unlikely(kprobe_ftrace(kp))) { 967 if (unlikely(kprobe_ftrace(kp))) {
968 arm_kprobe_ftrace(kp); 968 arm_kprobe_ftrace(kp);
@@ -979,7 +979,7 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
979} 979}
980 980
981/* Disarm a kprobe with text_mutex */ 981/* Disarm a kprobe with text_mutex */
982static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt) 982static void disarm_kprobe(struct kprobe *kp, bool reopt)
983{ 983{
984 if (unlikely(kprobe_ftrace(kp))) { 984 if (unlikely(kprobe_ftrace(kp))) {
985 disarm_kprobe_ftrace(kp); 985 disarm_kprobe_ftrace(kp);
@@ -1189,7 +1189,7 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1189* Add the new probe to ap->list. Fail if this is the 1189* Add the new probe to ap->list. Fail if this is the
1190* second jprobe at the address - two jprobes can't coexist 1190* second jprobe at the address - two jprobes can't coexist
1191*/ 1191*/
1192static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1192static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1193{ 1193{
1194 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 1194 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1195 1195
@@ -1213,7 +1213,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1213 * Fill in the required fields of the "manager kprobe". Replace the 1213 * Fill in the required fields of the "manager kprobe". Replace the
1214 * earlier kprobe in the hlist with the manager kprobe 1214 * earlier kprobe in the hlist with the manager kprobe
1215 */ 1215 */
1216static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1216static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1217{ 1217{
1218 /* Copy p's insn slot to ap */ 1218 /* Copy p's insn slot to ap */
1219 copy_kprobe(p, ap); 1219 copy_kprobe(p, ap);
@@ -1239,8 +1239,7 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1239 * This is the second or subsequent kprobe at the address - handle 1239 * This is the second or subsequent kprobe at the address - handle
1240 * the intricacies 1240 * the intricacies
1241 */ 1241 */
1242static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, 1242static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1243 struct kprobe *p)
1244{ 1243{
1245 int ret = 0; 1244 int ret = 0;
1246 struct kprobe *ap = orig_p; 1245 struct kprobe *ap = orig_p;
@@ -1318,7 +1317,7 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1318 addr < (unsigned long)__kprobes_text_end; 1317 addr < (unsigned long)__kprobes_text_end;
1319} 1318}
1320 1319
1321static bool __kprobes within_kprobe_blacklist(unsigned long addr) 1320static bool within_kprobe_blacklist(unsigned long addr)
1322{ 1321{
1323 struct kprobe_blacklist_entry *ent; 1322 struct kprobe_blacklist_entry *ent;
1324 1323
@@ -1342,7 +1341,7 @@ static bool __kprobes within_kprobe_blacklist(unsigned long addr)
1342 * This returns encoded errors if it fails to look up symbol or invalid 1341 * This returns encoded errors if it fails to look up symbol or invalid
1343 * combination of parameters. 1342 * combination of parameters.
1344 */ 1343 */
1345static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) 1344static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1346{ 1345{
1347 kprobe_opcode_t *addr = p->addr; 1346 kprobe_opcode_t *addr = p->addr;
1348 1347
@@ -1365,7 +1364,7 @@ invalid:
1365} 1364}
1366 1365
1367/* Check passed kprobe is valid and return kprobe in kprobe_table. */ 1366/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1368static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) 1367static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1369{ 1368{
1370 struct kprobe *ap, *list_p; 1369 struct kprobe *ap, *list_p;
1371 1370
@@ -1397,8 +1396,8 @@ static inline int check_kprobe_rereg(struct kprobe *p)
1397 return ret; 1396 return ret;
1398} 1397}
1399 1398
1400static __kprobes int check_kprobe_address_safe(struct kprobe *p, 1399static int check_kprobe_address_safe(struct kprobe *p,
1401 struct module **probed_mod) 1400 struct module **probed_mod)
1402{ 1401{
1403 int ret = 0; 1402 int ret = 0;
1404 unsigned long ftrace_addr; 1403 unsigned long ftrace_addr;
@@ -1460,7 +1459,7 @@ out:
1460 return ret; 1459 return ret;
1461} 1460}
1462 1461
1463int __kprobes register_kprobe(struct kprobe *p) 1462int register_kprobe(struct kprobe *p)
1464{ 1463{
1465 int ret; 1464 int ret;
1466 struct kprobe *old_p; 1465 struct kprobe *old_p;
@@ -1522,7 +1521,7 @@ out:
1522EXPORT_SYMBOL_GPL(register_kprobe); 1521EXPORT_SYMBOL_GPL(register_kprobe);
1523 1522
1524/* Check if all probes on the aggrprobe are disabled */ 1523/* Check if all probes on the aggrprobe are disabled */
1525static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) 1524static int aggr_kprobe_disabled(struct kprobe *ap)
1526{ 1525{
1527 struct kprobe *kp; 1526 struct kprobe *kp;
1528 1527
@@ -1538,7 +1537,7 @@ static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1538} 1537}
1539 1538
1540/* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1539/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
1541static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) 1540static struct kprobe *__disable_kprobe(struct kprobe *p)
1542{ 1541{
1543 struct kprobe *orig_p; 1542 struct kprobe *orig_p;
1544 1543
@@ -1565,7 +1564,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1565/* 1564/*
1566 * Unregister a kprobe without a scheduler synchronization. 1565 * Unregister a kprobe without a scheduler synchronization.
1567 */ 1566 */
1568static int __kprobes __unregister_kprobe_top(struct kprobe *p) 1567static int __unregister_kprobe_top(struct kprobe *p)
1569{ 1568{
1570 struct kprobe *ap, *list_p; 1569 struct kprobe *ap, *list_p;
1571 1570
@@ -1622,7 +1621,7 @@ disarmed:
1622 return 0; 1621 return 0;
1623} 1622}
1624 1623
1625static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) 1624static void __unregister_kprobe_bottom(struct kprobe *p)
1626{ 1625{
1627 struct kprobe *ap; 1626 struct kprobe *ap;
1628 1627
@@ -1638,7 +1637,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1638 /* Otherwise, do nothing. */ 1637 /* Otherwise, do nothing. */
1639} 1638}
1640 1639
1641int __kprobes register_kprobes(struct kprobe **kps, int num) 1640int register_kprobes(struct kprobe **kps, int num)
1642{ 1641{
1643 int i, ret = 0; 1642 int i, ret = 0;
1644 1643
@@ -1656,13 +1655,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
1656} 1655}
1657EXPORT_SYMBOL_GPL(register_kprobes); 1656EXPORT_SYMBOL_GPL(register_kprobes);
1658 1657
1659void __kprobes unregister_kprobe(struct kprobe *p) 1658void unregister_kprobe(struct kprobe *p)
1660{ 1659{
1661 unregister_kprobes(&p, 1); 1660 unregister_kprobes(&p, 1);
1662} 1661}
1663EXPORT_SYMBOL_GPL(unregister_kprobe); 1662EXPORT_SYMBOL_GPL(unregister_kprobe);
1664 1663
1665void __kprobes unregister_kprobes(struct kprobe **kps, int num) 1664void unregister_kprobes(struct kprobe **kps, int num)
1666{ 1665{
1667 int i; 1666 int i;
1668 1667
@@ -1691,7 +1690,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
1691 return (unsigned long)entry; 1690 return (unsigned long)entry;
1692} 1691}
1693 1692
1694int __kprobes register_jprobes(struct jprobe **jps, int num) 1693int register_jprobes(struct jprobe **jps, int num)
1695{ 1694{
1696 struct jprobe *jp; 1695 struct jprobe *jp;
1697 int ret = 0, i; 1696 int ret = 0, i;
@@ -1722,19 +1721,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
1722} 1721}
1723EXPORT_SYMBOL_GPL(register_jprobes); 1722EXPORT_SYMBOL_GPL(register_jprobes);
1724 1723
1725int __kprobes register_jprobe(struct jprobe *jp) 1724int register_jprobe(struct jprobe *jp)
1726{ 1725{
1727 return register_jprobes(&jp, 1); 1726 return register_jprobes(&jp, 1);
1728} 1727}
1729EXPORT_SYMBOL_GPL(register_jprobe); 1728EXPORT_SYMBOL_GPL(register_jprobe);
1730 1729
1731void __kprobes unregister_jprobe(struct jprobe *jp) 1730void unregister_jprobe(struct jprobe *jp)
1732{ 1731{
1733 unregister_jprobes(&jp, 1); 1732 unregister_jprobes(&jp, 1);
1734} 1733}
1735EXPORT_SYMBOL_GPL(unregister_jprobe); 1734EXPORT_SYMBOL_GPL(unregister_jprobe);
1736 1735
1737void __kprobes unregister_jprobes(struct jprobe **jps, int num) 1736void unregister_jprobes(struct jprobe **jps, int num)
1738{ 1737{
1739 int i; 1738 int i;
1740 1739
@@ -1799,7 +1798,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1799 return 0; 1798 return 0;
1800} 1799}
1801 1800
1802int __kprobes register_kretprobe(struct kretprobe *rp) 1801int register_kretprobe(struct kretprobe *rp)
1803{ 1802{
1804 int ret = 0; 1803 int ret = 0;
1805 struct kretprobe_instance *inst; 1804 struct kretprobe_instance *inst;
@@ -1852,7 +1851,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
1852} 1851}
1853EXPORT_SYMBOL_GPL(register_kretprobe); 1852EXPORT_SYMBOL_GPL(register_kretprobe);
1854 1853
1855int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1854int register_kretprobes(struct kretprobe **rps, int num)
1856{ 1855{
1857 int ret = 0, i; 1856 int ret = 0, i;
1858 1857
@@ -1870,13 +1869,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1870} 1869}
1871EXPORT_SYMBOL_GPL(register_kretprobes); 1870EXPORT_SYMBOL_GPL(register_kretprobes);
1872 1871
1873void __kprobes unregister_kretprobe(struct kretprobe *rp) 1872void unregister_kretprobe(struct kretprobe *rp)
1874{ 1873{
1875 unregister_kretprobes(&rp, 1); 1874 unregister_kretprobes(&rp, 1);
1876} 1875}
1877EXPORT_SYMBOL_GPL(unregister_kretprobe); 1876EXPORT_SYMBOL_GPL(unregister_kretprobe);
1878 1877
1879void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1878void unregister_kretprobes(struct kretprobe **rps, int num)
1880{ 1879{
1881 int i; 1880 int i;
1882 1881
@@ -1899,24 +1898,24 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1899EXPORT_SYMBOL_GPL(unregister_kretprobes); 1898EXPORT_SYMBOL_GPL(unregister_kretprobes);
1900 1899
1901#else /* CONFIG_KRETPROBES */ 1900#else /* CONFIG_KRETPROBES */
1902int __kprobes register_kretprobe(struct kretprobe *rp) 1901int register_kretprobe(struct kretprobe *rp)
1903{ 1902{
1904 return -ENOSYS; 1903 return -ENOSYS;
1905} 1904}
1906EXPORT_SYMBOL_GPL(register_kretprobe); 1905EXPORT_SYMBOL_GPL(register_kretprobe);
1907 1906
1908int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1907int register_kretprobes(struct kretprobe **rps, int num)
1909{ 1908{
1910 return -ENOSYS; 1909 return -ENOSYS;
1911} 1910}
1912EXPORT_SYMBOL_GPL(register_kretprobes); 1911EXPORT_SYMBOL_GPL(register_kretprobes);
1913 1912
1914void __kprobes unregister_kretprobe(struct kretprobe *rp) 1913void unregister_kretprobe(struct kretprobe *rp)
1915{ 1914{
1916} 1915}
1917EXPORT_SYMBOL_GPL(unregister_kretprobe); 1916EXPORT_SYMBOL_GPL(unregister_kretprobe);
1918 1917
1919void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1918void unregister_kretprobes(struct kretprobe **rps, int num)
1920{ 1919{
1921} 1920}
1922EXPORT_SYMBOL_GPL(unregister_kretprobes); 1921EXPORT_SYMBOL_GPL(unregister_kretprobes);
@@ -1930,7 +1929,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1930#endif /* CONFIG_KRETPROBES */ 1929#endif /* CONFIG_KRETPROBES */
1931 1930
1932/* Set the kprobe gone and remove its instruction buffer. */ 1931/* Set the kprobe gone and remove its instruction buffer. */
1933static void __kprobes kill_kprobe(struct kprobe *p) 1932static void kill_kprobe(struct kprobe *p)
1934{ 1933{
1935 struct kprobe *kp; 1934 struct kprobe *kp;
1936 1935
@@ -1954,7 +1953,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1954} 1953}
1955 1954
1956/* Disable one kprobe */ 1955/* Disable one kprobe */
1957int __kprobes disable_kprobe(struct kprobe *kp) 1956int disable_kprobe(struct kprobe *kp)
1958{ 1957{
1959 int ret = 0; 1958 int ret = 0;
1960 1959
@@ -1970,7 +1969,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
1970EXPORT_SYMBOL_GPL(disable_kprobe); 1969EXPORT_SYMBOL_GPL(disable_kprobe);
1971 1970
1972/* Enable one kprobe */ 1971/* Enable one kprobe */
1973int __kprobes enable_kprobe(struct kprobe *kp) 1972int enable_kprobe(struct kprobe *kp)
1974{ 1973{
1975 int ret = 0; 1974 int ret = 0;
1976 struct kprobe *p; 1975 struct kprobe *p;
@@ -2043,8 +2042,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
2043} 2042}
2044 2043
2045/* Module notifier call back, checking kprobes on the module */ 2044/* Module notifier call back, checking kprobes on the module */
2046static int __kprobes kprobes_module_callback(struct notifier_block *nb, 2045static int kprobes_module_callback(struct notifier_block *nb,
2047 unsigned long val, void *data) 2046 unsigned long val, void *data)
2048{ 2047{
2049 struct module *mod = data; 2048 struct module *mod = data;
2050 struct hlist_head *head; 2049 struct hlist_head *head;
@@ -2145,7 +2144,7 @@ static int __init init_kprobes(void)
2145} 2144}
2146 2145
2147#ifdef CONFIG_DEBUG_FS 2146#ifdef CONFIG_DEBUG_FS
2148static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, 2147static void report_probe(struct seq_file *pi, struct kprobe *p,
2149 const char *sym, int offset, char *modname, struct kprobe *pp) 2148 const char *sym, int offset, char *modname, struct kprobe *pp)
2150{ 2149{
2151 char *kprobe_type; 2150 char *kprobe_type;
@@ -2174,12 +2173,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2174 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2173 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2175} 2174}
2176 2175
2177static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2176static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2178{ 2177{
2179 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2178 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2180} 2179}
2181 2180
2182static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2181static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2183{ 2182{
2184 (*pos)++; 2183 (*pos)++;
2185 if (*pos >= KPROBE_TABLE_SIZE) 2184 if (*pos >= KPROBE_TABLE_SIZE)
@@ -2187,12 +2186,12 @@ static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2187 return pos; 2186 return pos;
2188} 2187}
2189 2188
2190static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) 2189static void kprobe_seq_stop(struct seq_file *f, void *v)
2191{ 2190{
2192 /* Nothing to do */ 2191 /* Nothing to do */
2193} 2192}
2194 2193
2195static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 2194static int show_kprobe_addr(struct seq_file *pi, void *v)
2196{ 2195{
2197 struct hlist_head *head; 2196 struct hlist_head *head;
2198 struct kprobe *p, *kp; 2197 struct kprobe *p, *kp;
@@ -2223,7 +2222,7 @@ static const struct seq_operations kprobes_seq_ops = {
2223 .show = show_kprobe_addr 2222 .show = show_kprobe_addr
2224}; 2223};
2225 2224
2226static int __kprobes kprobes_open(struct inode *inode, struct file *filp) 2225static int kprobes_open(struct inode *inode, struct file *filp)
2227{ 2226{
2228 return seq_open(filp, &kprobes_seq_ops); 2227 return seq_open(filp, &kprobes_seq_ops);
2229} 2228}
@@ -2235,7 +2234,7 @@ static const struct file_operations debugfs_kprobes_operations = {
2235 .release = seq_release, 2234 .release = seq_release,
2236}; 2235};
2237 2236
2238static void __kprobes arm_all_kprobes(void) 2237static void arm_all_kprobes(void)
2239{ 2238{
2240 struct hlist_head *head; 2239 struct hlist_head *head;
2241 struct kprobe *p; 2240 struct kprobe *p;
@@ -2263,7 +2262,7 @@ already_enabled:
2263 return; 2262 return;
2264} 2263}
2265 2264
2266static void __kprobes disarm_all_kprobes(void) 2265static void disarm_all_kprobes(void)
2267{ 2266{
2268 struct hlist_head *head; 2267 struct hlist_head *head;
2269 struct kprobe *p; 2268 struct kprobe *p;
@@ -2347,7 +2346,7 @@ static const struct file_operations fops_kp = {
2347 .llseek = default_llseek, 2346 .llseek = default_llseek,
2348}; 2347};
2349 2348
2350static int __kprobes debugfs_kprobe_init(void) 2349static int __init debugfs_kprobe_init(void)
2351{ 2350{
2352 struct dentry *dir, *file; 2351 struct dentry *dir, *file;
2353 unsigned int value = 1; 2352 unsigned int value = 1;