author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 22:18:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 22:18:49 -0400
commit		3737a12761636ebde0f09ef49daebb8eed18cc8a (patch)
tree		965057f4bccd97049f8c0140f8670c5d4278ca3e /kernel
parent		c29deef32e3699e40da3e9e82267610de04e6b54 (diff)
parent		82b897782d10fcc4930c9d4a15b175348fdd2871 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull more perf updates from Ingo Molnar:
 "A second round of perf updates:

  - wide reaching kprobes sanitization and robustization, with the hope
    of fixing all 'probe this function crashes the kernel' bugs, by
    Masami Hiramatsu.

  - uprobes updates from Oleg Nesterov: tmpfs support, corner case
    fixes and robustization work.

  - perf tooling updates and fixes from Jiri Olsa, Namhyung Kim,
    Arnaldo et al:
     * Add support to accumulate hist periods (Namhyung Kim)
     * various fixes, refactorings and enhancements"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (101 commits)
  perf: Differentiate exec() and non-exec() comm events
  perf: Fix perf_event_comm() vs. exec() assumption
  uprobes/x86: Rename arch_uprobe->def to ->defparam, minor comment updates
  perf/documentation: Add description for conditional branch filter
  perf/x86: Add conditional branch filtering support
  perf/tool: Add conditional branch filter 'cond' to perf record
  perf: Add new conditional branch filter 'PERF_SAMPLE_BRANCH_COND'
  uprobes: Teach copy_insn() to support tmpfs
  uprobes: Shift ->readpage check from __copy_insn() to uprobe_register()
  perf/x86: Use common PMU interrupt disabled code
  perf/ARM: Use common PMU interrupt disabled code
  perf: Disable sampled events if no PMU interrupt
  perf: Fix use after free in perf_remove_from_context()
  perf tools: Fix 'make help' message error
  perf record: Fix poll return value propagation
  perf tools: Move elide bool into perf_hpp_fmt struct
  perf tools: Remove elide setup for SORT_MODE__MEMORY mode
  perf tools: Fix "==" into "=" in ui_browser__warning assignment
  perf tools: Allow overriding sysfs and proc finding with env var
  perf tools: Consider header files outside perf directory in tags target
  ...
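Among the changes above, the new 'cond' branch filter is requested through the
standard perf_event_open() attribute interface. A minimal user-space sketch
(hypothetical example, not part of this merge; it assumes a >= 3.16 uapi header
that defines PERF_SAMPLE_BRANCH_COND):

	/* Hedged sketch: sample only conditional branches via the new
	 * PERF_SAMPLE_BRANCH_COND filter. Error handling is minimal. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int open_cond_branch_sampling(pid_t pid)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		/* New in this series: record only conditional branches */
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND |
					  PERF_SAMPLE_BRANCH_USER;

		/* returns a perf fd, or -1 with errno set */
		return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
	}

From the tooling side the same filter is reachable as "perf record -j cond,u <cmd>".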
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c			 43
-rw-r--r--	kernel/events/uprobes.c			 52
-rw-r--r--	kernel/kprobes.c			392
-rw-r--r--	kernel/notifier.c			 22
-rw-r--r--	kernel/sched/core.c			  7
-rw-r--r--	kernel/trace/trace_event_perf.c		  5
-rw-r--r--	kernel/trace/trace_kprobe.c		 71
-rw-r--r--	kernel/trace/trace_probe.c		 65
-rw-r--r--	kernel/trace/trace_probe.h		 15
-rw-r--r--	kernel/trace/trace_uprobe.c		 66
10 files changed, 428 insertions(+), 310 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 24d35cc38e42..5fa58e4cffac 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2974,6 +2974,22 @@ out:
 	local_irq_restore(flags);
 }
 
+void perf_event_exec(void)
+{
+	struct perf_event_context *ctx;
+	int ctxn;
+
+	rcu_read_lock();
+	for_each_task_context_nr(ctxn) {
+		ctx = current->perf_event_ctxp[ctxn];
+		if (!ctx)
+			continue;
+
+		perf_event_enable_on_exec(ctx);
+	}
+	rcu_read_unlock();
+}
+
 /*
  * Cross CPU call to read the hardware event
  */
@@ -5075,21 +5091,9 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 		       NULL);
 }
 
-void perf_event_comm(struct task_struct *task)
+void perf_event_comm(struct task_struct *task, bool exec)
 {
 	struct perf_comm_event comm_event;
-	struct perf_event_context *ctx;
-	int ctxn;
-
-	rcu_read_lock();
-	for_each_task_context_nr(ctxn) {
-		ctx = task->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
-		perf_event_enable_on_exec(ctx);
-	}
-	rcu_read_unlock();
 
 	if (!atomic_read(&nr_comm_events))
 		return;
@@ -5101,7 +5105,7 @@ void perf_event_comm(struct task_struct *task)
 		.event_id  = {
 			.header = {
 				.type = PERF_RECORD_COMM,
-				.misc = 0,
+				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
 				/* .size */
 			},
 			/* .pid */
@@ -7122,6 +7126,13 @@ SYSCALL_DEFINE5(perf_event_open,
 		}
 	}
 
+	if (is_sampling_event(event)) {
+		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
+			err = -ENOTSUPP;
+			goto err_alloc;
+		}
+	}
+
 	account_event(event);
 
 	/*
@@ -7433,7 +7444,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
-	struct perf_event *child_event;
+	struct perf_event *child_event, *next;
 	struct perf_event_context *child_ctx;
 	unsigned long flags;
 
@@ -7487,7 +7498,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 */
 	mutex_lock(&child_ctx->mutex);
 
-	list_for_each_entry_rcu(child_event, &child_ctx->event_list, event_entry)
+	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
 		__perf_event_exit_task(child_event, child_ctx, child);
 
 	mutex_unlock(&child_ctx->mutex);
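The exec flag threaded through perf_event_comm() above surfaces to user space
as PERF_RECORD_MISC_COMM_EXEC in the record header, letting tools tell an
exec() apart from a prctl(PR_SET_NAME) rename. A hedged consumer-side sketch
(hypothetical helper for a ring-buffer parser, not part of this diff):

	#include <linux/perf_event.h>
	#include <stdbool.h>

	/* True only for comm records generated by exec(), not by set-name. */
	static bool comm_event_is_exec(const struct perf_event_header *hdr)
	{
		return hdr->type == PERF_RECORD_COMM &&
		       (hdr->misc & PERF_RECORD_MISC_COMM_EXEC);
	}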
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index adcd76a96839..c445e392e93f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -36,6 +36,7 @@
 #include "../../mm/internal.h"	/* munlock_vma_page */
 #include <linux/percpu-rwsem.h>
 #include <linux/task_work.h>
+#include <linux/shmem_fs.h>
 
 #include <linux/uprobes.h>
 
@@ -127,7 +128,7 @@ struct xol_area {
  */
 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 {
-	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
+	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
 
 	if (is_register)
 		flags |= VM_WRITE;
@@ -279,18 +280,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
  * supported by that architecture then we need to modify is_trap_at_addr and
  * uprobe_write_opcode accordingly. This would never be a problem for archs
  * that have fixed length instructions.
- */
-
-/*
+ *
  * uprobe_write_opcode - write the opcode at a given virtual address.
  * @mm: the probed process address space.
  * @vaddr: the virtual address to store the opcode.
  * @opcode: opcode to be written at @vaddr.
  *
- * Called with mm->mmap_sem held (for read and with a reference to
- * mm).
- *
- * For mm @mm, write the opcode at @vaddr.
+ * Called with mm->mmap_sem held for write.
  * Return 0 (success) or a negative errno.
  */
 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
@@ -310,21 +306,25 @@ retry:
 	if (ret <= 0)
 		goto put_old;
 
+	ret = anon_vma_prepare(vma);
+	if (ret)
+		goto put_old;
+
 	ret = -ENOMEM;
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 	if (!new_page)
 		goto put_old;
 
-	__SetPageUptodate(new_page);
+	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
+		goto put_new;
 
+	__SetPageUptodate(new_page);
 	copy_highpage(new_page, old_page);
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
-	ret = anon_vma_prepare(vma);
-	if (ret)
-		goto put_new;
-
 	ret = __replace_page(vma, vaddr, old_page, new_page);
+	if (ret)
+		mem_cgroup_uncharge_page(new_page);
 
 put_new:
 	page_cache_release(new_page);
@@ -537,14 +537,15 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 			void *insn, int nbytes, loff_t offset)
 {
 	struct page *page;
-
-	if (!mapping->a_ops->readpage)
-		return -EIO;
 	/*
-	 * Ensure that the page that has the original instruction is
-	 * populated and in page-cache.
+	 * Ensure that the page that has the original instruction is populated
+	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
+	 * see uprobe_register().
 	 */
-	page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+	if (mapping->a_ops->readpage)
+		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+	else
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
@@ -880,6 +881,9 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 	if (!uc->handler && !uc->ret_handler)
 		return -EINVAL;
 
+	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
+	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
+		return -EIO;
 	/* Racy, just to catch the obvious mistakes */
 	if (offset > i_size_read(inode))
 		return -EINVAL;
@@ -1361,6 +1365,16 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
 }
 
+unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	if (unlikely(utask && utask->active_uprobe))
+		return utask->vaddr;
+
+	return instruction_pointer(regs);
+}
+
 /*
  * Called with no locks held.
  * Called in context of a exiting or a exec-ing thread.
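With the ->readpage check moved into uprobe_register() and __copy_insn()
falling back to shmem_read_mapping_page(), probes on tmpfs-backed binaries now
register cleanly. A hedged sketch of an in-kernel consumer (hypothetical module
code; resolving the inode and file offset of the probed instruction is assumed
to be done elsewhere):

	#include <linux/module.h>
	#include <linux/ptrace.h>
	#include <linux/uprobes.h>

	static int my_uprobe_handler(struct uprobe_consumer *uc,
				     struct pt_regs *regs)
	{
		pr_info("uprobe hit at %lx\n", instruction_pointer(regs));
		return 0;
	}

	static struct uprobe_consumer my_consumer = {
		.handler = my_uprobe_handler,
	};

	/* Works for both ->readpage-backed and shmem/tmpfs mappings now. */
	static int probe_inode(struct inode *inode, loff_t offset)
	{
		return uprobe_register(inode, offset, &my_consumer);
	}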
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ceeadfcabb76..3214289df5a7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -86,21 +86,8 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 	return &(kretprobe_table_locks[hash].lock);
 }
 
-/*
- * Normally, functions that we'd want to prohibit kprobes in, are marked
- * __kprobes. But, there are cases where such functions already belong to
- * a different section (__sched for preempt_schedule)
- *
- * For such cases, we now have a blacklist
- */
-static struct kprobe_blackpoint kprobe_blacklist[] = {
-	{"preempt_schedule",},
-	{"native_get_debugreg",},
-	{"irq_entries_start",},
-	{"common_interrupt",},
-	{"mcount",},	/* mcount can be called from everywhere */
-	{NULL}    /* Terminator */
-};
+/* Blacklist -- list of struct kprobe_blacklist_entry */
+static LIST_HEAD(kprobe_blacklist);
 
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
@@ -151,13 +138,13 @@ struct kprobe_insn_cache kprobe_insn_slots = {
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
 };
-static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
+static int collect_garbage_slots(struct kprobe_insn_cache *c);
 
 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
 	kprobe_opcode_t *slot = NULL;
@@ -214,7 +201,7 @@ out:
 }
 
 /* Return 1 if all garbages are collected, otherwise 0. */
-static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
 {
 	kip->slot_used[idx] = SLOT_CLEAN;
 	kip->nused--;
@@ -235,7 +222,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 	return 0;
 }
 
-static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
+static int collect_garbage_slots(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip, *next;
 
@@ -257,8 +244,8 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
-				kprobe_opcode_t *slot, int dirty)
+void __free_insn_slot(struct kprobe_insn_cache *c,
+		      kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
@@ -314,7 +301,7 @@ static inline void reset_kprobe_instance(void)
  * OR
  * - with preemption disabled - from arch/xxx/kernel/kprobes.c
  */
-struct kprobe __kprobes *get_kprobe(void *addr)
+struct kprobe *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
@@ -327,8 +314,9 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 
 	return NULL;
 }
+NOKPROBE_SYMBOL(get_kprobe);
 
-static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
 /* Return true if the kprobe is an aggregator */
 static inline int kprobe_aggrprobe(struct kprobe *p)
@@ -360,7 +348,7 @@ static bool kprobes_allow_optimization;
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
  */
-void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
@@ -372,9 +360,10 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 		reset_kprobe_instance();
 	}
 }
+NOKPROBE_SYMBOL(opt_pre_handler);
 
 /* Free optimized instructions and optimized_kprobe */
-static __kprobes void free_aggr_kprobe(struct kprobe *p)
+static void free_aggr_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -412,7 +401,7 @@ static inline int kprobe_disarmed(struct kprobe *p)
 }
 
 /* Return true(!0) if the probe is queued on (un)optimizing lists */
-static int __kprobes kprobe_queued(struct kprobe *p)
+static int kprobe_queued(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -428,7 +417,7 @@ static int __kprobes kprobe_queued(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *get_optimized_kprobe(unsigned long addr)
 {
 	int i;
 	struct kprobe *p = NULL;
@@ -460,7 +449,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  * Optimize (replace a breakpoint with a jump) kprobes listed on
  * optimizing_list.
  */
-static __kprobes void do_optimize_kprobes(void)
+static void do_optimize_kprobes(void)
 {
 	/* Optimization never be done when disarmed */
 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
@@ -488,7 +477,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(void)
+static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -520,7 +509,7 @@ static __kprobes void do_unoptimize_kprobes(void)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(void)
+static void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -532,13 +521,13 @@ static __kprobes void do_free_cleaned_kprobes(void)
 }
 
 /* Start optimizer after OPTIMIZE_DELAY passed */
-static __kprobes void kick_kprobe_optimizer(void)
+static void kick_kprobe_optimizer(void)
 {
 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
 /* Kprobe jump optimizer */
-static __kprobes void kprobe_optimizer(struct work_struct *work)
+static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
@@ -574,7 +563,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 }
 
 /* Wait for completing optimization and unoptimization */
-static __kprobes void wait_for_kprobe_optimizer(void)
+static void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
@@ -593,7 +582,7 @@ static __kprobes void wait_for_kprobe_optimizer(void)
 }
 
 /* Optimize kprobe if p is ready to be optimized */
-static __kprobes void optimize_kprobe(struct kprobe *p)
+static void optimize_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -627,7 +616,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
 }
 
 /* Short cut to direct unoptimizing */
-static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
+static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
 	get_online_cpus();
 	arch_unoptimize_kprobe(op);
@@ -637,7 +626,7 @@ static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
 }
 
 /* Unoptimize a kprobe if p is optimized */
-static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
+static void unoptimize_kprobe(struct kprobe *p, bool force)
 {
 	struct optimized_kprobe *op;
 
@@ -697,7 +686,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 }
 
 /* Remove optimized instructions */
-static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+static void kill_optimized_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -723,7 +712,7 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 }
 
 /* Try to prepare optimized instructions */
-static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+static void prepare_optimized_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -732,7 +721,7 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
 }
 
 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
-static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 {
 	struct optimized_kprobe *op;
 
@@ -747,13 +736,13 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 	return &op->kp;
 }
 
-static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
 
 /*
  * Prepare an optimized_kprobe and optimize it
  * NOTE: p must be a normal registered kprobe
  */
-static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+static void try_to_optimize_kprobe(struct kprobe *p)
 {
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
@@ -787,7 +776,7 @@ out:
 }
 
 #ifdef CONFIG_SYSCTL
-static void __kprobes optimize_all_kprobes(void)
+static void optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
@@ -810,7 +799,7 @@ out:
 	mutex_unlock(&kprobe_mutex);
 }
 
-static void __kprobes unoptimize_all_kprobes(void)
+static void unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
@@ -861,7 +850,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 #endif /* CONFIG_SYSCTL */
 
 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
-static void __kprobes __arm_kprobe(struct kprobe *p)
+static void __arm_kprobe(struct kprobe *p)
 {
 	struct kprobe *_p;
 
@@ -876,7 +865,7 @@ static void __kprobes __arm_kprobe(struct kprobe *p)
 }
 
 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
-static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
+static void __disarm_kprobe(struct kprobe *p, bool reopt)
 {
 	struct kprobe *_p;
 
@@ -911,13 +900,13 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 	BUG_ON(kprobe_unused(ap));
 }
 
-static __kprobes void free_aggr_kprobe(struct kprobe *p)
+static void free_aggr_kprobe(struct kprobe *p)
 {
 	arch_remove_kprobe(p);
 	kfree(p);
 }
 
-static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 {
 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 }
@@ -931,7 +920,7 @@ static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 static int kprobe_ftrace_enabled;
 
 /* Must ensure p->addr is really on ftrace */
-static int __kprobes prepare_kprobe(struct kprobe *p)
+static int prepare_kprobe(struct kprobe *p)
 {
 	if (!kprobe_ftrace(p))
 		return arch_prepare_kprobe(p);
@@ -940,7 +929,7 @@ static int __kprobes prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+static void arm_kprobe_ftrace(struct kprobe *p)
 {
 	int ret;
 
@@ -955,7 +944,7 @@ static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+static void disarm_kprobe_ftrace(struct kprobe *p)
 {
 	int ret;
 
@@ -975,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void __kprobes arm_kprobe(struct kprobe *kp)
+static void arm_kprobe(struct kprobe *kp)
 {
 	if (unlikely(kprobe_ftrace(kp))) {
 		arm_kprobe_ftrace(kp);
@@ -992,7 +981,7 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
+static void disarm_kprobe(struct kprobe *kp, bool reopt)
 {
 	if (unlikely(kprobe_ftrace(kp))) {
 		disarm_kprobe_ftrace(kp);
@@ -1008,7 +997,7 @@ static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
@@ -1022,9 +1011,10 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
+NOKPROBE_SYMBOL(aggr_pre_handler);
 
-static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
-					unsigned long flags)
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+			      unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -1036,9 +1026,10 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 		}
 	}
 }
+NOKPROBE_SYMBOL(aggr_post_handler);
 
-static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
-					int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+			      int trapnr)
 {
 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
@@ -1052,8 +1043,9 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 	}
 	return 0;
 }
+NOKPROBE_SYMBOL(aggr_fault_handler);
 
-static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 	int ret = 0;
@@ -1065,9 +1057,10 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	reset_kprobe_instance();
 	return ret;
 }
+NOKPROBE_SYMBOL(aggr_break_handler);
 
 /* Walks the list and increments nmissed count for multiprobe case */
-void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+void kprobes_inc_nmissed_count(struct kprobe *p)
 {
 	struct kprobe *kp;
 	if (!kprobe_aggrprobe(p)) {
@@ -1078,9 +1071,10 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 	}
 	return;
 }
+NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
 
-void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
-			       struct hlist_head *head)
+void recycle_rp_inst(struct kretprobe_instance *ri,
+		     struct hlist_head *head)
 {
 	struct kretprobe *rp = ri->rp;
 
@@ -1095,8 +1089,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
 }
+NOKPROBE_SYMBOL(recycle_rp_inst);
 
-void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
-			 struct hlist_head **head, unsigned long *flags)
+void kretprobe_hash_lock(struct task_struct *tsk,
+			 struct hlist_head **head, unsigned long *flags)
 __acquires(hlist_lock)
 {
@@ -1107,17 +1102,19 @@ __acquires(hlist_lock)
 	hlist_lock = kretprobe_table_lock_ptr(hash);
 	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
+NOKPROBE_SYMBOL(kretprobe_hash_lock);
 
-static void __kprobes kretprobe_table_lock(unsigned long hash,
-					   unsigned long *flags)
+static void kretprobe_table_lock(unsigned long hash,
+				 unsigned long *flags)
 __acquires(hlist_lock)
 {
 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
+NOKPROBE_SYMBOL(kretprobe_table_lock);
 
-void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
-				     unsigned long *flags)
+void kretprobe_hash_unlock(struct task_struct *tsk,
+			   unsigned long *flags)
 __releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
@@ -1126,14 +1123,16 @@ __releases(hlist_lock)
 	hlist_lock = kretprobe_table_lock_ptr(hash);
 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
+NOKPROBE_SYMBOL(kretprobe_hash_unlock);
 
-static void __kprobes kretprobe_table_unlock(unsigned long hash,
-					     unsigned long *flags)
+static void kretprobe_table_unlock(unsigned long hash,
+				   unsigned long *flags)
 __releases(hlist_lock)
 {
 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
+NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
@@ -1141,7 +1140,7 @@ __releases(hlist_lock)
  * with this task. These left over instances represent probed functions
  * that have been called but will never return.
  */
-void __kprobes kprobe_flush_task(struct task_struct *tk)
+void kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
@@ -1166,6 +1165,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 		kfree(ri);
 	}
 }
+NOKPROBE_SYMBOL(kprobe_flush_task);
 
 static inline void free_rp_inst(struct kretprobe *rp)
 {
@@ -1178,7 +1178,7 @@ static inline void free_rp_inst(struct kretprobe *rp)
 	}
 }
 
-static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+static void cleanup_rp_inst(struct kretprobe *rp)
 {
 	unsigned long flags, hash;
 	struct kretprobe_instance *ri;
@@ -1197,12 +1197,13 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 	}
 	free_rp_inst(rp);
 }
+NOKPROBE_SYMBOL(cleanup_rp_inst);
 
 /*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
-static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
+static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
@@ -1226,7 +1227,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
-static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	/* Copy p's insn slot to ap */
 	copy_kprobe(p, ap);
@@ -1252,8 +1253,7 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
  */
-static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
-					  struct kprobe *p)
+static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 {
 	int ret = 0;
 	struct kprobe *ap = orig_p;
@@ -1324,25 +1324,29 @@ out:
 	return ret;
 }
 
-static int __kprobes in_kprobes_functions(unsigned long addr)
+bool __weak arch_within_kprobe_blacklist(unsigned long addr)
 {
-	struct kprobe_blackpoint *kb;
+	/* The __kprobes marked functions and entry code must not be probed */
+	return addr >= (unsigned long)__kprobes_text_start &&
+	       addr < (unsigned long)__kprobes_text_end;
+}
 
-	if (addr >= (unsigned long)__kprobes_text_start &&
-	    addr < (unsigned long)__kprobes_text_end)
-		return -EINVAL;
+static bool within_kprobe_blacklist(unsigned long addr)
+{
+	struct kprobe_blacklist_entry *ent;
+
+	if (arch_within_kprobe_blacklist(addr))
+		return true;
 	/*
 	 * If there exists a kprobe_blacklist, verify and
 	 * fail any probe registration in the prohibited area
 	 */
-	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
-		if (kb->start_addr) {
-			if (addr >= kb->start_addr &&
-			    addr < (kb->start_addr + kb->range))
-				return -EINVAL;
-		}
+	list_for_each_entry(ent, &kprobe_blacklist, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr)
+			return true;
 	}
-	return 0;
+
+	return false;
 }
 
 /*
@@ -1351,7 +1355,7 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
  * This returns encoded errors if it fails to look up symbol or invalid
  * combination of parameters.
  */
-static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
+static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
 {
 	kprobe_opcode_t *addr = p->addr;
 
@@ -1374,7 +1378,7 @@ invalid:
 }
 
 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
-static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
+static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 {
 	struct kprobe *ap, *list_p;
 
@@ -1406,8 +1410,8 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 	return ret;
 }
 
-static __kprobes int check_kprobe_address_safe(struct kprobe *p,
-					       struct module **probed_mod)
+static int check_kprobe_address_safe(struct kprobe *p,
+				     struct module **probed_mod)
 {
 	int ret = 0;
 	unsigned long ftrace_addr;
@@ -1433,7 +1437,7 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 
 	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
-	    in_kprobes_functions((unsigned long) p->addr) ||
+	    within_kprobe_blacklist((unsigned long) p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
 		goto out;
@@ -1469,7 +1473,7 @@ out:
 	return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
+int register_kprobe(struct kprobe *p)
 {
 	int ret;
 	struct kprobe *old_p;
@@ -1531,7 +1535,7 @@ out:
 EXPORT_SYMBOL_GPL(register_kprobe);
 
 /* Check if all probes on the aggrprobe are disabled */
-static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
+static int aggr_kprobe_disabled(struct kprobe *ap)
 {
 	struct kprobe *kp;
 
@@ -1547,7 +1551,7 @@ static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
 }
 
 /* Disable one kprobe: Make sure called under kprobe_mutex is locked */
-static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
+static struct kprobe *__disable_kprobe(struct kprobe *p)
 {
 	struct kprobe *orig_p;
 
@@ -1574,7 +1578,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 /*
  * Unregister a kprobe without a scheduler synchronization.
  */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+static int __unregister_kprobe_top(struct kprobe *p)
 {
 	struct kprobe *ap, *list_p;
 
@@ -1631,7 +1635,7 @@ disarmed:
 	return 0;
 }
 
-static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+static void __unregister_kprobe_bottom(struct kprobe *p)
 {
 	struct kprobe *ap;
 
@@ -1647,7 +1651,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 	/* Otherwise, do nothing. */
 }
 
-int __kprobes register_kprobes(struct kprobe **kps, int num)
+int register_kprobes(struct kprobe **kps, int num)
 {
 	int i, ret = 0;
 
@@ -1665,13 +1669,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
 }
 EXPORT_SYMBOL_GPL(register_kprobes);
 
-void __kprobes unregister_kprobe(struct kprobe *p)
+void unregister_kprobe(struct kprobe *p)
 {
 	unregister_kprobes(&p, 1);
 }
 EXPORT_SYMBOL_GPL(unregister_kprobe);
 
-void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+void unregister_kprobes(struct kprobe **kps, int num)
 {
 	int i;
 
@@ -1700,7 +1704,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 	return (unsigned long)entry;
 }
 
-int __kprobes register_jprobes(struct jprobe **jps, int num)
+int register_jprobes(struct jprobe **jps, int num)
 {
 	struct jprobe *jp;
 	int ret = 0, i;
@@ -1731,19 +1735,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 }
 EXPORT_SYMBOL_GPL(register_jprobes);
 
-int __kprobes register_jprobe(struct jprobe *jp)
+int register_jprobe(struct jprobe *jp)
 {
 	return register_jprobes(&jp, 1);
 }
 EXPORT_SYMBOL_GPL(register_jprobe);
 
-void __kprobes unregister_jprobe(struct jprobe *jp)
+void unregister_jprobe(struct jprobe *jp)
 {
 	unregister_jprobes(&jp, 1);
 }
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 
-void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+void unregister_jprobes(struct jprobe **jps, int num)
 {
 	int i;
 
@@ -1768,8 +1772,7 @@ EXPORT_SYMBOL_GPL(unregister_jprobes);
  * This kprobe pre_handler is registered with every kretprobe. When probe
  * hits it will set up the return probe.
  */
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-					   struct pt_regs *regs)
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
 	unsigned long hash, flags = 0;
@@ -1807,8 +1810,9 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 	}
 	return 0;
 }
+NOKPROBE_SYMBOL(pre_handler_kretprobe);
 
-int __kprobes register_kretprobe(struct kretprobe *rp)
+int register_kretprobe(struct kretprobe *rp)
 {
 	int ret = 0;
 	struct kretprobe_instance *inst;
@@ -1861,7 +1865,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 }
 EXPORT_SYMBOL_GPL(register_kretprobe);
 
-int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+int register_kretprobes(struct kretprobe **rps, int num)
 {
 	int ret = 0, i;
 
@@ -1879,13 +1883,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 }
 EXPORT_SYMBOL_GPL(register_kretprobes);
 
-void __kprobes unregister_kretprobe(struct kretprobe *rp)
+void unregister_kretprobe(struct kretprobe *rp)
 {
 	unregister_kretprobes(&rp, 1);
 }
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
-void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+void unregister_kretprobes(struct kretprobe **rps, int num)
 {
 	int i;
 
@@ -1908,38 +1912,38 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
 #else /* CONFIG_KRETPROBES */
-int __kprobes register_kretprobe(struct kretprobe *rp)
+int register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(register_kretprobe);
 
-int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+int register_kretprobes(struct kretprobe **rps, int num)
 {
 	return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(register_kretprobes);
 
-void __kprobes unregister_kretprobe(struct kretprobe *rp)
+void unregister_kretprobe(struct kretprobe *rp)
 {
 }
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
-void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+void unregister_kretprobes(struct kretprobe **rps, int num)
 {
 }
 EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-					   struct pt_regs *regs)
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	return 0;
 }
+NOKPROBE_SYMBOL(pre_handler_kretprobe);
 
 #endif /* CONFIG_KRETPROBES */
 
 /* Set the kprobe gone and remove its instruction buffer. */
-static void __kprobes kill_kprobe(struct kprobe *p)
+static void kill_kprobe(struct kprobe *p)
 {
 	struct kprobe *kp;
 
@@ -1963,7 +1967,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 }
 
 /* Disable one kprobe */
-int __kprobes disable_kprobe(struct kprobe *kp)
+int disable_kprobe(struct kprobe *kp)
 {
 	int ret = 0;
 
@@ -1979,7 +1983,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
 EXPORT_SYMBOL_GPL(disable_kprobe);
 
 /* Enable one kprobe */
-int __kprobes enable_kprobe(struct kprobe *kp)
+int enable_kprobe(struct kprobe *kp)
 {
 	int ret = 0;
 	struct kprobe *p;
@@ -2012,16 +2016,49 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
-void __kprobes dump_kprobe(struct kprobe *kp)
+void dump_kprobe(struct kprobe *kp)
 {
 	printk(KERN_WARNING "Dumping kprobe:\n");
 	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
 	       kp->symbol_name, kp->addr, kp->offset);
 }
+NOKPROBE_SYMBOL(dump_kprobe);
+
+/*
+ * Lookup and populate the kprobe_blacklist.
+ *
+ * Unlike the kretprobe blacklist, we'll need to determine
+ * the range of addresses that belong to the said functions,
+ * since a kprobe need not necessarily be at the beginning
+ * of a function.
+ */
+static int __init populate_kprobe_blacklist(unsigned long *start,
+					     unsigned long *end)
+{
+	unsigned long *iter;
+	struct kprobe_blacklist_entry *ent;
+	unsigned long offset = 0, size = 0;
+
+	for (iter = start; iter < end; iter++) {
+		if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
+			pr_err("Failed to find blacklist %p\n", (void *)*iter);
+			continue;
+		}
+
+		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+		if (!ent)
+			return -ENOMEM;
+		ent->start_addr = *iter;
+		ent->end_addr = *iter + size;
+		INIT_LIST_HEAD(&ent->list);
+		list_add_tail(&ent->list, &kprobe_blacklist);
+	}
+	return 0;
+}
 
 /* Module notifier call back, checking kprobes on the module */
-static int __kprobes kprobes_module_callback(struct notifier_block *nb,
-					     unsigned long val, void *data)
+static int kprobes_module_callback(struct notifier_block *nb,
+				   unsigned long val, void *data)
 {
 	struct module *mod = data;
 	struct hlist_head *head;
@@ -2062,14 +2099,13 @@ static struct notifier_block kprobe_module_nb = {
 	.priority = 0
 };
 
+/* Markers of _kprobe_blacklist section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
-	unsigned long offset = 0, size = 0;
-	char *modname, namebuf[KSYM_NAME_LEN];
-	const char *symbol_name;
-	void *addr;
-	struct kprobe_blackpoint *kb;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
@@ -2079,26 +2115,11 @@ static int __init init_kprobes(void)
 		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
-	/*
-	 * Lookup and populate the kprobe_blacklist.
-	 *
-	 * Unlike the kretprobe blacklist, we'll need to determine
-	 * the range of addresses that belong to the said functions,
-	 * since a kprobe need not necessarily be at the beginning
-	 * of a function.
-	 */
-	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
-		kprobe_lookup_name(kb->name, addr);
-		if (!addr)
-			continue;
-
-		kb->start_addr = (unsigned long)addr;
-		symbol_name = kallsyms_lookup(kb->start_addr,
-				&size, &offset, &modname, namebuf);
-		if (!symbol_name)
-			kb->range = 0;
-		else
-			kb->range = size;
-	}
+	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
+					__stop_kprobe_blacklist);
+	if (err) {
+		pr_err("kprobes: failed to populate blacklist: %d\n", err);
+		pr_err("Please take care of using kprobes.\n");
+	}
 
 	if (kretprobe_blacklist_size) {
@@ -2138,7 +2159,7 @@ static int __init init_kprobes(void)
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
+static void report_probe(struct seq_file *pi, struct kprobe *p,
 		const char *sym, int offset, char *modname, struct kprobe *pp)
 {
 	char *kprobe_type;
@@ -2167,12 +2188,12 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
-static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
+static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
 {
 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
 }
 
-static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
+static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
 {
 	(*pos)++;
 	if (*pos >= KPROBE_TABLE_SIZE)
@@ -2180,12 +2201,12 @@ static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
 	return pos;
 }
 
-static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
+static void kprobe_seq_stop(struct seq_file *f, void *v)
 {
 	/* Nothing to do */
 }
 
-static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
+static int show_kprobe_addr(struct seq_file *pi, void *v)
 {
 	struct hlist_head *head;
 	struct kprobe *p, *kp;
@@ -2216,7 +2237,7 @@ static const struct seq_operations kprobes_seq_ops = {
 	.show = show_kprobe_addr
 };
 
-static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
+static int kprobes_open(struct inode *inode, struct file *filp)
 {
 	return seq_open(filp, &kprobes_seq_ops);
 }
@@ -2228,7 +2249,47 @@ static const struct file_operations debugfs_kprobes_operations = {
2228 .release = seq_release, 2249 .release = seq_release,
2229}; 2250};
2230 2251
2231static void __kprobes arm_all_kprobes(void) 2252/* kprobes/blacklist -- shows which functions can not be probed */
2253static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2254{
2255 return seq_list_start(&kprobe_blacklist, *pos);
2256}
2257
2258static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2259{
2260 return seq_list_next(v, &kprobe_blacklist, pos);
2261}
2262
2263static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2264{
2265 struct kprobe_blacklist_entry *ent =
2266 list_entry(v, struct kprobe_blacklist_entry, list);
2267
2268 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2269 (void *)ent->end_addr, (void *)ent->start_addr);
2270 return 0;
2271}
2272
2273static const struct seq_operations kprobe_blacklist_seq_ops = {
2274 .start = kprobe_blacklist_seq_start,
2275 .next = kprobe_blacklist_seq_next,
2276 .stop = kprobe_seq_stop, /* Reuse void function */
2277 .show = kprobe_blacklist_seq_show,
2278};
2279
2280static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2281{
2282 return seq_open(filp, &kprobe_blacklist_seq_ops);
2283}
2284
2285static const struct file_operations debugfs_kprobe_blacklist_ops = {
2286 .open = kprobe_blacklist_open,
2287 .read = seq_read,
2288 .llseek = seq_lseek,
2289 .release = seq_release,
2290};
2291
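Given the seq_printf() format above, each blacklist entry comes out as an address range plus the owning symbol, both derived from start_addr (%p for the raw addresses, %ps for the name). A hypothetical line of the resulting /sys/kernel/debug/kprobes/blacklist file (addresses invented for illustration):

    0xffffffff8160cc40-0xffffffff8160cc88	do_int3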
2292static void arm_all_kprobes(void)
2232{ 2293{
2233 struct hlist_head *head; 2294 struct hlist_head *head;
2234 struct kprobe *p; 2295 struct kprobe *p;
@@ -2256,7 +2317,7 @@ already_enabled:
2256 return; 2317 return;
2257} 2318}
2258 2319
2259static void __kprobes disarm_all_kprobes(void) 2320static void disarm_all_kprobes(void)
2260{ 2321{
2261 struct hlist_head *head; 2322 struct hlist_head *head;
2262 struct kprobe *p; 2323 struct kprobe *p;
@@ -2340,7 +2401,7 @@ static const struct file_operations fops_kp = {
2340 .llseek = default_llseek, 2401 .llseek = default_llseek,
2341}; 2402};
2342 2403
2343static int __kprobes debugfs_kprobe_init(void) 2404static int __init debugfs_kprobe_init(void)
2344{ 2405{
2345 struct dentry *dir, *file; 2406 struct dentry *dir, *file;
2346 unsigned int value = 1; 2407 unsigned int value = 1;
@@ -2351,19 +2412,24 @@ static int __kprobes debugfs_kprobe_init(void)
2351 2412
2352 file = debugfs_create_file("list", 0444, dir, NULL, 2413 file = debugfs_create_file("list", 0444, dir, NULL,
2353 &debugfs_kprobes_operations); 2414 &debugfs_kprobes_operations);
2354 if (!file) { 2415 if (!file)
2355 debugfs_remove(dir); 2416 goto error;
2356 return -ENOMEM;
2357 }
2358 2417
2359 file = debugfs_create_file("enabled", 0600, dir, 2418 file = debugfs_create_file("enabled", 0600, dir,
2360 &value, &fops_kp); 2419 &value, &fops_kp);
2361 if (!file) { 2420 if (!file)
2362 debugfs_remove(dir); 2421 goto error;
2363 return -ENOMEM; 2422
2364 } 2423 file = debugfs_create_file("blacklist", 0444, dir, NULL,
2424 &debugfs_kprobe_blacklist_ops);
2425 if (!file)
2426 goto error;
2365 2427
2366 return 0; 2428 return 0;
2429
2430error:
2431 debugfs_remove(dir);
2432 return -ENOMEM;
2367} 2433}
2368 2434
2369late_initcall(debugfs_kprobe_init); 2435late_initcall(debugfs_kprobe_init);
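The rewritten debugfs_kprobe_init() above funnels every debugfs_create_file() failure into one error label, which is what lets the new "blacklist" file join the sequence without a third copy of the cleanup. The idiom in a self-contained sketch (directory, file names and fops are hypothetical):

    #include <linux/debugfs.h>
    #include <linux/init.h>

    extern const struct file_operations a_fops, b_fops;	/* placeholders */

    static int __init example_debugfs_init(void)
    {
            struct dentry *dir, *file;

            dir = debugfs_create_dir("example", NULL);
            if (!dir)
                    return -ENOMEM;

            file = debugfs_create_file("a", 0444, dir, NULL, &a_fops);
            if (!file)
                    goto error;

            file = debugfs_create_file("b", 0600, dir, NULL, &b_fops);
            if (!file)
                    goto error;

            return 0;

    error:
            debugfs_remove(dir);	/* one exit path, as in the hunk above */
            return -ENOMEM;
    }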
diff --git a/kernel/notifier.c b/kernel/notifier.c
index db4c8b08a50c..4803da6eab62 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -71,9 +71,9 @@ static int notifier_chain_unregister(struct notifier_block **nl,
71 * @returns: notifier_call_chain returns the value returned by the 71 * @returns: notifier_call_chain returns the value returned by the
72 * last notifier function called. 72 * last notifier function called.
73 */ 73 */
74static int __kprobes notifier_call_chain(struct notifier_block **nl, 74static int notifier_call_chain(struct notifier_block **nl,
75 unsigned long val, void *v, 75 unsigned long val, void *v,
76 int nr_to_call, int *nr_calls) 76 int nr_to_call, int *nr_calls)
77{ 77{
78 int ret = NOTIFY_DONE; 78 int ret = NOTIFY_DONE;
79 struct notifier_block *nb, *next_nb; 79 struct notifier_block *nb, *next_nb;
@@ -102,6 +102,7 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
102 } 102 }
103 return ret; 103 return ret;
104} 104}
105NOKPROBE_SYMBOL(notifier_call_chain);
105 106
106/* 107/*
107 * Atomic notifier chain routines. Registration and unregistration 108 * Atomic notifier chain routines. Registration and unregistration
@@ -172,9 +173,9 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
172 * Otherwise the return value is the return value 173 * Otherwise the return value is the return value
173 * of the last notifier function called. 174 * of the last notifier function called.
174 */ 175 */
175int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, 176int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
176 unsigned long val, void *v, 177 unsigned long val, void *v,
177 int nr_to_call, int *nr_calls) 178 int nr_to_call, int *nr_calls)
178{ 179{
179 int ret; 180 int ret;
180 181
@@ -184,13 +185,15 @@ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
184 return ret; 185 return ret;
185} 186}
186EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); 187EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
188NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
187 189
188int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, 190int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
189 unsigned long val, void *v) 191 unsigned long val, void *v)
190{ 192{
191 return __atomic_notifier_call_chain(nh, val, v, -1, NULL); 193 return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
192} 194}
193EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); 195EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
196NOKPROBE_SYMBOL(atomic_notifier_call_chain);
194 197
195/* 198/*
196 * Blocking notifier chain routines. All access to the chain is 199 * Blocking notifier chain routines. All access to the chain is
@@ -527,7 +530,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
527 530
528static ATOMIC_NOTIFIER_HEAD(die_chain); 531static ATOMIC_NOTIFIER_HEAD(die_chain);
529 532
530int notrace __kprobes notify_die(enum die_val val, const char *str, 533int notrace notify_die(enum die_val val, const char *str,
531 struct pt_regs *regs, long err, int trap, int sig) 534 struct pt_regs *regs, long err, int trap, int sig)
532{ 535{
533 struct die_args args = { 536 struct die_args args = {
@@ -540,6 +543,7 @@ int notrace __kprobes notify_die(enum die_val val, const char *str,
540 }; 543 };
541 return atomic_notifier_call_chain(&die_chain, val, &args); 544 return atomic_notifier_call_chain(&die_chain, val, &args);
542} 545}
546NOKPROBE_SYMBOL(notify_die);
543 547
544int register_die_notifier(struct notifier_block *nb) 548int register_die_notifier(struct notifier_block *nb)
545{ 549{
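The change repeated through this file, and through most of the series, swaps the __kprobes section attribute on the definition for a NOKPROBE_SYMBOL() marker after it; unlike the attribute, the marker composes cleanly with EXPORT_SYMBOL_GPL() and notrace, as the hunks above show. The macro presumably works along these lines, emitting addresses into the table that populate_kprobe_blacklist() consumed in the kprobes.c hunk earlier (the exact macro text is an assumption, not quoted from this commit):

    /* Record fname's address in the _kprobe_blacklist section; the linker
     * collects all entries between __start_kprobe_blacklist and
     * __stop_kprobe_blacklist for init_kprobes() to turn into ranges. */
    #define __NOKPROBE_SYMBOL(fname)				\
    static unsigned long __used				\
            __attribute__((section("_kprobe_blacklist")))	\
            _kbl_addr_##fname = (unsigned long)fname;
    #define NOKPROBE_SYMBOL(fname)	__NOKPROBE_SYMBOL(fname)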
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c6b98793d647..4f611561ba4c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2527,7 +2527,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
2527#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 2527#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2528 defined(CONFIG_PREEMPT_TRACER)) 2528 defined(CONFIG_PREEMPT_TRACER))
2529 2529
2530void __kprobes preempt_count_add(int val) 2530void preempt_count_add(int val)
2531{ 2531{
2532#ifdef CONFIG_DEBUG_PREEMPT 2532#ifdef CONFIG_DEBUG_PREEMPT
2533 /* 2533 /*
@@ -2553,8 +2553,9 @@ void __kprobes preempt_count_add(int val)
2553 } 2553 }
2554} 2554}
2555EXPORT_SYMBOL(preempt_count_add); 2555EXPORT_SYMBOL(preempt_count_add);
2556NOKPROBE_SYMBOL(preempt_count_add);
2556 2557
2557void __kprobes preempt_count_sub(int val) 2558void preempt_count_sub(int val)
2558{ 2559{
2559#ifdef CONFIG_DEBUG_PREEMPT 2560#ifdef CONFIG_DEBUG_PREEMPT
2560 /* 2561 /*
@@ -2575,6 +2576,7 @@ void __kprobes preempt_count_sub(int val)
2575 __preempt_count_sub(val); 2576 __preempt_count_sub(val);
2576} 2577}
2577EXPORT_SYMBOL(preempt_count_sub); 2578EXPORT_SYMBOL(preempt_count_sub);
2579NOKPROBE_SYMBOL(preempt_count_sub);
2578 2580
2579#endif 2581#endif
2580 2582
@@ -2857,6 +2859,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
2857 barrier(); 2859 barrier();
2858 } while (need_resched()); 2860 } while (need_resched());
2859} 2861}
2862NOKPROBE_SYMBOL(preempt_schedule);
2860EXPORT_SYMBOL(preempt_schedule); 2863EXPORT_SYMBOL(preempt_schedule);
2861#endif /* CONFIG_PREEMPT */ 2864#endif /* CONFIG_PREEMPT */
2862 2865
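preempt_count_add(), preempt_count_sub() and preempt_schedule() run on the very path a kprobe breakpoint takes, so a probe planted on them would recurse straight back into the probe machinery; blacklisting turns that crash into a registration-time refusal. A hypothetical module snippet to observe the effect (the exact errno returned is an assumption):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp = {
            .symbol_name = "preempt_count_add",	/* blacklisted above */
    };

    static int __init try_blacklisted_probe(void)
    {
            int ret = register_kprobe(&kp);

            /* Expected to fail cleanly now rather than crash on first hit. */
            pr_info("register_kprobe(preempt_count_add) = %d\n", ret);
            if (!ret)
                    unregister_kprobe(&kp);
            return 0;
    }
    module_init(try_blacklisted_probe);
    MODULE_LICENSE("GPL");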
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index c894614de14d..5d12bb407b44 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -248,8 +248,8 @@ void perf_trace_del(struct perf_event *p_event, int flags)
248 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); 248 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
249} 249}
250 250
251__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, 251void *perf_trace_buf_prepare(int size, unsigned short type,
252 struct pt_regs *regs, int *rctxp) 252 struct pt_regs *regs, int *rctxp)
253{ 253{
254 struct trace_entry *entry; 254 struct trace_entry *entry;
255 unsigned long flags; 255 unsigned long flags;
@@ -281,6 +281,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
281 return raw_data; 281 return raw_data;
282} 282}
283EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); 283EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
284NOKPROBE_SYMBOL(perf_trace_buf_prepare);
284 285
285#ifdef CONFIG_FUNCTION_TRACER 286#ifdef CONFIG_FUNCTION_TRACER
286static void 287static void
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index ef2fba1f46b5..282f6e4e5539 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -40,27 +40,27 @@ struct trace_kprobe {
40 (sizeof(struct probe_arg) * (n))) 40 (sizeof(struct probe_arg) * (n)))
41 41
42 42
43static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk) 43static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
44{ 44{
45 return tk->rp.handler != NULL; 45 return tk->rp.handler != NULL;
46} 46}
47 47
48static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk) 48static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
49{ 49{
50 return tk->symbol ? tk->symbol : "unknown"; 50 return tk->symbol ? tk->symbol : "unknown";
51} 51}
52 52
53static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk) 53static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
54{ 54{
55 return tk->rp.kp.offset; 55 return tk->rp.kp.offset;
56} 56}
57 57
58static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk) 58static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
59{ 59{
60 return !!(kprobe_gone(&tk->rp.kp)); 60 return !!(kprobe_gone(&tk->rp.kp));
61} 61}
62 62
63static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, 63static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
64 struct module *mod) 64 struct module *mod)
65{ 65{
66 int len = strlen(mod->name); 66 int len = strlen(mod->name);
@@ -68,7 +68,7 @@ static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
68 return strncmp(mod->name, name, len) == 0 && name[len] == ':'; 68 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
69} 69}
70 70
71static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk) 71static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
72{ 72{
73 return !!strchr(trace_kprobe_symbol(tk), ':'); 73 return !!strchr(trace_kprobe_symbol(tk), ':');
74} 74}
@@ -132,19 +132,21 @@ struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
132 * Kprobes-specific fetch functions 132 * Kprobes-specific fetch functions
133 */ 133 */
134#define DEFINE_FETCH_stack(type) \ 134#define DEFINE_FETCH_stack(type) \
135static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ 135static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
136 void *offset, void *dest) \ 136 void *offset, void *dest) \
137{ \ 137{ \
138 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ 138 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
139 (unsigned int)((unsigned long)offset)); \ 139 (unsigned int)((unsigned long)offset)); \
140} 140} \
141NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
142
141DEFINE_BASIC_FETCH_FUNCS(stack) 143DEFINE_BASIC_FETCH_FUNCS(stack)
142/* No string on the stack entry */ 144/* No string on the stack entry */
143#define fetch_stack_string NULL 145#define fetch_stack_string NULL
144#define fetch_stack_string_size NULL 146#define fetch_stack_string_size NULL
145 147
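With NOKPROBE_SYMBOL() folded into the macro body, every generated fetch function is blacklisted at its point of definition. Assuming FETCH_FUNC_NAME(stack, u32) expands to fetch_stack_u32 (the fetch_memory_##type usage later in this file suggests that naming), DEFINE_FETCH_stack(u32) would now produce roughly:

    static void fetch_stack_u32(struct pt_regs *regs,
                                void *offset, void *dest)
    {
            /* Read the nth word of the kernel stack into *dest. */
            *(u32 *)dest = (u32)regs_get_kernel_stack_nth(regs,
                                    (unsigned int)((unsigned long)offset));
    }
    NOKPROBE_SYMBOL(fetch_stack_u32);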
146#define DEFINE_FETCH_memory(type) \ 148#define DEFINE_FETCH_memory(type) \
147static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ 149static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
148 void *addr, void *dest) \ 150 void *addr, void *dest) \
149{ \ 151{ \
150 type retval; \ 152 type retval; \
@@ -152,14 +154,16 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
152 *(type *)dest = 0; \ 154 *(type *)dest = 0; \
153 else \ 155 else \
154 *(type *)dest = retval; \ 156 *(type *)dest = retval; \
155} 157} \
158NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
159
156DEFINE_BASIC_FETCH_FUNCS(memory) 160DEFINE_BASIC_FETCH_FUNCS(memory)
157/* 161/*
158 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
159 * length and relative data location. 163 * length and relative data location.
160 */ 164 */
161static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 165static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
162 void *addr, void *dest) 166 void *addr, void *dest)
163{ 167{
164 long ret; 168 long ret;
165 int maxlen = get_rloc_len(*(u32 *)dest); 169 int maxlen = get_rloc_len(*(u32 *)dest);
@@ -193,10 +197,11 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
193 get_rloc_offs(*(u32 *)dest)); 197 get_rloc_offs(*(u32 *)dest));
194 } 198 }
195} 199}
200NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
196 201
197/* Return the length of string -- including null terminal byte */ 202/* Return the length of string -- including null terminal byte */
198static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 203static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
199 void *addr, void *dest) 204 void *addr, void *dest)
200{ 205{
201 mm_segment_t old_fs; 206 mm_segment_t old_fs;
202 int ret, len = 0; 207 int ret, len = 0;
@@ -219,17 +224,19 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
219 else 224 else
220 *(u32 *)dest = len; 225 *(u32 *)dest = len;
221} 226}
227NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
222 228
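The contract noted above -- the caller pre-loads *(u32 *)dest with a maximum length and a relative data location -- implies a packed u32. Its layout can be read off the *(u32 *)data >> 16 length extraction in the trace_probe.c hunks below; a sketch of the accessors as assumed here:

    /* Upper 16 bits: (maximum) string length; lower 16 bits: offset of
     * the string data relative to the trace entry. */
    #define make_data_rloc(len, offs)	\
            (((u32)(len) << 16) | ((u32)(offs) & 0xffff))
    #define get_rloc_len(dl)	((u32)(dl) >> 16)
    #define get_rloc_offs(dl)	((u32)(dl) & 0xffff)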
223#define DEFINE_FETCH_symbol(type) \ 229#define DEFINE_FETCH_symbol(type) \
224__kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \ 230void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
225 void *data, void *dest) \
226{ \ 231{ \
227 struct symbol_cache *sc = data; \ 232 struct symbol_cache *sc = data; \
228 if (sc->addr) \ 233 if (sc->addr) \
229 fetch_memory_##type(regs, (void *)sc->addr, dest); \ 234 fetch_memory_##type(regs, (void *)sc->addr, dest); \
230 else \ 235 else \
231 *(type *)dest = 0; \ 236 *(type *)dest = 0; \
232} 237} \
238NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
239
233DEFINE_BASIC_FETCH_FUNCS(symbol) 240DEFINE_BASIC_FETCH_FUNCS(symbol)
234DEFINE_FETCH_symbol(string) 241DEFINE_FETCH_symbol(string)
235DEFINE_FETCH_symbol(string_size) 242DEFINE_FETCH_symbol(string_size)
@@ -907,7 +914,7 @@ static const struct file_operations kprobe_profile_ops = {
907}; 914};
908 915
909/* Kprobe handler */ 916/* Kprobe handler */
910static __kprobes void 917static nokprobe_inline void
911__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, 918__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
912 struct ftrace_event_file *ftrace_file) 919 struct ftrace_event_file *ftrace_file)
913{ 920{
@@ -943,7 +950,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
943 entry, irq_flags, pc, regs); 950 entry, irq_flags, pc, regs);
944} 951}
945 952
946static __kprobes void 953static void
947kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) 954kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
948{ 955{
949 struct event_file_link *link; 956 struct event_file_link *link;
@@ -951,9 +958,10 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
951 list_for_each_entry_rcu(link, &tk->tp.files, list) 958 list_for_each_entry_rcu(link, &tk->tp.files, list)
952 __kprobe_trace_func(tk, regs, link->file); 959 __kprobe_trace_func(tk, regs, link->file);
953} 960}
961NOKPROBE_SYMBOL(kprobe_trace_func);
954 962
955/* Kretprobe handler */ 963/* Kretprobe handler */
956static __kprobes void 964static nokprobe_inline void
957__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 965__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
958 struct pt_regs *regs, 966 struct pt_regs *regs,
959 struct ftrace_event_file *ftrace_file) 967 struct ftrace_event_file *ftrace_file)
@@ -991,7 +999,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
991 entry, irq_flags, pc, regs); 999 entry, irq_flags, pc, regs);
992} 1000}
993 1001
994static __kprobes void 1002static void
995kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1003kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
996 struct pt_regs *regs) 1004 struct pt_regs *regs)
997{ 1005{
@@ -1000,6 +1008,7 @@ kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1000 list_for_each_entry_rcu(link, &tk->tp.files, list) 1008 list_for_each_entry_rcu(link, &tk->tp.files, list)
1001 __kretprobe_trace_func(tk, ri, regs, link->file); 1009 __kretprobe_trace_func(tk, ri, regs, link->file);
1002} 1010}
1011NOKPROBE_SYMBOL(kretprobe_trace_func);
1003 1012
1004/* Event entry printers */ 1013/* Event entry printers */
1005static enum print_line_t 1014static enum print_line_t
@@ -1131,7 +1140,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1131#ifdef CONFIG_PERF_EVENTS 1140#ifdef CONFIG_PERF_EVENTS
1132 1141
1133/* Kprobe profile handler */ 1142/* Kprobe profile handler */
1134static __kprobes void 1143static void
1135kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) 1144kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1136{ 1145{
1137 struct ftrace_event_call *call = &tk->tp.call; 1146 struct ftrace_event_call *call = &tk->tp.call;
@@ -1158,9 +1167,10 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1158 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1167 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1159 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); 1168 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1160} 1169}
1170NOKPROBE_SYMBOL(kprobe_perf_func);
1161 1171
1162/* Kretprobe profile handler */ 1172/* Kretprobe profile handler */
1163static __kprobes void 1173static void
1164kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1174kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1165 struct pt_regs *regs) 1175 struct pt_regs *regs)
1166{ 1176{
@@ -1188,6 +1198,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1188 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1198 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1189 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); 1199 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1190} 1200}
1201NOKPROBE_SYMBOL(kretprobe_perf_func);
1191#endif /* CONFIG_PERF_EVENTS */ 1202#endif /* CONFIG_PERF_EVENTS */
1192 1203
1193/* 1204/*
@@ -1196,9 +1207,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1196 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe 1207 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1197 * lockless, but we can't race with this __init function. 1208 * lockless, but we can't race with this __init function.
1198 */ 1209 */
1199static __kprobes 1210static int kprobe_register(struct ftrace_event_call *event,
1200int kprobe_register(struct ftrace_event_call *event, 1211 enum trace_reg type, void *data)
1201 enum trace_reg type, void *data)
1202{ 1212{
1203 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; 1213 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1204 struct ftrace_event_file *file = data; 1214 struct ftrace_event_file *file = data;
@@ -1224,8 +1234,7 @@ int kprobe_register(struct ftrace_event_call *event,
1224 return 0; 1234 return 0;
1225} 1235}
1226 1236
1227static __kprobes 1237static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1228int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1229{ 1238{
1230 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); 1239 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1231 1240
@@ -1239,9 +1248,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1239#endif 1248#endif
1240 return 0; /* We don't tweak kernel, so just return 0 */ 1249 return 0; /* We don't tweak kernel, so just return 0 */
1241} 1250}
1251NOKPROBE_SYMBOL(kprobe_dispatcher);
1242 1252
1243static __kprobes 1253static int
1244int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) 1254kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1245{ 1255{
1246 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); 1256 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1247 1257
@@ -1255,6 +1265,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1255#endif 1265#endif
1256 return 0; /* We don't tweak kernel, so just return 0 */ 1266 return 0; /* We don't tweak kernel, so just return 0 */
1257} 1267}
1268NOKPROBE_SYMBOL(kretprobe_dispatcher);
1258 1269
1259static struct trace_event_functions kretprobe_funcs = { 1270static struct trace_event_functions kretprobe_funcs = {
1260 .trace = print_kretprobe_event 1271 .trace = print_kretprobe_event
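This file settles into a three-way split: handlers that can run in probe context (the dispatchers and the trace/perf functions) get NOKPROBE_SYMBOL(), the small helpers they call become nokprobe_inline, and setup-only code such as kprobe_register() simply loses the annotation. nokprobe_inline itself is presumably defined along these lines (an assumption; the definition is not part of this diff):

    #ifdef CONFIG_KPROBES
    /* Force inlining so the helper's body lands inside its already
     * blacklisted callers instead of existing as a probeable symbol. */
    # define nokprobe_inline	__always_inline
    #else
    # define nokprobe_inline	inline
    #endif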
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8364a421b4df..d4b9fc22cd27 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -37,13 +37,13 @@ const char *reserved_field_names[] = {
37 37
38/* Printing in basic type function template */ 38/* Printing in basic type function template */
39#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \ 39#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \
40__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ 40int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
41 const char *name, \ 41 void *data, void *ent) \
42 void *data, void *ent) \
43{ \ 42{ \
44 return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ 43 return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \
45} \ 44} \
46const char PRINT_TYPE_FMT_NAME(type)[] = fmt; 45const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \
46NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type));
47 47
48DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x") 48DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x")
49DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x") 49DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x")
@@ -55,9 +55,8 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%d")
55DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld") 55DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld")
56 56
57/* Print type function for string type */ 57/* Print type function for string type */
58__kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, 58int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name,
59 const char *name, 59 void *data, void *ent)
60 void *data, void *ent)
61{ 60{
62 int len = *(u32 *)data >> 16; 61 int len = *(u32 *)data >> 16;
63 62
@@ -67,6 +66,7 @@ __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
67 return trace_seq_printf(s, " %s=\"%s\"", name, 66 return trace_seq_printf(s, " %s=\"%s\"", name,
68 (const char *)get_loc_data(data, ent)); 67 (const char *)get_loc_data(data, ent));
69} 68}
69NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string));
70 70
71const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; 71const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
72 72
@@ -81,23 +81,24 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
81 81
82/* Data fetch function templates */ 82/* Data fetch function templates */
83#define DEFINE_FETCH_reg(type) \ 83#define DEFINE_FETCH_reg(type) \
84__kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ 84void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \
85 void *offset, void *dest) \
86{ \ 85{ \
87 *(type *)dest = (type)regs_get_register(regs, \ 86 *(type *)dest = (type)regs_get_register(regs, \
88 (unsigned int)((unsigned long)offset)); \ 87 (unsigned int)((unsigned long)offset)); \
89} 88} \
89NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type));
90DEFINE_BASIC_FETCH_FUNCS(reg) 90DEFINE_BASIC_FETCH_FUNCS(reg)
91/* No string on the register */ 91/* No string on the register */
92#define fetch_reg_string NULL 92#define fetch_reg_string NULL
93#define fetch_reg_string_size NULL 93#define fetch_reg_string_size NULL
94 94
95#define DEFINE_FETCH_retval(type) \ 95#define DEFINE_FETCH_retval(type) \
96__kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ 96void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \
97 void *dummy, void *dest) \ 97 void *dummy, void *dest) \
98{ \ 98{ \
99 *(type *)dest = (type)regs_return_value(regs); \ 99 *(type *)dest = (type)regs_return_value(regs); \
100} 100} \
101NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type));
101DEFINE_BASIC_FETCH_FUNCS(retval) 102DEFINE_BASIC_FETCH_FUNCS(retval)
102/* No string on the retval */ 103/* No string on the retval */
103#define fetch_retval_string NULL 104#define fetch_retval_string NULL
@@ -112,8 +113,8 @@ struct deref_fetch_param {
112}; 113};
113 114
114#define DEFINE_FETCH_deref(type) \ 115#define DEFINE_FETCH_deref(type) \
115__kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ 116void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
116 void *data, void *dest) \ 117 void *data, void *dest) \
117{ \ 118{ \
118 struct deref_fetch_param *dprm = data; \ 119 struct deref_fetch_param *dprm = data; \
119 unsigned long addr; \ 120 unsigned long addr; \
@@ -123,12 +124,13 @@ __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
123 dprm->fetch(regs, (void *)addr, dest); \ 124 dprm->fetch(regs, (void *)addr, dest); \
124 } else \ 125 } else \
125 *(type *)dest = 0; \ 126 *(type *)dest = 0; \
126} 127} \
128NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type));
127DEFINE_BASIC_FETCH_FUNCS(deref) 129DEFINE_BASIC_FETCH_FUNCS(deref)
128DEFINE_FETCH_deref(string) 130DEFINE_FETCH_deref(string)
129 131
130__kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, 132void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
131 void *data, void *dest) 133 void *data, void *dest)
132{ 134{
133 struct deref_fetch_param *dprm = data; 135 struct deref_fetch_param *dprm = data;
134 unsigned long addr; 136 unsigned long addr;
@@ -140,16 +142,18 @@ __kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
140 } else 142 } else
141 *(string_size *)dest = 0; 143 *(string_size *)dest = 0;
142} 144}
145NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size));
143 146
144static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) 147static void update_deref_fetch_param(struct deref_fetch_param *data)
145{ 148{
146 if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) 149 if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
147 update_deref_fetch_param(data->orig.data); 150 update_deref_fetch_param(data->orig.data);
148 else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) 151 else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
149 update_symbol_cache(data->orig.data); 152 update_symbol_cache(data->orig.data);
150} 153}
154NOKPROBE_SYMBOL(update_deref_fetch_param);
151 155
152static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) 156static void free_deref_fetch_param(struct deref_fetch_param *data)
153{ 157{
154 if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) 158 if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
155 free_deref_fetch_param(data->orig.data); 159 free_deref_fetch_param(data->orig.data);
@@ -157,6 +161,7 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
157 free_symbol_cache(data->orig.data); 161 free_symbol_cache(data->orig.data);
158 kfree(data); 162 kfree(data);
159} 163}
164NOKPROBE_SYMBOL(free_deref_fetch_param);
160 165
161/* Bitfield fetch function */ 166/* Bitfield fetch function */
162struct bitfield_fetch_param { 167struct bitfield_fetch_param {
@@ -166,8 +171,8 @@ struct bitfield_fetch_param {
166}; 171};
167 172
168#define DEFINE_FETCH_bitfield(type) \ 173#define DEFINE_FETCH_bitfield(type) \
169__kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ 174void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
170 void *data, void *dest) \ 175 void *data, void *dest) \
171{ \ 176{ \
172 struct bitfield_fetch_param *bprm = data; \ 177 struct bitfield_fetch_param *bprm = data; \
173 type buf = 0; \ 178 type buf = 0; \
@@ -177,13 +182,13 @@ __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
177 buf >>= bprm->low_shift; \ 182 buf >>= bprm->low_shift; \
178 } \ 183 } \
179 *(type *)dest = buf; \ 184 *(type *)dest = buf; \
180} 185} \
181 186NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type));
182DEFINE_BASIC_FETCH_FUNCS(bitfield) 187DEFINE_BASIC_FETCH_FUNCS(bitfield)
183#define fetch_bitfield_string NULL 188#define fetch_bitfield_string NULL
184#define fetch_bitfield_string_size NULL 189#define fetch_bitfield_string_size NULL
185 190
186static __kprobes void 191static void
187update_bitfield_fetch_param(struct bitfield_fetch_param *data) 192update_bitfield_fetch_param(struct bitfield_fetch_param *data)
188{ 193{
189 /* 194 /*
@@ -196,7 +201,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param *data)
196 update_symbol_cache(data->orig.data); 201 update_symbol_cache(data->orig.data);
197} 202}
198 203
199static __kprobes void 204static void
200free_bitfield_fetch_param(struct bitfield_fetch_param *data) 205free_bitfield_fetch_param(struct bitfield_fetch_param *data)
201{ 206{
202 /* 207 /*
@@ -255,17 +260,17 @@ fail:
255} 260}
256 261
257/* Special function : only accept unsigned long */ 262/* Special function : only accept unsigned long */
258static __kprobes void fetch_kernel_stack_address(struct pt_regs *regs, 263static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest)
259 void *dummy, void *dest)
260{ 264{
261 *(unsigned long *)dest = kernel_stack_pointer(regs); 265 *(unsigned long *)dest = kernel_stack_pointer(regs);
262} 266}
267NOKPROBE_SYMBOL(fetch_kernel_stack_address);
263 268
264static __kprobes void fetch_user_stack_address(struct pt_regs *regs, 269static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest)
265 void *dummy, void *dest)
266{ 270{
267 *(unsigned long *)dest = user_stack_pointer(regs); 271 *(unsigned long *)dest = user_stack_pointer(regs);
268} 272}
273NOKPROBE_SYMBOL(fetch_user_stack_address);
269 274
270static fetch_func_t get_fetch_size_function(const struct fetch_type *type, 275static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
271 fetch_func_t orig_fn, 276 fetch_func_t orig_fn,
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index fb1ab5dfbd42..4f815fbce16d 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -81,13 +81,13 @@
81 */ 81 */
82#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) 82#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
83 83
84static inline void *get_rloc_data(u32 *dl) 84static nokprobe_inline void *get_rloc_data(u32 *dl)
85{ 85{
86 return (u8 *)dl + get_rloc_offs(*dl); 86 return (u8 *)dl + get_rloc_offs(*dl);
87} 87}
88 88
89/* For data_loc conversion */ 89/* For data_loc conversion */
90static inline void *get_loc_data(u32 *dl, void *ent) 90static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
91{ 91{
92 return (u8 *)ent + get_rloc_offs(*dl); 92 return (u8 *)ent + get_rloc_offs(*dl);
93} 93}
@@ -136,9 +136,8 @@ typedef u32 string_size;
136 136
137/* Printing in basic type function template */ 137/* Printing in basic type function template */
138#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \ 138#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \
139__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ 139int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
140 const char *name, \ 140 void *data, void *ent); \
141 void *data, void *ent); \
142extern const char PRINT_TYPE_FMT_NAME(type)[] 141extern const char PRINT_TYPE_FMT_NAME(type)[]
143 142
144DECLARE_BASIC_PRINT_TYPE_FUNC(u8); 143DECLARE_BASIC_PRINT_TYPE_FUNC(u8);
@@ -303,7 +302,7 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp)
303 return !!(tp->flags & TP_FLAG_REGISTERED); 302 return !!(tp->flags & TP_FLAG_REGISTERED);
304} 303}
305 304
306static inline __kprobes void call_fetch(struct fetch_param *fprm, 305static nokprobe_inline void call_fetch(struct fetch_param *fprm,
307 struct pt_regs *regs, void *dest) 306 struct pt_regs *regs, void *dest)
308{ 307{
309 return fprm->fn(regs, fprm->data, dest); 308 return fprm->fn(regs, fprm->data, dest);
@@ -351,7 +350,7 @@ extern ssize_t traceprobe_probes_write(struct file *file,
351extern int traceprobe_command(const char *buf, int (*createfn)(int, char**)); 350extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
352 351
353 /* Sum up total data length for dynamic arrays (strings) */ 352 /* Sum up total data length for dynamic arrays (strings) */
354static inline __kprobes int 353static nokprobe_inline int
355__get_data_size(struct trace_probe *tp, struct pt_regs *regs) 354__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
356{ 355{
357 int i, ret = 0; 356 int i, ret = 0;
@@ -367,7 +366,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
367} 366}
368 367
369/* Store the value of each argument */ 368/* Store the value of each argument */
370static inline __kprobes void 369static nokprobe_inline void
371store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, 370store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
372 u8 *data, int maxlen) 371 u8 *data, int maxlen)
373{ 372{
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index c082a7441345..04fdb5de823c 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
108 * Uprobes-specific fetch functions 108 * Uprobes-specific fetch functions
109 */ 109 */
110#define DEFINE_FETCH_stack(type) \ 110#define DEFINE_FETCH_stack(type) \
111static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ 111static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
112 void *offset, void *dest) \ 112 void *offset, void *dest) \
113{ \ 113{ \
114 *(type *)dest = (type)get_user_stack_nth(regs, \ 114 *(type *)dest = (type)get_user_stack_nth(regs, \
115 ((unsigned long)offset)); \ 115 ((unsigned long)offset)); \
@@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack)
120#define fetch_stack_string_size NULL 120#define fetch_stack_string_size NULL
121 121
122#define DEFINE_FETCH_memory(type) \ 122#define DEFINE_FETCH_memory(type) \
123static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ 123static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
124 void *addr, void *dest) \ 124 void *addr, void *dest) \
125{ \ 125{ \
126 type retval; \ 126 type retval; \
127 void __user *vaddr = (void __force __user *) addr; \ 127 void __user *vaddr = (void __force __user *) addr; \
@@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory)
136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max 136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
137 * length and relative data location. 137 * length and relative data location.
138 */ 138 */
139static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, 139static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
140 void *addr, void *dest) 140 void *addr, void *dest)
141{ 141{
142 long ret; 142 long ret;
143 u32 rloc = *(u32 *)dest; 143 u32 rloc = *(u32 *)dest;
@@ -158,8 +158,8 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
158 } 158 }
159} 159}
160 160
161static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, 161static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
162 void *addr, void *dest) 162 void *addr, void *dest)
163{ 163{
164 int len; 164 int len;
165 void __user *vaddr = (void __force __user *) addr; 165 void __user *vaddr = (void __force __user *) addr;
@@ -184,8 +184,8 @@ static unsigned long translate_user_vaddr(void *file_offset)
184} 184}
185 185
186#define DEFINE_FETCH_file_offset(type) \ 186#define DEFINE_FETCH_file_offset(type) \
187static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\ 187static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
188 void *offset, void *dest) \ 188 void *offset, void *dest)\
189{ \ 189{ \
190 void *vaddr = (void *)translate_user_vaddr(offset); \ 190 void *vaddr = (void *)translate_user_vaddr(offset); \
191 \ 191 \
@@ -1009,56 +1009,60 @@ uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1009 return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm); 1009 return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
1010} 1010}
1011 1011
1012static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) 1012static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1013{ 1013{
1014 bool done; 1014 bool done;
1015 1015
1016 write_lock(&tu->filter.rwlock); 1016 write_lock(&tu->filter.rwlock);
1017 if (event->hw.tp_target) { 1017 if (event->hw.tp_target) {
1018 /* 1018 list_del(&event->hw.tp_list);
1019 * event->parent != NULL means copy_process(), we can avoid
1020 * uprobe_apply(). current->mm must be probed and we can rely
1021 * on dup_mmap() which preserves the already installed bp's.
1022 *
1023 * attr.enable_on_exec means that exec/mmap will install the
1024 * breakpoints we need.
1025 */
1026 done = tu->filter.nr_systemwide || 1019 done = tu->filter.nr_systemwide ||
1027 event->parent || event->attr.enable_on_exec || 1020 (event->hw.tp_target->flags & PF_EXITING) ||
1028 uprobe_filter_event(tu, event); 1021 uprobe_filter_event(tu, event);
1029 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1030 } else { 1022 } else {
1023 tu->filter.nr_systemwide--;
1031 done = tu->filter.nr_systemwide; 1024 done = tu->filter.nr_systemwide;
1032 tu->filter.nr_systemwide++;
1033 } 1025 }
1034 write_unlock(&tu->filter.rwlock); 1026 write_unlock(&tu->filter.rwlock);
1035 1027
1036 if (!done) 1028 if (!done)
1037 uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); 1029 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1038 1030
1039 return 0; 1031 return 0;
1040} 1032}
1041 1033
1042static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) 1034static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1043{ 1035{
1044 bool done; 1036 bool done;
1037 int err;
1045 1038
1046 write_lock(&tu->filter.rwlock); 1039 write_lock(&tu->filter.rwlock);
1047 if (event->hw.tp_target) { 1040 if (event->hw.tp_target) {
1048 list_del(&event->hw.tp_list); 1041 /*
1042 * event->parent != NULL means copy_process(), we can avoid
1043 * uprobe_apply(). current->mm must be probed and we can rely
1044 * on dup_mmap() which preserves the already installed bp's.
1045 *
1046 * attr.enable_on_exec means that exec/mmap will install the
1047 * breakpoints we need.
1048 */
1049 done = tu->filter.nr_systemwide || 1049 done = tu->filter.nr_systemwide ||
1050 (event->hw.tp_target->flags & PF_EXITING) || 1050 event->parent || event->attr.enable_on_exec ||
1051 uprobe_filter_event(tu, event); 1051 uprobe_filter_event(tu, event);
1052 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1052 } else { 1053 } else {
1053 tu->filter.nr_systemwide--;
1054 done = tu->filter.nr_systemwide; 1054 done = tu->filter.nr_systemwide;
1055 tu->filter.nr_systemwide++;
1055 } 1056 }
1056 write_unlock(&tu->filter.rwlock); 1057 write_unlock(&tu->filter.rwlock);
1057 1058
1058 if (!done) 1059 err = 0;
1059 uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); 1060 if (!done) {
1060 1061 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1061 return 0; 1062 if (err)
1063 uprobe_perf_close(tu, event);
1064 }
1065 return err;
1062} 1066}
1063 1067
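The two functions trade places so that uprobe_perf_open() can call uprobe_perf_close() directly, and open now propagates uprobe_apply() failures: the filter bookkeeping is recorded first, then on error the function unwinds through the close path, keeping open and close exact mirrors. The pattern in miniature (all names hypothetical):

    struct filter_state { int nr_users; };

    static void filter_close(struct filter_state *f)
    {
            f->nr_users--;
    }

    static int filter_open(struct filter_state *f, int (*apply)(void))
    {
            int err;

            f->nr_users++;		/* bookkeeping, mirrored in close */
            err = apply();		/* the fallible step, cf. uprobe_apply() */
            if (err)
                    filter_close(f);	/* unwind via the teardown path */
            return err;
    }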
1064static bool uprobe_perf_filter(struct uprobe_consumer *uc, 1068static bool uprobe_perf_filter(struct uprobe_consumer *uc,