Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--	kernel/trace/bpf_trace.c	96
1 file changed, 88 insertions, 8 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f92d6ad5e080..3994a231eb92 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -19,6 +19,9 @@
19#include "trace_probe.h" 19#include "trace_probe.h"
20#include "trace.h" 20#include "trace.h"
21 21
22#define bpf_event_rcu_dereference(p) \
23 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
24
22#ifdef CONFIG_MODULES 25#ifdef CONFIG_MODULES
23struct bpf_trace_module { 26struct bpf_trace_module {
24 struct module *module; 27 struct module *module;
@@ -567,6 +570,69 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+struct send_signal_irq_work {
+	struct irq_work irq_work;
+	struct task_struct *task;
+	u32 sig;
+};
+
+static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
+
+static void do_bpf_send_signal(struct irq_work *entry)
+{
+	struct send_signal_irq_work *work;
+
+	work = container_of(entry, struct send_signal_irq_work, irq_work);
+	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+	struct send_signal_irq_work *work = NULL;
+
+	/* Similar to bpf_probe_write_user, task needs to be
+	 * in a sound condition and kernel memory access be
+	 * permitted in order to send signal to the current
+	 * task.
+	 */
+	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
+		return -EPERM;
+	if (unlikely(uaccess_kernel()))
+		return -EPERM;
+	if (unlikely(!nmi_uaccess_okay()))
+		return -EPERM;
+
+	if (in_nmi()) {
+		/* Do an early check on signal validity. Otherwise,
+		 * the error is lost in deferred irq_work.
+		 */
+		if (unlikely(!valid_signal(sig)))
+			return -EINVAL;
+
+		work = this_cpu_ptr(&send_signal_work);
+		if (work->irq_work.flags & IRQ_WORK_BUSY)
+			return -EBUSY;
+
+		/* Add the current task, which is the target of sending signal,
+		 * to the irq_work. The current task may change when queued
+		 * irq works get executed.
+		 */
+		work->task = current;
+		work->sig = sig;
+		irq_work_queue(&work->irq_work);
+		return 0;
+	}
+
+	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_proto = {
+	.func		= bpf_send_signal,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -617,6 +683,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_current_cgroup_id:
 		return &bpf_get_current_cgroup_id_proto;
 #endif
+	case BPF_FUNC_send_signal:
+		return &bpf_send_signal_proto;
 	default:
 		return NULL;
 	}
@@ -1034,7 +1102,7 @@ static DEFINE_MUTEX(bpf_event_mutex);
 int perf_event_attach_bpf_prog(struct perf_event *event,
 			       struct bpf_prog *prog)
 {
-	struct bpf_prog_array __rcu *old_array;
+	struct bpf_prog_array *old_array;
 	struct bpf_prog_array *new_array;
 	int ret = -EEXIST;
 
@@ -1052,7 +1120,7 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	if (event->prog)
 		goto unlock;
 
-	old_array = event->tp_event->prog_array;
+	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
 	if (old_array &&
 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
 		ret = -E2BIG;
@@ -1075,7 +1143,7 @@ unlock:
 
 void perf_event_detach_bpf_prog(struct perf_event *event)
 {
-	struct bpf_prog_array __rcu *old_array;
+	struct bpf_prog_array *old_array;
 	struct bpf_prog_array *new_array;
 	int ret;
 
@@ -1084,7 +1152,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 	if (!event->prog)
 		goto unlock;
 
-	old_array = event->tp_event->prog_array;
+	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
 	if (ret == -ENOENT)
 		goto unlock;
@@ -1106,6 +1174,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 {
 	struct perf_event_query_bpf __user *uquery = info;
 	struct perf_event_query_bpf query = {};
+	struct bpf_prog_array *progs;
 	u32 *ids, prog_cnt, ids_len;
 	int ret;
 
@@ -1130,10 +1199,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 	 */
 
 	mutex_lock(&bpf_event_mutex);
-	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
-				       ids,
-				       ids_len,
-				       &prog_cnt);
+	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
+	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
 	mutex_unlock(&bpf_event_mutex);
 
 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
@@ -1343,5 +1410,18 @@ static int __init bpf_event_init(void)
 	return 0;
 }
 
+static int __init send_signal_irq_work_init(void)
+{
+	int cpu;
+	struct send_signal_irq_work *work;
+
+	for_each_possible_cpu(cpu) {
+		work = per_cpu_ptr(&send_signal_work, cpu);
+		init_irq_work(&work->irq_work, do_bpf_send_signal);
+	}
+	return 0;
+}
+
 fs_initcall(bpf_event_init);
+subsys_initcall(send_signal_irq_work_init);
 #endif /* CONFIG_MODULES */
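
Usage illustration (not part of the commit above): a minimal sketch of a BPF program calling the new bpf_send_signal() helper. The attach point, program name and signal value are assumptions made for the example; it presumes a libbpf build environment (bpf_helpers.h) and a kernel that already carries this patch.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example, not part of the patch: a tracepoint program that
 * asks the kernel to deliver SIGUSR1 to the thread group of whichever
 * task enters nanosleep(). Attach point and names are illustrative only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SIGUSR1 10	/* value on x86_64; avoids pulling in libc headers */

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int send_signal_on_nanosleep(void *ctx)
{
	/* Returns 0 on success, or a negative error such as -EPERM
	 * (exiting task, kernel thread, or kernel uaccess context),
	 * -EINVAL (invalid signal number) or -EBUSY (the per-cpu
	 * irq_work slot is still busy when called from NMI context).
	 */
	bpf_send_signal(SIGUSR1);
	return 0;
}

char _license[] SEC("license") = "GPL";

Because the program may fire in NMI context, the patch defers the actual group_send_sig_info() call to a per-cpu irq_work in that case, which is why a caller can see -EBUSY while a previous signal request on the same CPU is still pending.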