Diffstat (limited to 'kernel/bpf/helpers.c')
-rw-r--r--	kernel/bpf/helpers.c	55
1 file changed, 18 insertions(+), 37 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1ea3afba1a4f..39918402e6e9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -16,6 +16,7 @@
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/uidgid.h>
+#include <linux/filter.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -26,48 +27,32 @@
  * if program is allowed to access maps, so check rcu_read_lock_held in
  * all three functions.
  */
-static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 {
-	/* verifier checked that R1 contains a valid pointer to bpf_map
-	 * and R2 points to a program stack and map->key_size bytes were
-	 * initialized
-	 */
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-	void *value;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	value = map->ops->map_lookup_elem(map, key);
-
-	/* lookup() returns either pointer to element value or NULL
-	 * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
-	 */
-	return (unsigned long) value;
+	return (unsigned long) map->ops->map_lookup_elem(map, key);
 }
 
 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 	.func		= bpf_map_lookup_elem,
 	.gpl_only	= false,
+	.pkt_access	= true,
 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+	   void *, value, u64, flags)
 {
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-	void *value = (void *) (unsigned long) r3;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	return map->ops->map_update_elem(map, key, value, r4);
+	return map->ops->map_update_elem(map, key, value, flags);
 }
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
 	.func		= bpf_map_update_elem,
 	.gpl_only	= false,
+	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
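
[note] The BPF_CALL_x() macros come from the newly included <linux/filter.h>. As a rough, simplified sketch (the real macro builds the wrapper and its casts through internal __BPF_MAP()/__BPF_CAST() helpers, so this is not the literal preprocessor output), BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) expands to approximately:

	/* Simplified expansion sketch, not the literal preprocessor output. */
	static __always_inline u64 ____bpf_map_lookup_elem(struct bpf_map *map,
							   void *key);

	/* Wrapper with the u64-register calling convention the BPF core
	 * expects; it performs the same casts the helpers previously wrote
	 * out by hand.
	 */
	u64 bpf_map_lookup_elem(u64 map, u64 key)
	{
		return ____bpf_map_lookup_elem((struct bpf_map *)(unsigned long) map,
					       (void *)(unsigned long) key);
	}

	/* The braces after BPF_CALL_2(...) in the hunk above become the body
	 * of this inner, properly typed function.
	 */
	static __always_inline u64 ____bpf_map_lookup_elem(struct bpf_map *map,
							   void *key)
	{
		WARN_ON_ONCE(!rcu_read_lock_held());
		return (unsigned long) map->ops->map_lookup_elem(map, key);
	}

Callers still see a function taking u64 registers, while the helper body gets typed arguments, which is what lets this diff delete the repetitive (unsigned long) casts.
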
@@ -75,19 +60,16 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
 	.arg4_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 {
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
 	return map->ops->map_delete_elem(map, key);
 }
 
 const struct bpf_func_proto bpf_map_delete_elem_proto = {
 	.func		= bpf_map_delete_elem,
 	.gpl_only	= false,
+	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
@@ -99,7 +81,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_smp_processor_id)
 {
 	return smp_processor_id();
 }
@@ -110,7 +92,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_ktime_get_ns)
 {
 	/* NMI safe access to clock monotonic */
 	return ktime_get_mono_fast_ns();
@@ -122,11 +104,11 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_pid_tgid)
 {
 	struct task_struct *task = current;
 
-	if (!task)
+	if (unlikely(!task))
 		return -EINVAL;
 
 	return (u64) task->tgid << 32 | task->pid;
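
[note] The packed return value above carries the tgid in the upper 32 bits and the pid in the lower 32 bits. A hypothetical consumer would unpack it like this (split_pid_tgid() is an illustrative name, not part of this diff):

	/* Hypothetical unpacking helper mirroring the packing above:
	 * tgid (userspace "process" ID) in the high half, pid (userspace
	 * "thread" ID) in the low half.
	 */
	static inline void split_pid_tgid(u64 pid_tgid, u32 *tgid, u32 *pid)
	{
		*tgid = pid_tgid >> 32;
		*pid = (u32) pid_tgid;
	}
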
@@ -138,18 +120,18 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_uid_gid)
 {
 	struct task_struct *task = current;
 	kuid_t uid;
 	kgid_t gid;
 
-	if (!task)
+	if (unlikely(!task))
 		return -EINVAL;
 
 	current_uid_gid(&uid, &gid);
 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
 		     from_kuid(&init_user_ns, uid);
 }
 
 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
@@ -158,10 +140,9 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
 {
 	struct task_struct *task = current;
-	char *buf = (char *) (long) r1;
 
 	if (unlikely(!task))
 		goto err_clear;
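
[note] Per the header comment preserved near the top of the file, a kernel subsystem exposes these protos by returning them from its verifier_ops->get_func_proto() callback. A minimal sketch of such a callback (the function name and the exact set of helpers exposed are illustrative assumptions, not part of this diff):

	/* Illustrative get_func_proto() callback; the name and the set of
	 * helpers exposed are assumptions for the example.
	 */
	static const struct bpf_func_proto *
	example_prog_func_proto(enum bpf_func_id func_id)
	{
		switch (func_id) {
		case BPF_FUNC_map_lookup_elem:
			return &bpf_map_lookup_elem_proto;
		case BPF_FUNC_map_update_elem:
			return &bpf_map_update_elem_proto;
		case BPF_FUNC_map_delete_elem:
			return &bpf_map_delete_elem_proto;
		case BPF_FUNC_ktime_get_ns:
			return &bpf_ktime_get_ns_proto;
		default:
			/* Returning NULL makes the verifier reject the call. */
			return NULL;
		}
	}
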