author		Daniel Borkmann <daniel@iogearbox.net>	2016-09-08 20:45:31 -0400
committer	David S. Miller <davem@davemloft.net>	2016-09-09 22:36:04 -0400
commit		f3694e00123802d688180e7ae90b240669910e3c (patch)
tree		321a9b95e9df3e64adbc8340a5f63a778db69e70
parent		374fb54eeaaa6b2cb82bca73a11273687bb2a96a (diff)
bpf: add BPF_CALL_x macros for declaring helpers
This work adds BPF_CALL_<n>() macros and converts all the eBPF helper functions to use them, in a similar fashion to the SYSCALL_DEFINE<n>() macros that are used today.

The motivation is to hide all the register handling and the necessary casts from the user, so that they happen automatically in the background when a helper is declared through BPF_CALL_<n>(). This makes the current helpers easier to review, makes future helpers easier to write, avoids getting the casting mess wrong, and allows all helpers to be extended at once (e.g. with build-time checks). It also makes it easier to detect in code reviews that unused registers are not accidentally used in the code, which would break compatibility with existing programs.

The BPF_CALL_<n>() internals are quite similar to the SYSCALL_DEFINE<n>() ones, with some fundamental differences; for example, when generating the actual helper function that carries all u64 regs, unused regs need to be padded so that we always end up with 5 u64 regs as arguments.

I reviewed several generated BPF_CALL_<n>() variants (0 through 5) in the .i preprocessor output and they all look as expected. No sparse issue was spotted. We also let this sit for a few days with Fengguang's kbuild test robot, and no issues were seen. On s390, it barked about the "uses dynamic stack allocation" notice, which is an old one from bpf_perf_event_output{,_tp}() reappearing here due to the conversion to the call wrapper; it only says that the perf raw record/frag sits on the stack (gcc with s390's -mwarn-dynamicstack), but that's all. Various runtime tests were fine as well. All eBPF helpers are now converted to use these macros, getting rid of a good chunk of the raw castings.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
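For illustration, here is one of the simpler helpers from the kernel/bpf/helpers.c hunk further down, before and after the conversion, followed by a rough sketch of what the BPF_CALL_2() declaration expands to. The expansion is an approximation for illustration only: the __force annotations and the __builtin_choose_expr() cast plumbing from __BPF_CAST() are simplified, and it assumes a 64-bit build where pointers fit in unsigned long.

/* Before: raw u64 register signature, casts open-coded per helper. */
static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;

	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

/* After: typed arguments, register handling hidden in the macro. */
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

/* Roughly what BPF_CALL_2() generates: a wrapper carrying the full
 * five-u64 signature that casts the used registers and pads the rest,
 * plus the typed body as an always-inlined function.
 */
static __always_inline u64 ____bpf_map_delete_elem(struct bpf_map *map, void *key);
u64 bpf_map_delete_elem(u64 map, u64 key, u64 __ur_1, u64 __ur_2, u64 __ur_3);
u64 bpf_map_delete_elem(u64 map, u64 key, u64 __ur_1, u64 __ur_2, u64 __ur_3)
{
	return ____bpf_map_delete_elem((struct bpf_map *)(unsigned long) map,
				       (void *)(unsigned long) key);
}
static __always_inline u64 ____bpf_map_delete_elem(struct bpf_map *map, void *key)
{
	/* typed helper body as written above */
}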
-rw-r--r--	include/linux/filter.h		50
-rw-r--r--	kernel/bpf/core.c		2
-rw-r--r--	kernel/bpf/helpers.c		46
-rw-r--r--	kernel/bpf/stackmap.c		5
-rw-r--r--	kernel/trace/bpf_trace.c	75
-rw-r--r--	net/core/filter.c		129
6 files changed, 149 insertions, 158 deletions
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7fabad8dc3fc..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -328,6 +328,56 @@ struct bpf_prog_aux;
 		__size;						\
 	})
 
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a)						       \
+	(__force t)							       \
+	(__force							       \
+	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),     \
+				      (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t   a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n)							       \
+	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
+		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...)					       \
+	static __always_inline						       \
+	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));  \
+	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
+	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
+	{								       \
+		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+	}								       \
+	static __always_inline						       \
+	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
+
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 03fd23d4d587..7b7baaed9ed4 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1018,7 +1018,7 @@ void bpf_user_rnd_init_once(void)
 	prandom_init_once(&bpf_user_rnd_state);
 }
 
-u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_user_rnd_u32)
 {
 	/* Should someone ever have the rather unwise idea to use some
 	 * of the registers passed into this function, then note that
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 6df73bd1ba34..a5b8bf8cfcfd 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -16,6 +16,7 @@
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/uidgid.h>
+#include <linux/filter.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -26,24 +27,10 @@
  * if program is allowed to access maps, so check rcu_read_lock_held in
  * all three functions.
  */
-static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 {
-	/* verifier checked that R1 contains a valid pointer to bpf_map
-	 * and R2 points to a program stack and map->key_size bytes were
-	 * initialized
-	 */
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-	void *value;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	value = map->ops->map_lookup_elem(map, key);
-
-	/* lookup() returns either pointer to element value or NULL
-	 * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
-	 */
-	return (unsigned long) value;
+	return (unsigned long) map->ops->map_lookup_elem(map, key);
 }
 
 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
@@ -54,15 +41,11 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+	   void *, value, u64, flags)
 {
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-	void *value = (void *) (unsigned long) r3;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	return map->ops->map_update_elem(map, key, value, r4);
+	return map->ops->map_update_elem(map, key, value, flags);
 }
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
@@ -75,13 +58,9 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
 	.arg4_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 {
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
-	void *key = (void *) (unsigned long) r2;
-
 	WARN_ON_ONCE(!rcu_read_lock_held());
-
 	return map->ops->map_delete_elem(map, key);
 }
 
@@ -99,7 +78,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_smp_processor_id)
 {
 	return smp_processor_id();
 }
@@ -110,7 +89,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_ktime_get_ns)
 {
 	/* NMI safe access to clock monotonic */
 	return ktime_get_mono_fast_ns();
@@ -122,7 +101,7 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_pid_tgid)
 {
 	struct task_struct *task = current;
 
@@ -138,7 +117,7 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_uid_gid)
 {
 	struct task_struct *task = current;
 	kuid_t uid;
@@ -158,10 +137,9 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
 {
 	struct task_struct *task = current;
-	char *buf = (char *) (long) r1;
 
 	if (unlikely(!task))
 		goto err_clear;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index bf4495fcd25d..732ae16d12b7 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -116,10 +116,9 @@ free_smap:
 	return ERR_PTR(err);
 }
 
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
+	   u64, flags)
 {
-	struct pt_regs *regs = (struct pt_regs *) (long) r1;
-	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 	struct perf_callchain_entry *trace;
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e63d7d435796..5dcb99281259 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -61,11 +61,9 @@ unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
 }
 EXPORT_SYMBOL_GPL(trace_call_bpf);
 
-static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
-	void *dst = (void *) (long) r1;
-	int ret, size = (int) r2;
-	void *unsafe_ptr = (void *) (long) r3;
+	int ret;
 
 	ret = probe_kernel_read(dst, unsafe_ptr, size);
 	if (unlikely(ret < 0))
@@ -83,12 +81,9 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+	   u32, size)
 {
-	void *unsafe_ptr = (void *) (long) r1;
-	void *src = (void *) (long) r2;
-	int size = (int) r3;
-
 	/*
 	 * Ensure we're in user context which is safe for the helper to
 	 * run. This helper has no business in a kthread.
@@ -130,9 +125,9 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
  * limited trace_printk()
  * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
  */
-static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+	   u64, arg2, u64, arg3)
 {
-	char *fmt = (char *) (long) r1;
 	bool str_seen = false;
 	int mod[3] = {};
 	int fmt_cnt = 0;
@@ -178,16 +173,16 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
 
 			switch (fmt_cnt) {
 			case 1:
-				unsafe_addr = r3;
-				r3 = (long) buf;
+				unsafe_addr = arg1;
+				arg1 = (long) buf;
 				break;
 			case 2:
-				unsafe_addr = r4;
-				r4 = (long) buf;
+				unsafe_addr = arg2;
+				arg2 = (long) buf;
 				break;
 			case 3:
-				unsafe_addr = r5;
-				r5 = (long) buf;
+				unsafe_addr = arg3;
+				arg3 = (long) buf;
 				break;
 			}
 			buf[0] = 0;
@@ -209,9 +204,9 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
 	}
 
 	return __trace_printk(1/* fake ip will not be printed */, fmt,
-			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
-			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
-			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
+			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
+			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
 }
 
 static const struct bpf_func_proto bpf_trace_printk_proto = {
@@ -233,9 +228,8 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 	return &bpf_trace_printk_proto;
 }
 
-static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 {
-	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
@@ -312,11 +306,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	return 0;
 }
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+	   u64, flags, void *, data, u64, size)
 {
-	struct pt_regs *regs = (struct pt_regs *)(long) r1;
-	struct bpf_map *map = (struct bpf_map *)(long) r2;
-	void *data = (void *)(long) r4;
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
@@ -367,7 +359,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 	return __bpf_perf_event_output(regs, map, flags, &raw);
 }
 
-static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_task)
 {
 	return (long) current;
 }
@@ -378,16 +370,13 @@ static const struct bpf_func_proto bpf_get_current_task_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
-static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 {
-	struct bpf_map *map = (struct bpf_map *)(long)r1;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct cgroup *cgrp;
-	u32 idx = (u32)r2;
 
 	if (unlikely(in_interrupt()))
 		return -EINVAL;
-
 	if (unlikely(idx >= array->map.max_entries))
 		return -E2BIG;
 
@@ -481,16 +470,17 @@ static struct bpf_prog_type_list kprobe_tl = {
 	.type	= BPF_PROG_TYPE_KPROBE,
 };
 
-static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
+	   u64, flags, void *, data, u64, size)
 {
+	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
+
 	/*
 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
-	 * from there and call the same bpf_perf_event_output() helper
+	 * from there and call the same bpf_perf_event_output() helper inline.
 	 */
-	u64 ctx = *(long *)(uintptr_t)r1;
-
-	return bpf_perf_event_output(ctx, r2, index, r4, size);
+	return ____bpf_perf_event_output(regs, map, flags, data, size);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
@@ -504,11 +494,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
 	.arg5_type	= ARG_CONST_STACK_SIZE,
 };
 
-static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
+	   u64, flags)
 {
-	u64 ctx = *(long *)(uintptr_t)r1;
+	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
 
-	return bpf_get_stackid(ctx, r2, r3, r4, r5);
+	/*
+	 * Same comment as in bpf_perf_event_output_tp(), only that this time
+	 * the other helper's function body cannot be inlined due to being
+	 * external, thus we need to call raw helper function.
+	 */
+	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+			       flags, 0, 0);
 }
 
 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
diff --git a/net/core/filter.c b/net/core/filter.c
index d6d9bb89ce3a..298b146b47e7 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -94,14 +94,13 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 }
 EXPORT_SYMBOL(sk_filter_trim_cap);
 
-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
 {
-	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+	return skb_get_poff(skb);
 }
 
-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
 {
-	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
 	struct nlattr *nla;
 
 	if (skb_is_nonlinear(skb))
@@ -120,9 +119,8 @@ static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 	return 0;
 }
 
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
 {
-	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
 	struct nlattr *nla;
 
 	if (skb_is_nonlinear(skb))
@@ -145,7 +143,7 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 	return 0;
 }
 
-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_0(__get_raw_cpu_id)
 {
 	return raw_smp_processor_id();
 }
@@ -1376,12 +1374,9 @@ static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
 		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
 }
 
-static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+	   const void *, from, u32, len, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	unsigned int offset = (unsigned int) r2;
-	void *from = (void *) (long) r3;
-	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
@@ -1416,12 +1411,9 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+	   void *, to, u32, len)
 {
-	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
-	unsigned int offset = (unsigned int) r2;
-	void *to = (void *)(unsigned long) r3;
-	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
 	if (unlikely(offset > 0xffff))
@@ -1449,10 +1441,9 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 	.arg4_type	= ARG_CONST_STACK_SIZE,
 };
 
-static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
+	   u64, from, u64, to, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	unsigned int offset = (unsigned int) r2;
 	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
@@ -1494,12 +1485,11 @@ static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
+	   u64, from, u64, to, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
-	unsigned int offset = (unsigned int) r2;
 	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
@@ -1547,12 +1537,11 @@ static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
+BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
+	   __be32 *, to, u32, to_size, __wsum, seed)
 {
 	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
-	u64 diff_size = from_size + to_size;
-	__be32 *from = (__be32 *) (long) r1;
-	__be32 *to = (__be32 *) (long) r3;
+	u32 diff_size = from_size + to_size;
 	int i, j = 0;
 
 	/* This is quite flexible, some examples:
@@ -1610,9 +1599,8 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 	return ret;
 }
 
-static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	struct net_device *dev;
 
 	if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1648,7 +1636,7 @@ struct redirect_info {
 
 static DEFINE_PER_CPU(struct redirect_info, redirect_info);
 
-static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 
@@ -1687,9 +1675,9 @@ static const struct bpf_func_proto bpf_redirect_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
 {
-	return task_get_classid((struct sk_buff *) (unsigned long) r1);
+	return task_get_classid(skb);
 }
 
 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
@@ -1699,9 +1687,9 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
 {
-	return dst_tclassid((struct sk_buff *) (unsigned long) r1);
+	return dst_tclassid(skb);
 }
 
 static const struct bpf_func_proto bpf_get_route_realm_proto = {
@@ -1711,14 +1699,14 @@ static const struct bpf_func_proto bpf_get_route_realm_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
 {
 	/* If skb_clear_hash() was called due to mangling, we can
 	 * trigger SW recalculation here. Later access to hash
 	 * can then use the inline skb->hash via context directly
 	 * instead of calling this helper again.
 	 */
-	return skb_get_hash((struct sk_buff *) (unsigned long) r1);
+	return skb_get_hash(skb);
 }
 
 static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
@@ -1728,10 +1716,9 @@ static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
+	   u16, vlan_tci)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	__be16 vlan_proto = (__force __be16) r2;
 	int ret;
 
 	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
@@ -1756,9 +1743,8 @@ const struct bpf_func_proto bpf_skb_vlan_push_proto = {
 };
 EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
 
-static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	int ret;
 
 	bpf_push_mac_rcsum(skb);
@@ -1933,10 +1919,9 @@ static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
 	return -ENOTSUPP;
 }
 
-static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
+	   u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	__be16 proto = (__force __be16) r2;
 	int ret;
 
 	if (unlikely(flags))
@@ -1973,11 +1958,8 @@ static const struct bpf_func_proto bpf_skb_change_proto_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	u32 pkt_type = r2;
-
 	/* We only allow a restricted subset to be changed for now. */
 	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
 		     !skb_pkt_type_ok(pkt_type)))
@@ -2028,12 +2010,11 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
 	return __skb_trim_rcsum(skb, new_len);
 }
 
-static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+	   u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *)(long) r1;
 	u32 max_len = __bpf_skb_max_len(skb);
 	u32 min_len = __bpf_skb_min_len(skb);
-	u32 new_len = (u32) r2;
 	int ret;
 
 	if (unlikely(flags || new_len > max_len || new_len < min_len))
@@ -2113,13 +2094,10 @@ static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
 	return 0;
 }
 
-static u64 bpf_skb_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
-				u64 meta_size)
+BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
+	   u64, flags, void *, meta, u64, meta_size)
 {
-	struct sk_buff *skb = (struct sk_buff *)(long) r1;
-	struct bpf_map *map = (struct bpf_map *)(long) r2;
 	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
-	void *meta = (void *)(long) r4;
 
 	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
 		return -EINVAL;
@@ -2146,10 +2124,9 @@ static unsigned short bpf_tunnel_key_af(u64 flags)
 	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
 }
 
-static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
+	   u32, size, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
 	u8 compat[sizeof(struct bpf_tunnel_key)];
 	void *to_orig = to;
@@ -2214,10 +2191,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
 	.arg4_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	u8 *to = (u8 *) (long) r2;
 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
 	int err;
 
@@ -2252,10 +2227,9 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
 
 static struct metadata_dst __percpu *md_dst;
 
-static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
+	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
 	struct metadata_dst *md = this_cpu_ptr(md_dst);
 	u8 compat[sizeof(struct bpf_tunnel_key)];
 	struct ip_tunnel_info *info;
@@ -2273,7 +2247,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
 		 */
 		memcpy(compat, from, size);
 		memset(compat + size, 0, sizeof(compat) - size);
-		from = (struct bpf_tunnel_key *)compat;
+		from = (const struct bpf_tunnel_key *) compat;
 		break;
 	default:
 		return -EINVAL;
@@ -2323,10 +2297,9 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
 	.arg4_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
+	   const u8 *, from, u32, size)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	u8 *from = (u8 *) (long) r2;
 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 	const struct metadata_dst *md = this_cpu_ptr(md_dst);
 
@@ -2372,23 +2345,20 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 	}
 }
 
-static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
+	   u32, idx)
 {
-	struct sk_buff *skb = (struct sk_buff *)(long)r1;
-	struct bpf_map *map = (struct bpf_map *)(long)r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct cgroup *cgrp;
 	struct sock *sk;
-	u32 i = (u32)r3;
 
 	sk = skb->sk;
 	if (!sk || !sk_fullsock(sk))
 		return -ENOENT;
-
-	if (unlikely(i >= array->map.max_entries))
+	if (unlikely(idx >= array->map.max_entries))
 		return -E2BIG;
 
-	cgrp = READ_ONCE(array->ptrs[i]);
+	cgrp = READ_ONCE(array->ptrs[idx]);
 	if (unlikely(!cgrp))
 		return -EAGAIN;
 
@@ -2411,13 +2381,10 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
 	return 0;
 }
 
-static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
-				u64 meta_size)
+BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
+	   u64, flags, void *, meta, u64, meta_size)
 {
-	struct xdp_buff *xdp = (struct xdp_buff *)(long) r1;
-	struct bpf_map *map = (struct bpf_map *)(long) r2;
 	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
-	void *meta = (void *)(long) r4;
 
 	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
 		return -EINVAL;