aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-11-23 12:33:01 -0500
committerDavid S. Miller <davem@davemloft.net>2017-11-23 12:33:01 -0500
commite4be7baba81a816bdf778804508b43fa92c6446d (patch)
tree119e98d982af88dff2498031f77f817d7c7b6c33
parent0c19f846d582af919db66a5914a0189f9f92c936 (diff)
parentc131187db2d3fa2f8bf32fdf4e9a4ef805168467 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says: ==================== pull-request: bpf 2017-11-23 The following pull-request contains BPF updates for your *net* tree. The main changes are: 1) Several BPF offloading fixes, from Jakub. Among others: - Limit offload to cls_bpf and XDP program types only. - Move device validation into the driver and don't make any assumptions about the device in the classifier due to shared blocks semantics. - Don't pass offloaded XDP program into the driver when it should be run in native XDP instead. Offloaded ones are not JITed for the host in such cases. - Don't destroy device offload state when moved to another namespace. - Revert dumping offload info into user space for now, since ifindex alone is not sufficient. This will be redone properly for bpf-next tree. 2) Fix test_verifier to avoid using bpf_probe_write_user() helper in test cases, since it's dumping a warning into kernel log which may confuse users when only running tests. Switch to use bpf_trace_printk() instead, from Yonghong. 3) Several fixes for correcting ARG_CONST_SIZE_OR_ZERO semantics before it becomes uabi, from Gianluca. More specifically: - Add a type ARG_PTR_TO_MEM_OR_NULL that is used only by bpf_csum_diff(), where the argument is either a valid pointer or NULL. The subsequent ARG_CONST_SIZE_OR_ZERO then enforces a valid pointer in case of non-0 size or a valid pointer or NULL in case of size 0. Given that, the semantics for ARG_PTR_TO_MEM in combination with ARG_CONST_SIZE_OR_ZERO are now such that in case of size 0, the pointer must always be valid and cannot be NULL. This fix in semantics allows for bpf_probe_read() to drop the recently added size == 0 check in the helper that would become part of uabi otherwise once released. 
At the same time we can then fix bpf_probe_read_str() and bpf_perf_event_output() to use ARG_CONST_SIZE_OR_ZERO instead of ARG_CONST_SIZE in order to fix recently reported issues by Arnaldo et al, where LLVM optimizes two boundary checks into a single one for unknown variables where the verifier loses track of the variable bounds and thus rejects valid programs otherwise. 4) A fix for the verifier for the case when it detects comparison of two constants where the branch is guaranteed to not be taken at runtime. Verifier will rightfully prune the exploration of such paths, but we still pass the program to JITs, where they would complain about using reserved fields, etc. Track such dead instructions and sanitize them with mov r0,r0. Rejection is not possible since LLVM may generate them for valid C code and doesn't do as much data flow analysis as verifier. For bpf-next we might implement removal of such dead code and adjust branches instead. Fix from Alexei. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c10
-rw-r--r--include/linux/bpf.h19
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/uapi/linux/bpf.h8
-rw-r--r--kernel/bpf/offload.c27
-rw-r--r--kernel/bpf/syscall.c40
-rw-r--r--kernel/bpf/verifier.c31
-rw-r--r--kernel/trace/bpf_trace.c12
-rw-r--r--net/core/dev.c14
-rw-r--r--net/core/filter.c4
-rw-r--r--net/sched/cls_bpf.c8
-rw-r--r--tools/bpf/bpftool/prog.c31
-rw-r--r--tools/include/uapi/linux/bpf.h8
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c152
14 files changed, 216 insertions, 152 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index b6cee71f49d3..bc879aeb62d4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
214{ 214{
215 int err; 215 int err;
216 216
217 if (prog && !prog->aux->offload) 217 if (prog) {
218 return -EINVAL; 218 struct bpf_dev_offload *offload = prog->aux->offload;
219
220 if (!offload)
221 return -EINVAL;
222 if (offload->netdev != nn->dp.netdev)
223 return -EINVAL;
224 }
219 225
220 if (prog && old_prog) { 226 if (prog && old_prog) {
221 u8 cap; 227 u8 cap;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c397934f91dd..e55e4255a210 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -78,6 +78,7 @@ enum bpf_arg_type {
78 * functions that access data on eBPF program stack 78 * functions that access data on eBPF program stack
79 */ 79 */
80 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 80 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
81 ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
81 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 82 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
82 * helper function must fill all bytes or clear 83 * helper function must fill all bytes or clear
83 * them in error case. 84 * them in error case.
@@ -334,9 +335,8 @@ extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
334extern const struct bpf_verifier_ops xdp_analyzer_ops; 335extern const struct bpf_verifier_ops xdp_analyzer_ops;
335 336
336struct bpf_prog *bpf_prog_get(u32 ufd); 337struct bpf_prog *bpf_prog_get(u32 ufd);
337struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
338struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 338struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
339 struct net_device *netdev); 339 bool attach_drv);
340struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); 340struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
341void bpf_prog_sub(struct bpf_prog *prog, int i); 341void bpf_prog_sub(struct bpf_prog *prog, int i);
342struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); 342struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@@ -425,15 +425,9 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
425 return ERR_PTR(-EOPNOTSUPP); 425 return ERR_PTR(-EOPNOTSUPP);
426} 426}
427 427
428static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
429 enum bpf_prog_type type)
430{
431 return ERR_PTR(-EOPNOTSUPP);
432}
433
434static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 428static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
435 enum bpf_prog_type type, 429 enum bpf_prog_type type,
436 struct net_device *netdev) 430 bool attach_drv)
437{ 431{
438 return ERR_PTR(-EOPNOTSUPP); 432 return ERR_PTR(-EOPNOTSUPP);
439} 433}
@@ -514,9 +508,14 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
514} 508}
515#endif /* CONFIG_BPF_SYSCALL */ 509#endif /* CONFIG_BPF_SYSCALL */
516 510
511static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
512 enum bpf_prog_type type)
513{
514 return bpf_prog_get_type_dev(ufd, type, false);
515}
516
517int bpf_prog_offload_compile(struct bpf_prog *prog); 517int bpf_prog_offload_compile(struct bpf_prog *prog);
518void bpf_prog_offload_destroy(struct bpf_prog *prog); 518void bpf_prog_offload_destroy(struct bpf_prog *prog);
519u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
520 519
521#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 520#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
522int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 521int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 07b96aaca256..c561b986bab0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,7 +115,7 @@ struct bpf_insn_aux_data {
115 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 115 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
116 }; 116 };
117 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 117 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
118 int converted_op_size; /* the valid value width after perceived conversion */ 118 bool seen; /* this insn was processed by the verifier */
119}; 119};
120 120
121#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ 121#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -171,7 +171,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
171#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 171#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
172int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); 172int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
173#else 173#else
174int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) 174static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
175{ 175{
176 return -EOPNOTSUPP; 176 return -EOPNOTSUPP;
177} 177}
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e880ae6434ee..4c223ab30293 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -262,7 +262,7 @@ union bpf_attr {
262 __u32 kern_version; /* checked when prog_type=kprobe */ 262 __u32 kern_version; /* checked when prog_type=kprobe */
263 __u32 prog_flags; 263 __u32 prog_flags;
264 char prog_name[BPF_OBJ_NAME_LEN]; 264 char prog_name[BPF_OBJ_NAME_LEN];
265 __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ 265 __u32 prog_ifindex; /* ifindex of netdev to prep for */
266 }; 266 };
267 267
268 struct { /* anonymous struct used by BPF_OBJ_* commands */ 268 struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
897 897
898#define BPF_TAG_SIZE 8 898#define BPF_TAG_SIZE 8
899 899
900enum bpf_prog_status {
901 BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
902};
903
904struct bpf_prog_info { 900struct bpf_prog_info {
905 __u32 type; 901 __u32 type;
906 __u32 id; 902 __u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
914 __u32 nr_map_ids; 910 __u32 nr_map_ids;
915 __aligned_u64 map_ids; 911 __aligned_u64 map_ids;
916 char name[BPF_OBJ_NAME_LEN]; 912 char name[BPF_OBJ_NAME_LEN];
917 __u32 ifindex;
918 __u32 status;
919} __attribute__((aligned(8))); 913} __attribute__((aligned(8)));
920 914
921struct bpf_map_info { 915struct bpf_map_info {
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 2816feb38be1..68ec884440b7 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -14,8 +14,9 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
14 struct net *net = current->nsproxy->net_ns; 14 struct net *net = current->nsproxy->net_ns;
15 struct bpf_dev_offload *offload; 15 struct bpf_dev_offload *offload;
16 16
17 if (!capable(CAP_SYS_ADMIN)) 17 if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
18 return -EPERM; 18 attr->prog_type != BPF_PROG_TYPE_XDP)
19 return -EINVAL;
19 20
20 if (attr->prog_flags) 21 if (attr->prog_flags)
21 return -EINVAL; 22 return -EINVAL;
@@ -28,7 +29,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
28 init_waitqueue_head(&offload->verifier_done); 29 init_waitqueue_head(&offload->verifier_done);
29 30
30 rtnl_lock(); 31 rtnl_lock();
31 offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex); 32 offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
32 if (!offload->netdev) { 33 if (!offload->netdev) {
33 rtnl_unlock(); 34 rtnl_unlock();
34 kfree(offload); 35 kfree(offload);
@@ -85,6 +86,10 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
85 struct bpf_dev_offload *offload = prog->aux->offload; 86 struct bpf_dev_offload *offload = prog->aux->offload;
86 struct netdev_bpf data = {}; 87 struct netdev_bpf data = {};
87 88
89 /* Caution - if netdev is destroyed before the program, this function
90 * will be called twice.
91 */
92
88 data.offload.prog = prog; 93 data.offload.prog = prog;
89 94
90 if (offload->verifier_running) 95 if (offload->verifier_running)
@@ -144,18 +149,6 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
144 return bpf_prog_offload_translate(prog); 149 return bpf_prog_offload_translate(prog);
145} 150}
146 151
147u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
148{
149 struct bpf_dev_offload *offload = prog->aux->offload;
150 u32 ifindex;
151
152 rtnl_lock();
153 ifindex = offload->netdev ? offload->netdev->ifindex : 0;
154 rtnl_unlock();
155
156 return ifindex;
157}
158
159const struct bpf_prog_ops bpf_offload_prog_ops = { 152const struct bpf_prog_ops bpf_offload_prog_ops = {
160}; 153};
161 154
@@ -169,6 +162,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
169 162
170 switch (event) { 163 switch (event) {
171 case NETDEV_UNREGISTER: 164 case NETDEV_UNREGISTER:
165 /* ignore namespace changes */
166 if (netdev->reg_state != NETREG_UNREGISTERING)
167 break;
168
172 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, 169 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
173 offloads) { 170 offloads) {
174 if (offload->netdev == netdev) 171 if (offload->netdev == netdev)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 09badc37e864..2c4cfeaa8d5e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1057,22 +1057,23 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1057} 1057}
1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1058EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1059 1059
1060static bool bpf_prog_can_attach(struct bpf_prog *prog, 1060static bool bpf_prog_get_ok(struct bpf_prog *prog,
1061 enum bpf_prog_type *attach_type, 1061 enum bpf_prog_type *attach_type, bool attach_drv)
1062 struct net_device *netdev)
1063{ 1062{
1064 struct bpf_dev_offload *offload = prog->aux->offload; 1063 /* not an attachment, just a refcount inc, always allow */
1064 if (!attach_type)
1065 return true;
1065 1066
1066 if (prog->type != *attach_type) 1067 if (prog->type != *attach_type)
1067 return false; 1068 return false;
1068 if (offload && offload->netdev != netdev) 1069 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1069 return false; 1070 return false;
1070 1071
1071 return true; 1072 return true;
1072} 1073}
1073 1074
1074static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1075static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1075 struct net_device *netdev) 1076 bool attach_drv)
1076{ 1077{
1077 struct fd f = fdget(ufd); 1078 struct fd f = fdget(ufd);
1078 struct bpf_prog *prog; 1079 struct bpf_prog *prog;
@@ -1080,7 +1081,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1080 prog = ____bpf_prog_get(f); 1081 prog = ____bpf_prog_get(f);
1081 if (IS_ERR(prog)) 1082 if (IS_ERR(prog))
1082 return prog; 1083 return prog;
1083 if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) { 1084 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1084 prog = ERR_PTR(-EINVAL); 1085 prog = ERR_PTR(-EINVAL);
1085 goto out; 1086 goto out;
1086 } 1087 }
@@ -1093,23 +1094,13 @@ out:
1093 1094
1094struct bpf_prog *bpf_prog_get(u32 ufd) 1095struct bpf_prog *bpf_prog_get(u32 ufd)
1095{ 1096{
1096 return __bpf_prog_get(ufd, NULL, NULL); 1097 return __bpf_prog_get(ufd, NULL, false);
1097} 1098}
1098 1099
1099struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
1100{
1101 struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
1102
1103 if (!IS_ERR(prog))
1104 trace_bpf_prog_get_type(prog);
1105 return prog;
1106}
1107EXPORT_SYMBOL_GPL(bpf_prog_get_type);
1108
1109struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1100struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1110 struct net_device *netdev) 1101 bool attach_drv)
1111{ 1102{
1112 struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev); 1103 struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
1113 1104
1114 if (!IS_ERR(prog)) 1105 if (!IS_ERR(prog))
1115 trace_bpf_prog_get_type(prog); 1106 trace_bpf_prog_get_type(prog);
@@ -1118,7 +1109,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1118EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1109EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1119 1110
1120/* last field in 'union bpf_attr' used by this command */ 1111/* last field in 'union bpf_attr' used by this command */
1121#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex 1112#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
1122 1113
1123static int bpf_prog_load(union bpf_attr *attr) 1114static int bpf_prog_load(union bpf_attr *attr)
1124{ 1115{
@@ -1181,7 +1172,7 @@ static int bpf_prog_load(union bpf_attr *attr)
1181 atomic_set(&prog->aux->refcnt, 1); 1172 atomic_set(&prog->aux->refcnt, 1);
1182 prog->gpl_compatible = is_gpl ? 1 : 0; 1173 prog->gpl_compatible = is_gpl ? 1 : 0;
1183 1174
1184 if (attr->prog_target_ifindex) { 1175 if (attr->prog_ifindex) {
1185 err = bpf_prog_offload_init(prog, attr); 1176 err = bpf_prog_offload_init(prog, attr);
1186 if (err) 1177 if (err)
1187 goto free_prog; 1178 goto free_prog;
@@ -1625,11 +1616,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1625 return -EFAULT; 1616 return -EFAULT;
1626 } 1617 }
1627 1618
1628 if (bpf_prog_is_dev_bound(prog->aux)) {
1629 info.status |= BPF_PROG_STATUS_DEV_BOUND;
1630 info.ifindex = bpf_prog_offload_ifindex(prog);
1631 }
1632
1633done: 1619done:
1634 if (copy_to_user(uinfo, &info, info_len) || 1620 if (copy_to_user(uinfo, &info, info_len) ||
1635 put_user(info_len, &uattr->info.info_len)) 1621 put_user(info_len, &uattr->info.info_len))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dd54d20ace2f..d4593571c404 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1384,13 +1384,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1384 if (type != expected_type) 1384 if (type != expected_type)
1385 goto err_type; 1385 goto err_type;
1386 } else if (arg_type == ARG_PTR_TO_MEM || 1386 } else if (arg_type == ARG_PTR_TO_MEM ||
1387 arg_type == ARG_PTR_TO_MEM_OR_NULL ||
1387 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1388 arg_type == ARG_PTR_TO_UNINIT_MEM) {
1388 expected_type = PTR_TO_STACK; 1389 expected_type = PTR_TO_STACK;
1389 /* One exception here. In case function allows for NULL to be 1390 /* One exception here. In case function allows for NULL to be
1390 * passed in as argument, it's a SCALAR_VALUE type. Final test 1391 * passed in as argument, it's a SCALAR_VALUE type. Final test
1391 * happens during stack boundary checking. 1392 * happens during stack boundary checking.
1392 */ 1393 */
1393 if (register_is_null(*reg)) 1394 if (register_is_null(*reg) &&
1395 arg_type == ARG_PTR_TO_MEM_OR_NULL)
1394 /* final test in check_stack_boundary() */; 1396 /* final test in check_stack_boundary() */;
1395 else if (!type_is_pkt_pointer(type) && 1397 else if (!type_is_pkt_pointer(type) &&
1396 type != PTR_TO_MAP_VALUE && 1398 type != PTR_TO_MAP_VALUE &&
@@ -3825,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env)
3825 return err; 3827 return err;
3826 3828
3827 regs = cur_regs(env); 3829 regs = cur_regs(env);
3830 env->insn_aux_data[insn_idx].seen = true;
3828 if (class == BPF_ALU || class == BPF_ALU64) { 3831 if (class == BPF_ALU || class == BPF_ALU64) {
3829 err = check_alu_op(env, insn); 3832 err = check_alu_op(env, insn);
3830 if (err) 3833 if (err)
@@ -4020,6 +4023,7 @@ process_bpf_exit:
4020 return err; 4023 return err;
4021 4024
4022 insn_idx++; 4025 insn_idx++;
4026 env->insn_aux_data[insn_idx].seen = true;
4023 } else { 4027 } else {
4024 verbose(env, "invalid BPF_LD mode\n"); 4028 verbose(env, "invalid BPF_LD mode\n");
4025 return -EINVAL; 4029 return -EINVAL;
@@ -4202,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4202 u32 off, u32 cnt) 4206 u32 off, u32 cnt)
4203{ 4207{
4204 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 4208 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
4209 int i;
4205 4210
4206 if (cnt == 1) 4211 if (cnt == 1)
4207 return 0; 4212 return 0;
@@ -4211,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
4211 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 4216 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
4212 memcpy(new_data + off + cnt - 1, old_data + off, 4217 memcpy(new_data + off + cnt - 1, old_data + off,
4213 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 4218 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
4219 for (i = off; i < off + cnt - 1; i++)
4220 new_data[i].seen = true;
4214 env->insn_aux_data = new_data; 4221 env->insn_aux_data = new_data;
4215 vfree(old_data); 4222 vfree(old_data);
4216 return 0; 4223 return 0;
@@ -4229,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
4229 return new_prog; 4236 return new_prog;
4230} 4237}
4231 4238
4239/* The verifier does more data flow analysis than llvm and will not explore
4240 * branches that are dead at run time. Malicious programs can have dead code
4241 * too. Therefore replace all dead at-run-time code with nops.
4242 */
4243static void sanitize_dead_code(struct bpf_verifier_env *env)
4244{
4245 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
4246 struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
4247 struct bpf_insn *insn = env->prog->insnsi;
4248 const int insn_cnt = env->prog->len;
4249 int i;
4250
4251 for (i = 0; i < insn_cnt; i++) {
4252 if (aux_data[i].seen)
4253 continue;
4254 memcpy(insn + i, &nop, sizeof(nop));
4255 }
4256}
4257
4232/* convert load instructions that access fields of 'struct __sk_buff' 4258/* convert load instructions that access fields of 'struct __sk_buff'
4233 * into sequence of instructions that access fields of 'struct sk_buff' 4259 * into sequence of instructions that access fields of 'struct sk_buff'
4234 */ 4260 */
@@ -4556,6 +4582,9 @@ skip_full_check:
4556 free_states(env); 4582 free_states(env);
4557 4583
4558 if (ret == 0) 4584 if (ret == 0)
4585 sanitize_dead_code(env);
4586
4587 if (ret == 0)
4559 /* program is valid, convert *(u32*)(ctx + off) accesses */ 4588 /* program is valid, convert *(u32*)(ctx + off) accesses */
4560 ret = convert_ctx_accesses(env); 4589 ret = convert_ctx_accesses(env);
4561 4590
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a5580c670866..27d1f4ffa3de 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -78,16 +78,12 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
78 78
79BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) 79BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
80{ 80{
81 int ret = 0; 81 int ret;
82
83 if (unlikely(size == 0))
84 goto out;
85 82
86 ret = probe_kernel_read(dst, unsafe_ptr, size); 83 ret = probe_kernel_read(dst, unsafe_ptr, size);
87 if (unlikely(ret < 0)) 84 if (unlikely(ret < 0))
88 memset(dst, 0, size); 85 memset(dst, 0, size);
89 86
90 out:
91 return ret; 87 return ret;
92} 88}
93 89
@@ -407,7 +403,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
407 .arg2_type = ARG_CONST_MAP_PTR, 403 .arg2_type = ARG_CONST_MAP_PTR,
408 .arg3_type = ARG_ANYTHING, 404 .arg3_type = ARG_ANYTHING,
409 .arg4_type = ARG_PTR_TO_MEM, 405 .arg4_type = ARG_PTR_TO_MEM,
410 .arg5_type = ARG_CONST_SIZE, 406 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
411}; 407};
412 408
413static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); 409static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@@ -498,7 +494,7 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
498 .gpl_only = true, 494 .gpl_only = true,
499 .ret_type = RET_INTEGER, 495 .ret_type = RET_INTEGER,
500 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 496 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
501 .arg2_type = ARG_CONST_SIZE, 497 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
502 .arg3_type = ARG_ANYTHING, 498 .arg3_type = ARG_ANYTHING,
503}; 499};
504 500
@@ -609,7 +605,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
609 .arg2_type = ARG_CONST_MAP_PTR, 605 .arg2_type = ARG_CONST_MAP_PTR,
610 .arg3_type = ARG_ANYTHING, 606 .arg3_type = ARG_ANYTHING,
611 .arg4_type = ARG_PTR_TO_MEM, 607 .arg4_type = ARG_PTR_TO_MEM,
612 .arg5_type = ARG_CONST_SIZE, 608 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
613}; 609};
614 610
615BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 611BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
diff --git a/net/core/dev.c b/net/core/dev.c
index bbba19112f02..07ed21d64f92 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7140,13 +7140,17 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7140 __dev_xdp_attached(dev, bpf_op, NULL)) 7140 __dev_xdp_attached(dev, bpf_op, NULL))
7141 return -EBUSY; 7141 return -EBUSY;
7142 7142
7143 if (bpf_op == ops->ndo_bpf) 7143 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7144 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 7144 bpf_op == ops->ndo_bpf);
7145 dev);
7146 else
7147 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
7148 if (IS_ERR(prog)) 7145 if (IS_ERR(prog))
7149 return PTR_ERR(prog); 7146 return PTR_ERR(prog);
7147
7148 if (!(flags & XDP_FLAGS_HW_MODE) &&
7149 bpf_prog_is_dev_bound(prog->aux)) {
7150 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7151 bpf_prog_put(prog);
7152 return -EINVAL;
7153 }
7150 } 7154 }
7151 7155
7152 err = dev_xdp_install(dev, bpf_op, extack, flags, prog); 7156 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
diff --git a/net/core/filter.c b/net/core/filter.c
index 1afa17935954..6a85e67fafce 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1646,9 +1646,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
1646 .gpl_only = false, 1646 .gpl_only = false,
1647 .pkt_access = true, 1647 .pkt_access = true,
1648 .ret_type = RET_INTEGER, 1648 .ret_type = RET_INTEGER,
1649 .arg1_type = ARG_PTR_TO_MEM, 1649 .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
1650 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1650 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1651 .arg3_type = ARG_PTR_TO_MEM, 1651 .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
1652 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1652 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1653 .arg5_type = ARG_ANYTHING, 1653 .arg5_type = ARG_ANYTHING,
1654}; 1654};
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index fb680dafac5a..a9f3e317055c 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -382,15 +382,13 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
382{ 382{
383 struct bpf_prog *fp; 383 struct bpf_prog *fp;
384 char *name = NULL; 384 char *name = NULL;
385 bool skip_sw;
385 u32 bpf_fd; 386 u32 bpf_fd;
386 387
387 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); 388 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
389 skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
388 390
389 if (gen_flags & TCA_CLS_FLAGS_SKIP_SW) 391 fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
390 fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
391 qdisc_dev(tp->q));
392 else
393 fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
394 if (IS_ERR(fp)) 392 if (IS_ERR(fp))
395 return PTR_ERR(fp); 393 return PTR_ERR(fp);
396 394
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index f45c44ef9bec..ad619b96c276 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -41,7 +41,6 @@
41#include <string.h> 41#include <string.h>
42#include <time.h> 42#include <time.h>
43#include <unistd.h> 43#include <unistd.h>
44#include <net/if.h>
45#include <sys/types.h> 44#include <sys/types.h>
46#include <sys/stat.h> 45#include <sys/stat.h>
47 46
@@ -230,21 +229,6 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
230 info->tag[0], info->tag[1], info->tag[2], info->tag[3], 229 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
231 info->tag[4], info->tag[5], info->tag[6], info->tag[7]); 230 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
232 231
233 if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
234 jsonw_name(json_wtr, "dev");
235 if (info->ifindex) {
236 char name[IF_NAMESIZE];
237
238 if (!if_indextoname(info->ifindex, name))
239 jsonw_printf(json_wtr, "\"ifindex:%d\"",
240 info->ifindex);
241 else
242 jsonw_printf(json_wtr, "\"%s\"", name);
243 } else {
244 jsonw_printf(json_wtr, "\"unknown\"");
245 }
246 }
247
248 if (info->load_time) { 232 if (info->load_time) {
249 char buf[32]; 233 char buf[32];
250 234
@@ -302,21 +286,6 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
302 286
303 printf("tag "); 287 printf("tag ");
304 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); 288 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
305 printf(" ");
306
307 if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
308 printf("dev ");
309 if (info->ifindex) {
310 char name[IF_NAMESIZE];
311
312 if (!if_indextoname(info->ifindex, name))
313 printf("ifindex:%d ", info->ifindex);
314 else
315 printf("%s ", name);
316 } else {
317 printf("unknown ");
318 }
319 }
320 printf("\n"); 289 printf("\n");
321 290
322 if (info->load_time) { 291 if (info->load_time) {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e880ae6434ee..4c223ab30293 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -262,7 +262,7 @@ union bpf_attr {
262 __u32 kern_version; /* checked when prog_type=kprobe */ 262 __u32 kern_version; /* checked when prog_type=kprobe */
263 __u32 prog_flags; 263 __u32 prog_flags;
264 char prog_name[BPF_OBJ_NAME_LEN]; 264 char prog_name[BPF_OBJ_NAME_LEN];
265 __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ 265 __u32 prog_ifindex; /* ifindex of netdev to prep for */
266 }; 266 };
267 267
268 struct { /* anonymous struct used by BPF_OBJ_* commands */ 268 struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
897 897
898#define BPF_TAG_SIZE 8 898#define BPF_TAG_SIZE 8
899 899
900enum bpf_prog_status {
901 BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
902};
903
904struct bpf_prog_info { 900struct bpf_prog_info {
905 __u32 type; 901 __u32 type;
906 __u32 id; 902 __u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
914 __u32 nr_map_ids; 910 __u32 nr_map_ids;
915 __aligned_u64 map_ids; 911 __aligned_u64 map_ids;
916 char name[BPF_OBJ_NAME_LEN]; 912 char name[BPF_OBJ_NAME_LEN];
917 __u32 ifindex;
918 __u32 status;
919} __attribute__((aligned(8))); 913} __attribute__((aligned(8)));
920 914
921struct bpf_map_info { 915struct bpf_map_info {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index bf092b83e453..3c64f30cf63c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4377,11 +4377,10 @@ static struct bpf_test tests[] = {
4377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4378 BPF_LD_MAP_FD(BPF_REG_1, 0), 4378 BPF_LD_MAP_FD(BPF_REG_1, 0),
4379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4381 BPF_MOV64_IMM(BPF_REG_1, 0), 4381 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 4382 BPF_MOV64_IMM(BPF_REG_2, 0),
4383 BPF_MOV64_IMM(BPF_REG_3, 0), 4383 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4384 BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
4385 BPF_EXIT_INSN(), 4384 BPF_EXIT_INSN(),
4386 }, 4385 },
4387 .fixup_map2 = { 3 }, 4386 .fixup_map2 = { 3 },
@@ -4481,14 +4480,12 @@ static struct bpf_test tests[] = {
4481 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4480 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4482 BPF_LD_MAP_FD(BPF_REG_1, 0), 4481 BPF_LD_MAP_FD(BPF_REG_1, 0),
4483 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4482 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4483 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4484 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4487 offsetof(struct test_val, foo)), 4486 offsetof(struct test_val, foo)),
4488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4487 BPF_MOV64_IMM(BPF_REG_2, 0),
4489 BPF_MOV64_IMM(BPF_REG_1, 0), 4488 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4490 BPF_MOV64_IMM(BPF_REG_3, 0),
4491 BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
4492 BPF_EXIT_INSN(), 4489 BPF_EXIT_INSN(),
4493 }, 4490 },
4494 .fixup_map2 = { 3 }, 4491 .fixup_map2 = { 3 },
@@ -4618,18 +4615,16 @@ static struct bpf_test tests[] = {
4618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4615 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4619 BPF_LD_MAP_FD(BPF_REG_1, 0), 4616 BPF_LD_MAP_FD(BPF_REG_1, 0),
4620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4623 BPF_MOV64_IMM(BPF_REG_3, 0), 4620 BPF_MOV64_IMM(BPF_REG_3, 0),
4624 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4621 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4622 BPF_MOV64_IMM(BPF_REG_2, 0),
4626 BPF_MOV64_IMM(BPF_REG_1, 0), 4623 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4627 BPF_MOV64_IMM(BPF_REG_3, 0),
4628 BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
4629 BPF_EXIT_INSN(), 4624 BPF_EXIT_INSN(),
4630 }, 4625 },
4631 .fixup_map2 = { 3 }, 4626 .fixup_map2 = { 3 },
4632 .errstr = "R2 min value is outside of the array range", 4627 .errstr = "R1 min value is outside of the array range",
4633 .result = REJECT, 4628 .result = REJECT,
4634 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4629 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4635 }, 4630 },
@@ -4760,20 +4755,18 @@ static struct bpf_test tests[] = {
4760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4755 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4761 BPF_LD_MAP_FD(BPF_REG_1, 0), 4756 BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4757 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4758 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4759 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4765 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4760 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4766 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4761 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4767 offsetof(struct test_val, foo), 4), 4762 offsetof(struct test_val, foo), 3),
4768 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4763 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4769 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4764 BPF_MOV64_IMM(BPF_REG_2, 0),
4770 BPF_MOV64_IMM(BPF_REG_1, 0), 4765 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4771 BPF_MOV64_IMM(BPF_REG_3, 0),
4772 BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
4773 BPF_EXIT_INSN(), 4766 BPF_EXIT_INSN(),
4774 }, 4767 },
4775 .fixup_map2 = { 3 }, 4768 .fixup_map2 = { 3 },
4776 .errstr = "R2 min value is outside of the array range", 4769 .errstr = "R1 min value is outside of the array range",
4777 .result = REJECT, 4770 .result = REJECT,
4778 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4771 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4779 }, 4772 },
@@ -5638,7 +5631,7 @@ static struct bpf_test tests[] = {
5638 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5631 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5639 }, 5632 },
5640 { 5633 {
5641 "helper access to variable memory: size = 0 allowed on NULL", 5634 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5642 .insns = { 5635 .insns = {
5643 BPF_MOV64_IMM(BPF_REG_1, 0), 5636 BPF_MOV64_IMM(BPF_REG_1, 0),
5644 BPF_MOV64_IMM(BPF_REG_2, 0), 5637 BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5652,7 +5645,7 @@ static struct bpf_test tests[] = {
5652 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5645 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5653 }, 5646 },
5654 { 5647 {
5655 "helper access to variable memory: size > 0 not allowed on NULL", 5648 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5656 .insns = { 5649 .insns = {
5657 BPF_MOV64_IMM(BPF_REG_1, 0), 5650 BPF_MOV64_IMM(BPF_REG_1, 0),
5658 BPF_MOV64_IMM(BPF_REG_2, 0), 5651 BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5670,7 +5663,7 @@ static struct bpf_test tests[] = {
5670 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5663 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5671 }, 5664 },
5672 { 5665 {
5673 "helper access to variable memory: size = 0 allowed on != NULL stack pointer", 5666 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5674 .insns = { 5667 .insns = {
5675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@@ -5687,7 +5680,7 @@ static struct bpf_test tests[] = {
5687 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5688 }, 5681 },
5689 { 5682 {
5690 "helper access to variable memory: size = 0 allowed on != NULL map pointer", 5683 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5691 .insns = { 5684 .insns = {
5692 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5685 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5709,7 +5702,7 @@ static struct bpf_test tests[] = {
5709 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5702 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5710 }, 5703 },
5711 { 5704 {
5712 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer", 5705 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5713 .insns = { 5706 .insns = {
5714 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5707 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5715 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5708 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5734,7 +5727,7 @@ static struct bpf_test tests[] = {
5734 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5735 }, 5728 },
5736 { 5729 {
5737 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer", 5730 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5738 .insns = { 5731 .insns = {
5739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5732 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5733 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5757,7 +5750,7 @@ static struct bpf_test tests[] = {
5757 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5750 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5758 }, 5751 },
5759 { 5752 {
5760 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer", 5753 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
5761 .insns = { 5754 .insns = {
5762 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 5755 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5763 offsetof(struct __sk_buff, data)), 5756 offsetof(struct __sk_buff, data)),
@@ -5779,6 +5772,105 @@ static struct bpf_test tests[] = {
5779 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5772 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5780 }, 5773 },
5781 { 5774 {
5775 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5776 .insns = {
5777 BPF_MOV64_IMM(BPF_REG_1, 0),
5778 BPF_MOV64_IMM(BPF_REG_2, 0),
5779 BPF_MOV64_IMM(BPF_REG_3, 0),
5780 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5781 BPF_EXIT_INSN(),
5782 },
5783 .errstr = "R1 type=inv expected=fp",
5784 .result = REJECT,
5785 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5786 },
5787 {
5788 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5789 .insns = {
5790 BPF_MOV64_IMM(BPF_REG_1, 0),
5791 BPF_MOV64_IMM(BPF_REG_2, 1),
5792 BPF_MOV64_IMM(BPF_REG_3, 0),
5793 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5794 BPF_EXIT_INSN(),
5795 },
5796 .errstr = "R1 type=inv expected=fp",
5797 .result = REJECT,
5798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5799 },
5800 {
5801 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5802 .insns = {
5803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5805 BPF_MOV64_IMM(BPF_REG_2, 0),
5806 BPF_MOV64_IMM(BPF_REG_3, 0),
5807 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5808 BPF_EXIT_INSN(),
5809 },
5810 .result = ACCEPT,
5811 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5812 },
5813 {
5814 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5815 .insns = {
5816 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5819 BPF_LD_MAP_FD(BPF_REG_1, 0),
5820 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5822 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5823 BPF_MOV64_IMM(BPF_REG_2, 0),
5824 BPF_MOV64_IMM(BPF_REG_3, 0),
5825 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5826 BPF_EXIT_INSN(),
5827 },
5828 .fixup_map1 = { 3 },
5829 .result = ACCEPT,
5830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5831 },
5832 {
5833 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5834 .insns = {
5835 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5838 BPF_LD_MAP_FD(BPF_REG_1, 0),
5839 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5841 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5842 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5845 BPF_MOV64_IMM(BPF_REG_3, 0),
5846 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5847 BPF_EXIT_INSN(),
5848 },
5849 .fixup_map1 = { 3 },
5850 .result = ACCEPT,
5851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5852 },
5853 {
5854 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5855 .insns = {
5856 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5857 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5859 BPF_LD_MAP_FD(BPF_REG_1, 0),
5860 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5861 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5862 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5863 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5864 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
5865 BPF_MOV64_IMM(BPF_REG_3, 0),
5866 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5867 BPF_EXIT_INSN(),
5868 },
5869 .fixup_map1 = { 3 },
5870 .result = ACCEPT,
5871 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5872 },
5873 {
5782 "helper access to variable memory: 8 bytes leak", 5874 "helper access to variable memory: 8 bytes leak",
5783 .insns = { 5875 .insns = {
5784 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5876 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),