Diffstat (limited to 'lib')
 lib/kobject_uevent.c | 178
 lib/rhashtable.c     |  51
 lib/test_bpf.c       | 595
 3 files changed, 574 insertions(+), 250 deletions(-)
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 15ea216a67ce..63d0816ab23b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -22,6 +22,7 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/uidgid.h>
 #include <linux/uuid.h>
 #include <linux/ctype.h>
 #include <net/sock.h>
@@ -231,30 +232,6 @@ out:
 	return r;
 }
 
-#ifdef CONFIG_NET
-static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
-{
-	struct kobject *kobj = data, *ksobj;
-	const struct kobj_ns_type_operations *ops;
-
-	ops = kobj_ns_ops(kobj);
-	if (!ops && kobj->kset) {
-		ksobj = &kobj->kset->kobj;
-		if (ksobj->parent != NULL)
-			ops = kobj_ns_ops(ksobj->parent);
-	}
-
-	if (ops && ops->netlink_ns && kobj->ktype->namespace) {
-		const void *sock_ns, *ns;
-		ns = kobj->ktype->namespace(kobj);
-		sock_ns = ops->netlink_ns(dsk);
-		return sock_ns != ns;
-	}
-
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_UEVENT_HELPER
 static int kobj_usermode_filter(struct kobject *kobj)
 {
@@ -296,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 }
 #endif
 
-static int kobject_uevent_net_broadcast(struct kobject *kobj,
-					struct kobj_uevent_env *env,
-					const char *action_string,
-					const char *devpath)
-{
-	int retval = 0;
-#if defined(CONFIG_NET)
+#ifdef CONFIG_NET
+static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
+{
+	struct netlink_skb_parms *parms;
+	struct sk_buff *skb = NULL;
+	char *scratch;
+	size_t len;
+
+	/* allocate message with maximum possible size */
+	len = strlen(action_string) + strlen(devpath) + 2;
+	skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	/* add header */
+	scratch = skb_put(skb, len);
+	sprintf(scratch, "%s@%s", action_string, devpath);
+
+	skb_put_data(skb, env->buf, env->buflen);
+
+	parms = &NETLINK_CB(skb);
+	parms->creds.uid = GLOBAL_ROOT_UID;
+	parms->creds.gid = GLOBAL_ROOT_GID;
+	parms->dst_group = 1;
+	parms->portid = 0;
+
+	return skb;
+}
+
+static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
+					 const char *action_string,
+					 const char *devpath)
+{
 	struct sk_buff *skb = NULL;
 	struct uevent_sock *ue_sk;
+	int retval = 0;
 
 	/* send netlink message */
 	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
@@ -314,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 			continue;
 
 		if (!skb) {
-			/* allocate message with the maximum possible size */
-			size_t len = strlen(action_string) + strlen(devpath) + 2;
-			char *scratch;
-
 			retval = -ENOMEM;
-			skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+			skb = alloc_uevent_skb(env, action_string, devpath);
 			if (!skb)
 				continue;
-
-			/* add header */
-			scratch = skb_put(skb, len);
-			sprintf(scratch, "%s@%s", action_string, devpath);
-
-			skb_put_data(skb, env->buf, env->buflen);
-
-			NETLINK_CB(skb).dst_group = 1;
 		}
 
-		retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
-						    0, 1, GFP_KERNEL,
-						    kobj_bcast_filter,
-						    kobj);
+		retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
+					   GFP_KERNEL);
 		/* ENOBUFS should be handled in userspace */
 		if (retval == -ENOBUFS || retval == -ESRCH)
 			retval = 0;
 	}
 	consume_skb(skb);
-#endif
+
 	return retval;
 }
 
+static int uevent_net_broadcast_tagged(struct sock *usk,
+				       struct kobj_uevent_env *env,
+				       const char *action_string,
+				       const char *devpath)
+{
+	struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
+	struct sk_buff *skb = NULL;
+	int ret = 0;
+
+	skb = alloc_uevent_skb(env, action_string, devpath);
+	if (!skb)
+		return -ENOMEM;
+
+	/* fix credentials */
+	if (owning_user_ns != &init_user_ns) {
+		struct netlink_skb_parms *parms = &NETLINK_CB(skb);
+		kuid_t root_uid;
+		kgid_t root_gid;
+
+		/* fix uid */
+		root_uid = make_kuid(owning_user_ns, 0);
+		if (uid_valid(root_uid))
+			parms->creds.uid = root_uid;
+
+		/* fix gid */
+		root_gid = make_kgid(owning_user_ns, 0);
+		if (gid_valid(root_gid))
+			parms->creds.gid = root_gid;
+	}
+
+	ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
+	/* ENOBUFS should be handled in userspace */
+	if (ret == -ENOBUFS || ret == -ESRCH)
+		ret = 0;
+
+	return ret;
+}
+#endif
+
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+					struct kobj_uevent_env *env,
+					const char *action_string,
+					const char *devpath)
+{
+	int ret = 0;
+
+#ifdef CONFIG_NET
+	const struct kobj_ns_type_operations *ops;
+	const struct net *net = NULL;
+
+	ops = kobj_ns_ops(kobj);
+	if (!ops && kobj->kset) {
+		struct kobject *ksobj = &kobj->kset->kobj;
+		if (ksobj->parent != NULL)
+			ops = kobj_ns_ops(ksobj->parent);
+	}
+
+	/* kobjects currently only carry network namespace tags and they
+	 * are the only tag relevant here since we want to decide which
+	 * network namespaces to broadcast the uevent into.
+	 */
+	if (ops && ops->netlink_ns && kobj->ktype->namespace)
+		if (ops->type == KOBJ_NS_TYPE_NET)
+			net = kobj->ktype->namespace(kobj);
+
+	if (!net)
+		ret = uevent_net_broadcast_untagged(env, action_string,
+						    devpath);
+	else
+		ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
+						  action_string, devpath);
+#endif
+
+	return ret;
+}
+
 static void zap_modalias_env(struct kobj_uevent_env *env)
 {
 	static const char modalias_prefix[] = "MODALIAS=";
@@ -703,9 +771,13 @@ static int uevent_net_init(struct net *net)
 
 	net->uevent_sock = ue_sk;
 
-	mutex_lock(&uevent_sock_mutex);
-	list_add_tail(&ue_sk->list, &uevent_sock_list);
-	mutex_unlock(&uevent_sock_mutex);
+	/* Restrict uevents to initial user namespace. */
+	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+		mutex_lock(&uevent_sock_mutex);
+		list_add_tail(&ue_sk->list, &uevent_sock_list);
+		mutex_unlock(&uevent_sock_mutex);
+	}
+
 	return 0;
 }
 
@@ -713,9 +785,11 @@ static void uevent_net_exit(struct net *net)
 {
 	struct uevent_sock *ue_sk = net->uevent_sock;
 
-	mutex_lock(&uevent_sock_mutex);
-	list_del(&ue_sk->list);
-	mutex_unlock(&uevent_sock_mutex);
+	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+		mutex_lock(&uevent_sock_mutex);
+		list_del(&ue_sk->list);
+		mutex_unlock(&uevent_sock_mutex);
+	}
 
 	netlink_kernel_release(ue_sk->sk);
 	kfree(ue_sk);
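For context on what alloc_uevent_skb() above actually emits: a uevent netlink message is a plain text blob, an "ACTION@DEVPATH" header terminated by a NUL and followed by the NUL-separated KEY=VALUE pairs from env->buf. A minimal user-space sketch of that layout, with illustrative action, devpath, and environment values (not taken from the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *action = "add";
	const char *devpath = "/devices/virtual/net/dummy0";
	/* NUL-separated KEY=VALUE pairs, standing in for env->buf */
	const char env[] = "ACTION=add\0SUBSYSTEM=net";
	char msg[256];
	size_t len;

	/* header size: strlen(action_string) + strlen(devpath) + 2,
	 * exactly room for "ACTION@DEVPATH" plus its terminating NUL */
	len = strlen(action) + strlen(devpath) + 2;
	snprintf(msg, len, "%s@%s", action, devpath);

	/* append the environment, as skb_put_data() does with env->buf */
	memcpy(msg + len, env, sizeof(env));

	printf("message: %zu header bytes + %zu env bytes\n",
	       len, sizeof(env));
	return 0;
}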
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 2b2b79974b61..9427b5766134 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -668,8 +668,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
@@ -726,6 +727,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
+	bool rhlist = ht->rhlist;
 
 	rcu_read_lock();
 
@@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl && !iter->end_of_table) {
+	if (iter->end_of_table)
+		return 0;
+	if (!iter->walker.tbl) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+		iter->slot = 0;
+		iter->skip = 0;
 		return -EAGAIN;
 	}
 
+	if (iter->p && !rhlist) {
+		/*
+		 * We need to validate that 'p' is still in the table, and
+		 * if so, update 'skip'
+		 */
+		struct rhash_head *p;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			skip++;
+			if (p == iter->p) {
+				iter->skip = skip;
+				goto found;
+			}
+		}
+		iter->p = NULL;
+	} else if (iter->p && rhlist) {
+		/* Need to validate that 'list' is still in the table, and
+		 * if so, update 'skip' and 'p'.
+		 */
+		struct rhash_head *p;
+		struct rhlist_head *list;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			for (list = container_of(p, struct rhlist_head, rhead);
+			     list;
+			     list = rcu_dereference(list->next)) {
+				skip++;
+				if (list == iter->list) {
+					iter->p = p;
+					iter->skip = skip;
+					goto found;
+				}
+			}
+		}
+		iter->p = NULL;
+	}
+found:
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
@@ -914,8 +957,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 	iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);
 
-	iter->p = NULL;
-
 out:
 	rcu_read_unlock();
 }
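The heart of the rhashtable change is the resume step added to rhashtable_walk_start_check(): rhashtable_walk_stop() no longer forgets the current element (iter->p), and on restart the walker re-scans the element's bucket chain to recompute the skip count, so the walk continues where it stopped whenever that element is still in the table. A rough user-space sketch of the re-validation idea, with a plain singly linked list standing in for a bucket chain (all names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct node { int val; struct node *next; };

/* Returns 1 and sets *skip to the 1-based position of 'p' if it is
 * still in the chain, 0 otherwise (caller then restarts the bucket). */
static int revalidate(struct node *head, struct node *p, int *skip)
{
	int pos = 0;

	for (struct node *n = head; n; n = n->next) {
		pos++;
		if (n == p) {
			*skip = pos;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct node c = {3, NULL}, b = {2, &c}, a = {1, &b};
	int skip = 0;

	if (revalidate(&a, &b, &skip))
		printf("resume after position %d\n", skip); /* prints 2 */
	return 0;
}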
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 8e157806df7a..60aedc879361 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -356,29 +356,22 @@ static int bpf_fill_maxinsns11(struct bpf_test *self)
 	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
 }
 
-static int bpf_fill_ja(struct bpf_test *self)
-{
-	/* Hits exactly 11 passes on x86_64 JIT. */
-	return __bpf_fill_ja(self, 12, 9);
-}
-
-static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+static int bpf_fill_maxinsns12(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
 	struct sock_filter *insn;
-	int i;
+	int i = 0;
 
 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;
 
-	for (i = 0; i < len - 1; i += 2) {
-		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
-		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-					 SKF_AD_OFF + SKF_AD_CPU);
-	}
+	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
 
-	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+	for (i = 1; i < len - 1; i++)
+		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
@@ -386,50 +379,22 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
 	return 0;
 }
 
-#define PUSH_CNT 68
-/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
-static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+static int bpf_fill_maxinsns13(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
-	struct bpf_insn *insn;
-	int i = 0, j, k = 0;
+	struct sock_filter *insn;
+	int i = 0;
 
 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;
 
-	insn[i++] = BPF_MOV64_REG(R6, R1);
-loop:
-	for (j = 0; j < PUSH_CNT; j++) {
-		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-		i++;
-		insn[i++] = BPF_MOV64_REG(R1, R6);
-		insn[i++] = BPF_MOV64_IMM(R2, 1);
-		insn[i++] = BPF_MOV64_IMM(R3, 2);
-		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-					 bpf_skb_vlan_push_proto.func - __bpf_call_base);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-		i++;
-	}
-
-	for (j = 0; j < PUSH_CNT; j++) {
-		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-		i++;
-		insn[i++] = BPF_MOV64_REG(R1, R6);
-		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-					 bpf_skb_vlan_pop_proto.func - __bpf_call_base);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-		i++;
-	}
-	if (++k < 5)
-		goto loop;
+	for (i = 0; i < len - 3; i++)
+		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
 
-	for (; i < len - 1; i++)
-		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
-
-	insn[len - 1] = BPF_EXIT_INSN();
+	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
+	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
@@ -437,58 +402,29 @@ loop:
 	return 0;
 }
 
-static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+static int bpf_fill_ja(struct bpf_test *self)
 {
-	struct bpf_insn *insn;
-
-	insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
-	if (!insn)
-		return -ENOMEM;
-
-	/* Due to func address being non-const, we need to
-	 * assemble this here.
-	 */
-	insn[0] = BPF_MOV64_REG(R6, R1);
-	insn[1] = BPF_LD_ABS(BPF_B, 0);
-	insn[2] = BPF_LD_ABS(BPF_H, 0);
-	insn[3] = BPF_LD_ABS(BPF_W, 0);
-	insn[4] = BPF_MOV64_REG(R7, R6);
-	insn[5] = BPF_MOV64_IMM(R6, 0);
-	insn[6] = BPF_MOV64_REG(R1, R7);
-	insn[7] = BPF_MOV64_IMM(R2, 1);
-	insn[8] = BPF_MOV64_IMM(R3, 2);
-	insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			       bpf_skb_vlan_push_proto.func - __bpf_call_base);
-	insn[10] = BPF_MOV64_REG(R6, R7);
-	insn[11] = BPF_LD_ABS(BPF_B, 0);
-	insn[12] = BPF_LD_ABS(BPF_H, 0);
-	insn[13] = BPF_LD_ABS(BPF_W, 0);
-	insn[14] = BPF_MOV64_IMM(R0, 42);
-	insn[15] = BPF_EXIT_INSN();
-
-	self->u.ptr.insns = insn;
-	self->u.ptr.len = 16;
-
-	return 0;
+	/* Hits exactly 11 passes on x86_64 JIT. */
+	return __bpf_fill_ja(self, 12, 9);
 }
 
-static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
-	struct bpf_insn *insn;
-	int i = 0;
+	struct sock_filter *insn;
+	int i;
 
 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;
 
-	insn[i++] = BPF_MOV64_REG(R6, R1);
-	insn[i++] = BPF_LD_ABS(BPF_B, 0);
-	insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
-	i++;
-	while (i < len - 1)
-		insn[i++] = BPF_LD_ABS(BPF_B, 1);
-	insn[i] = BPF_EXIT_INSN();
+	for (i = 0; i < len - 1; i += 2) {
+		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+					 SKF_AD_OFF + SKF_AD_CPU);
+	}
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
@@ -1988,40 +1924,6 @@ static struct bpf_test tests[] = {
 		{ { 0, -1 } }
 	},
 	{
-		"INT: DIV + ABS",
-		.u.insns_int = {
-			BPF_ALU64_REG(BPF_MOV, R6, R1),
-			BPF_LD_ABS(BPF_B, 3),
-			BPF_ALU64_IMM(BPF_MOV, R2, 2),
-			BPF_ALU32_REG(BPF_DIV, R0, R2),
-			BPF_ALU64_REG(BPF_MOV, R8, R0),
-			BPF_LD_ABS(BPF_B, 4),
-			BPF_ALU64_REG(BPF_ADD, R8, R0),
-			BPF_LD_IND(BPF_B, R8, -70),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
-		{ 10, 20, 30, 40, 50 },
-		{ { 4, 0 }, { 5, 10 } }
-	},
-	{
-		/* This one doesn't go through verifier, but is just raw insn
-		 * as opposed to cBPF tests from here. Thus div by 0 tests are
-		 * done in test_verifier in BPF kselftests.
-		 */
-		"INT: DIV by -1",
-		.u.insns_int = {
-			BPF_ALU64_REG(BPF_MOV, R6, R1),
-			BPF_ALU64_IMM(BPF_MOV, R7, -1),
-			BPF_LD_ABS(BPF_B, 3),
-			BPF_ALU32_REG(BPF_DIV, R0, R7),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
-		{ 10, 20, 30, 40, 50 },
-		{ { 3, 0 }, { 4, 0 } }
-	},
-	{
 		"check: missing ret",
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_IMM, 1),
@@ -2383,50 +2285,6 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } }
 	},
-	{
-		"nmap reduced",
-		.u.insns_int = {
-			BPF_MOV64_REG(R6, R1),
-			BPF_LD_ABS(BPF_H, 12),
-			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
-			BPF_LD_ABS(BPF_H, 12),
-			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
-			BPF_MOV32_IMM(R0, 18),
-			BPF_STX_MEM(BPF_W, R10, R0, -64),
-			BPF_LDX_MEM(BPF_W, R7, R10, -64),
-			BPF_LD_IND(BPF_W, R7, 14),
-			BPF_STX_MEM(BPF_W, R10, R0, -60),
-			BPF_MOV32_IMM(R0, 280971478),
-			BPF_STX_MEM(BPF_W, R10, R0, -56),
-			BPF_LDX_MEM(BPF_W, R7, R10, -56),
-			BPF_LDX_MEM(BPF_W, R0, R10, -60),
-			BPF_ALU32_REG(BPF_SUB, R0, R7),
-			BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
-			BPF_LD_ABS(BPF_H, 12),
-			BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
-			BPF_MOV32_IMM(R0, 22),
-			BPF_STX_MEM(BPF_W, R10, R0, -56),
-			BPF_LDX_MEM(BPF_W, R7, R10, -56),
-			BPF_LD_IND(BPF_H, R7, 14),
-			BPF_STX_MEM(BPF_W, R10, R0, -52),
-			BPF_MOV32_IMM(R0, 17366),
-			BPF_STX_MEM(BPF_W, R10, R0, -48),
-			BPF_LDX_MEM(BPF_W, R7, R10, -48),
-			BPF_LDX_MEM(BPF_W, R0, R10, -52),
-			BPF_ALU32_REG(BPF_SUB, R0, R7),
-			BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
-			BPF_MOV32_IMM(R0, 256),
-			BPF_EXIT_INSN(),
-			BPF_MOV32_IMM(R0, 0),
-			BPF_EXIT_INSN(),
-		},
-		INTERNAL,
-		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
-		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
-		{ { 38, 256 } },
-		.stack_depth = 64,
-	},
 	/* BPF_ALU | BPF_MOV | BPF_X */
 	{
 		"ALU_MOV_X: dst = 2",
@@ -5478,28 +5336,29 @@ static struct bpf_test tests[] = {
 		.expected_errcode = -ENOTSUPP,
 	},
 	{
-		"BPF_MAXINSNS: ld_abs+get_processor_id",
-		{ },
-		CLASSIC,
+		"BPF_MAXINSNS: jump over MSH",
 		{ },
-		{ { 1, 0xbee } },
-		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		{ 0xfa, 0xfb, 0xfc, 0xfd, },
+		{ { 4, 0xabababab } },
+		.fill_helper = bpf_fill_maxinsns12,
+		.expected_errcode = -EINVAL,
 	},
 	{
-		"BPF_MAXINSNS: ld_abs+vlan_push/pop",
+		"BPF_MAXINSNS: exec all MSH",
 		{ },
-		INTERNAL,
-		{ 0x34 },
-		{ { ETH_HLEN, 0xbef } },
-		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+		CLASSIC,
+		{ 0xfa, 0xfb, 0xfc, 0xfd, },
+		{ { 4, 0xababab83 } },
+		.fill_helper = bpf_fill_maxinsns13,
 	},
 	{
-		"BPF_MAXINSNS: jump around ld_abs",
+		"BPF_MAXINSNS: ld_abs+get_processor_id",
 		{ },
-		INTERNAL,
-		{ 10, 11 },
-		{ { 2, 10 } },
-		.fill_helper = bpf_fill_jump_around_ld_abs,
+		CLASSIC,
+		{ },
+		{ { 1, 0xbee } },
+		.fill_helper = bpf_fill_ld_abs_get_processor_id,
 	},
 	/*
 	 * LD_IND / LD_ABS on fragmented SKBs
@@ -5683,6 +5542,53 @@ static struct bpf_test tests[] = {
 		{ {0x40, 0x05 } },
 	},
 	{
+		"LD_IND byte positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xff } },
+	},
+	{
+		"LD_IND byte positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_IND byte negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
+	},
+	{
+		"LD_IND byte negative offset, multiple calls",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
+	},
+	{
 		"LD_IND halfword positive offset",
 		.u.insns = {
 			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5731,6 +5637,39 @@ static struct bpf_test tests[] = {
 		{ {0x40, 0x66cc } },
 	},
 	{
+		"LD_IND halfword positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffff } },
+	},
+	{
+		"LD_IND halfword positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_IND halfword negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
+	},
+	{
 		"LD_IND word positive offset",
 		.u.insns = {
 			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5821,6 +5760,39 @@ static struct bpf_test tests[] = {
 		{ {0x40, 0x66cc77dd } },
 	},
 	{
+		"LD_IND word positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffffffff } },
+	},
+	{
+		"LD_IND word positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_IND word negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 } },
+	},
+	{
 		"LD_ABS byte",
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
@@ -5838,6 +5810,68 @@ static struct bpf_test tests[] = {
 		{ {0x40, 0xcc } },
 	},
 	{
+		"LD_ABS byte positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xff } },
+	},
+	{
+		"LD_ABS byte positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_ABS byte negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"LD_ABS byte negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
+	},
+	{
+		"LD_ABS byte negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_ABS byte negative offset, multiple calls",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 }, },
+	},
+	{
 		"LD_ABS halfword",
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
@@ -5872,6 +5906,55 @@ static struct bpf_test tests[] = {
 		{ {0x40, 0x99ff } },
 	},
 	{
+		"LD_ABS halfword positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffff } },
+	},
+	{
+		"LD_ABS halfword positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_ABS halfword negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"LD_ABS halfword negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x1982 }, },
+	},
+	{
+		"LD_ABS halfword negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
 		"LD_ABS word",
 		.u.insns = {
 			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
@@ -5939,6 +6022,140 @@ static struct bpf_test tests[] = {
 		},
 		{ {0x40, 0x88ee99ff } },
 	},
+	{
+		"LD_ABS word positive offset, all ff",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+		{ {0x40, 0xffffffff } },
+	},
+	{
+		"LD_ABS word positive offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LD_ABS word negative offset, out of bounds load",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_EXPECTED_FAIL,
+		.expected_errcode = -EINVAL,
+	},
+	{
+		"LD_ABS word negative offset, in bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x25051982 }, },
+	},
+	{
+		"LD_ABS word negative offset, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x3f, 0 }, },
+	},
+	{
+		"LDX_MSH standalone, preserved A",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0xffeebbaa }, },
+	},
+	{
+		"LDX_MSH standalone, preserved A 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x175e9d63 }, },
+	},
+	{
+		"LDX_MSH standalone, test result 1",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x14 }, },
+	},
+	{
+		"LDX_MSH standalone, test result 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x24 }, },
+	},
+	{
+		"LDX_MSH standalone, negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0 }, },
+	},
+	{
+		"LDX_MSH standalone, negative offset 2",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x24 }, },
+	},
+	{
+		"LDX_MSH standalone, out of bounds",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+			BPF_STMT(BPF_MISC | BPF_TXA, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0 }, },
+	},
 	/*
 	 * verify that the interpreter or JIT correctly sets A and X
 	 * to 0.
@@ -6127,14 +6344,6 @@ static struct bpf_test tests[] = {
 		{},
 		{ {0x1, 0x42 } },
 	},
-	{
-		"LD_ABS with helper changing skb data",
-		{ },
-		INTERNAL,
-		{ 0x34 },
-		{ { ETH_HLEN, 42 } },
-		.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
-	},
 	/* Checking interpreter vs JIT wrt signed extended imms. */
 	{
 		"JNE signed compare, test 1",
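The LDX_MSH tests added above all follow from the classic BPF semantics of the instruction, X = (P[k] & 0xf) << 2, the traditional "IP header length" load. A small sketch that reproduces the expected values for the 0x25 and 0x19 payload bytes used in "test result 1" and "test result 2":

#include <stdio.h>
#include <stdint.h>

/* cBPF LDX|B|MSH: X = (pkt[k] & 0xf) << 2 */
static uint32_t msh(uint8_t byte)
{
	return (uint32_t)(byte & 0xf) << 2;
}

int main(void)
{
	/* "test result 1": pkt[0x3c] = 0x25, expected 0x14 */
	printf("0x%x\n", msh(0x25));
	/* "test result 2": pkt[0x3e] = 0x19, expected 0x24 */
	printf("0x%x\n", msh(0x19));
	return 0;
}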