about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-09-06 17:45:08 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-06 17:45:08 -0400
commitaae3dbb4776e7916b6cd442d00159bea27a695c1 (patch)
treed074c5d783a81e7e2e084b1eba77f57459da7e37 /lib
parentec3604c7a5aae8953545b0d05495357009a960e5 (diff)
parent66bed8465a808400eb14562510e26c8818082cb8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) Support ipv6 checksum offload in sunvnet driver, from Shannon Nelson. 2) Move to RB-tree instead of custom AVL code in inetpeer, from Eric Dumazet. 3) Allow generic XDP to work on virtual devices, from John Fastabend. 4) Add bpf device maps and XDP_REDIRECT, which can be used to build arbitrary switching frameworks using XDP. From John Fastabend. 5) Remove UFO offloads from the tree, gave us little other than bugs. 6) Remove the IPSEC flow cache, from Florian Westphal. 7) Support ipv6 route offload in mlxsw driver. 8) Support VF representors in bnxt_en, from Sathya Perla. 9) Add support for forward error correction modes to ethtool, from Vidya Sagar Ravipati. 10) Add time filter for packet scheduler action dumping, from Jamal Hadi Salim. 11) Extend the zerocopy sendmsg() used by virtio and tap to regular sockets via MSG_ZEROCOPY. From Willem de Bruijn. 12) Significantly rework value tracking in the BPF verifier, from Edward Cree. 13) Add new jump instructions to eBPF, from Daniel Borkmann. 14) Rework rtnetlink plumbing so that operations can be run without taking the RTNL semaphore. From Florian Westphal. 15) Support XDP in tap driver, from Jason Wang. 16) Add 32-bit eBPF JIT for ARM, from Shubham Bansal. 17) Add Huawei hinic ethernet driver. 18) Allow to report MD5 keys in TCP inet_diag dumps, from Ivan Delalande. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1780 commits) i40e: point wb_desc at the nvm_wb_desc during i40e_read_nvm_aq i40e: avoid NVM acquire deadlock during NVM update drivers: net: xgene: Remove return statement from void function drivers: net: xgene: Configure tx/rx delay for ACPI drivers: net: xgene: Read tx/rx delay for ACPI rocker: fix kcalloc parameter order rds: Fix non-atomic operation on shared flag variable net: sched: don't use GFP_KERNEL under spin lock vhost_net: correctly check tx avail during rx busy polling net: mdio-mux: add mdio_mux parameter to mdio_mux_init() rxrpc: Make service connection lookup always check for retry net: stmmac: Delete dead code for MDIO registration gianfar: Fix Tx flow control deactivation cxgb4: Ignore MPS_TX_INT_CAUSE[Bubble] for T6 cxgb4: Fix pause frame count in t4_get_port_stats cxgb4: fix memory leak tun: rename generic_xdp to skb_xdp tun: reserve extra headroom only when XDP is set net: dsa: bcm_sf2: Configure IMP port TC2QOS mapping net: dsa: bcm_sf2: Advertise number of egress queues ...
Diffstat (limited to 'lib')
-rw-r--r--lib/idr.c66
-rw-r--r--lib/nlattr.c54
-rw-r--r--lib/radix-tree.c6
-rw-r--r--lib/test_bpf.c364
4 files changed, 458 insertions, 32 deletions
diff --git a/lib/idr.c b/lib/idr.c
index b13682bb0a1c..082778cf883e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -7,45 +7,32 @@
7DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); 7DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
8static DEFINE_SPINLOCK(simple_ida_lock); 8static DEFINE_SPINLOCK(simple_ida_lock);
9 9
10/** 10int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
11 * idr_alloc - allocate an id 11 unsigned long start, unsigned long end, gfp_t gfp,
12 * @idr: idr handle 12 bool ext)
13 * @ptr: pointer to be associated with the new id
14 * @start: the minimum id (inclusive)
15 * @end: the maximum id (exclusive)
16 * @gfp: memory allocation flags
17 *
18 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
19 * if there are no unused IDs in that range.
20 *
21 * Note that @end is treated as max when <= 0. This is to always allow
22 * using @start + N as @end as long as N is inside integer range.
23 *
24 * Simultaneous modifications to the @idr are not allowed and should be
25 * prevented by the user, usually with a lock. idr_alloc() may be called
26 * concurrently with read-only accesses to the @idr, such as idr_find() and
27 * idr_for_each_entry().
28 */
29int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
30{ 13{
31 void __rcu **slot;
32 struct radix_tree_iter iter; 14 struct radix_tree_iter iter;
15 void __rcu **slot;
33 16
34 if (WARN_ON_ONCE(start < 0))
35 return -EINVAL;
36 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) 17 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
37 return -EINVAL; 18 return -EINVAL;
38 19
39 radix_tree_iter_init(&iter, start); 20 radix_tree_iter_init(&iter, start);
40 slot = idr_get_free(&idr->idr_rt, &iter, gfp, end); 21 if (ext)
22 slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
23 else
24 slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
41 if (IS_ERR(slot)) 25 if (IS_ERR(slot))
42 return PTR_ERR(slot); 26 return PTR_ERR(slot);
43 27
44 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); 28 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
45 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); 29 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
46 return iter.index; 30
31 if (index)
32 *index = iter.index;
33 return 0;
47} 34}
48EXPORT_SYMBOL_GPL(idr_alloc); 35EXPORT_SYMBOL_GPL(idr_alloc_cmn);
49 36
50/** 37/**
51 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion 38 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
@@ -134,6 +121,20 @@ void *idr_get_next(struct idr *idr, int *nextid)
134} 121}
135EXPORT_SYMBOL(idr_get_next); 122EXPORT_SYMBOL(idr_get_next);
136 123
124void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
125{
126 struct radix_tree_iter iter;
127 void __rcu **slot;
128
129 slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
130 if (!slot)
131 return NULL;
132
133 *nextid = iter.index;
134 return rcu_dereference_raw(*slot);
135}
136EXPORT_SYMBOL(idr_get_next_ext);
137
137/** 138/**
138 * idr_replace - replace pointer for given id 139 * idr_replace - replace pointer for given id
139 * @idr: idr handle 140 * @idr: idr handle
@@ -150,12 +151,19 @@ EXPORT_SYMBOL(idr_get_next);
150 */ 151 */
151void *idr_replace(struct idr *idr, void *ptr, int id) 152void *idr_replace(struct idr *idr, void *ptr, int id)
152{ 153{
154 if (WARN_ON_ONCE(id < 0))
155 return ERR_PTR(-EINVAL);
156
157 return idr_replace_ext(idr, ptr, id);
158}
159EXPORT_SYMBOL(idr_replace);
160
161void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
162{
153 struct radix_tree_node *node; 163 struct radix_tree_node *node;
154 void __rcu **slot = NULL; 164 void __rcu **slot = NULL;
155 void *entry; 165 void *entry;
156 166
157 if (WARN_ON_ONCE(id < 0))
158 return ERR_PTR(-EINVAL);
159 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) 167 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
160 return ERR_PTR(-EINVAL); 168 return ERR_PTR(-EINVAL);
161 169
@@ -167,7 +175,7 @@ void *idr_replace(struct idr *idr, void *ptr, int id)
167 175
168 return entry; 176 return entry;
169} 177}
170EXPORT_SYMBOL(idr_replace); 178EXPORT_SYMBOL(idr_replace_ext);
171 179
172/** 180/**
173 * DOC: IDA description 181 * DOC: IDA description
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fb52435be42d..927c2f19f119 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -27,6 +27,30 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
27 [NLA_S64] = sizeof(s64), 27 [NLA_S64] = sizeof(s64),
28}; 28};
29 29
30static int validate_nla_bitfield32(const struct nlattr *nla,
31 u32 *valid_flags_allowed)
32{
33 const struct nla_bitfield32 *bf = nla_data(nla);
34 u32 *valid_flags_mask = valid_flags_allowed;
35
36 if (!valid_flags_allowed)
37 return -EINVAL;
38
39 /*disallow invalid bit selector */
40 if (bf->selector & ~*valid_flags_mask)
41 return -EINVAL;
42
43 /*disallow invalid bit values */
44 if (bf->value & ~*valid_flags_mask)
45 return -EINVAL;
46
47 /*disallow valid bit values that are not selected*/
48 if (bf->value & ~bf->selector)
49 return -EINVAL;
50
51 return 0;
52}
53
30static int validate_nla(const struct nlattr *nla, int maxtype, 54static int validate_nla(const struct nlattr *nla, int maxtype,
31 const struct nla_policy *policy) 55 const struct nla_policy *policy)
32{ 56{
@@ -46,6 +70,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
46 return -ERANGE; 70 return -ERANGE;
47 break; 71 break;
48 72
73 case NLA_BITFIELD32:
74 if (attrlen != sizeof(struct nla_bitfield32))
75 return -ERANGE;
76
77 return validate_nla_bitfield32(nla, pt->validation_data);
78
49 case NLA_NUL_STRING: 79 case NLA_NUL_STRING:
50 if (pt->len) 80 if (pt->len)
51 minlen = min_t(int, attrlen, pt->len + 1); 81 minlen = min_t(int, attrlen, pt->len + 1);
@@ -272,6 +302,30 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
272EXPORT_SYMBOL(nla_strlcpy); 302EXPORT_SYMBOL(nla_strlcpy);
273 303
274/** 304/**
305 * nla_strdup - Copy string attribute payload into a newly allocated buffer
306 * @nla: attribute to copy the string from
307 * @flags: the type of memory to allocate (see kmalloc).
308 *
309 * Returns a pointer to the allocated buffer or NULL on error.
310 */
311char *nla_strdup(const struct nlattr *nla, gfp_t flags)
312{
313 size_t srclen = nla_len(nla);
314 char *src = nla_data(nla), *dst;
315
316 if (srclen > 0 && src[srclen - 1] == '\0')
317 srclen--;
318
319 dst = kmalloc(srclen + 1, flags);
320 if (dst != NULL) {
321 memcpy(dst, src, srclen);
322 dst[srclen] = '\0';
323 }
324 return dst;
325}
326EXPORT_SYMBOL(nla_strdup);
327
328/**
275 * nla_memcpy - Copy a netlink attribute into another memory area 329 * nla_memcpy - Copy a netlink attribute into another memory area
276 * @dest: where to copy to memcpy 330 * @dest: where to copy to memcpy
277 * @src: netlink attribute to copy from 331 * @src: netlink attribute to copy from
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3527eb364964..9717e2a50374 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2138,13 +2138,13 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
2138} 2138}
2139EXPORT_SYMBOL(ida_pre_get); 2139EXPORT_SYMBOL(ida_pre_get);
2140 2140
2141void __rcu **idr_get_free(struct radix_tree_root *root, 2141void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
2142 struct radix_tree_iter *iter, gfp_t gfp, int end) 2142 struct radix_tree_iter *iter, gfp_t gfp,
2143 unsigned long max)
2143{ 2144{
2144 struct radix_tree_node *node = NULL, *child; 2145 struct radix_tree_node *node = NULL, *child;
2145 void __rcu **slot = (void __rcu **)&root->rnode; 2146 void __rcu **slot = (void __rcu **)&root->rnode;
2146 unsigned long maxindex, start = iter->next_index; 2147 unsigned long maxindex, start = iter->next_index;
2147 unsigned long max = end > 0 ? end - 1 : INT_MAX;
2148 unsigned int shift, offset = 0; 2148 unsigned int shift, offset = 0;
2149 2149
2150 grow: 2150 grow:
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index d9d5a410955c..aa8812ae6776 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -952,6 +952,32 @@ static struct bpf_test tests[] = {
952 { { 2, 0 }, { 3, 1 }, { 4, MAX_K } }, 952 { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
953 }, 953 },
954 { 954 {
955 "JGE (jt 0), test 1",
956 .u.insns = {
957 BPF_STMT(BPF_LDX | BPF_LEN, 0),
958 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
959 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
960 BPF_STMT(BPF_RET | BPF_K, 1),
961 BPF_STMT(BPF_RET | BPF_K, MAX_K)
962 },
963 CLASSIC,
964 { 4, 4, 4, 3, 3 },
965 { { 2, 0 }, { 3, 1 }, { 4, 1 } },
966 },
967 {
968 "JGE (jt 0), test 2",
969 .u.insns = {
970 BPF_STMT(BPF_LDX | BPF_LEN, 0),
971 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
972 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
973 BPF_STMT(BPF_RET | BPF_K, 1),
974 BPF_STMT(BPF_RET | BPF_K, MAX_K)
975 },
976 CLASSIC,
977 { 4, 4, 5, 3, 3 },
978 { { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
979 },
980 {
955 "JGE", 981 "JGE",
956 .u.insns = { 982 .u.insns = {
957 BPF_STMT(BPF_LDX | BPF_LEN, 0), 983 BPF_STMT(BPF_LDX | BPF_LEN, 0),
@@ -4492,6 +4518,35 @@ static struct bpf_test tests[] = {
4492 { }, 4518 { },
4493 { { 0, 1 } }, 4519 { { 0, 1 } },
4494 }, 4520 },
4521 /* BPF_JMP | BPF_JSLT | BPF_K */
4522 {
4523 "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
4524 .u.insns_int = {
4525 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4526 BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
4527 BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
4528 BPF_EXIT_INSN(),
4529 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4530 BPF_EXIT_INSN(),
4531 },
4532 INTERNAL,
4533 { },
4534 { { 0, 1 } },
4535 },
4536 {
4537 "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
4538 .u.insns_int = {
4539 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4540 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
4541 BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
4542 BPF_EXIT_INSN(),
4543 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4544 BPF_EXIT_INSN(),
4545 },
4546 INTERNAL,
4547 { },
4548 { { 0, 1 } },
4549 },
4495 /* BPF_JMP | BPF_JSGT | BPF_K */ 4550 /* BPF_JMP | BPF_JSGT | BPF_K */
4496 { 4551 {
4497 "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", 4552 "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
@@ -4521,6 +4576,73 @@ static struct bpf_test tests[] = {
4521 { }, 4576 { },
4522 { { 0, 1 } }, 4577 { { 0, 1 } },
4523 }, 4578 },
4579 /* BPF_JMP | BPF_JSLE | BPF_K */
4580 {
4581 "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
4582 .u.insns_int = {
4583 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4584 BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
4585 BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
4586 BPF_EXIT_INSN(),
4587 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4588 BPF_EXIT_INSN(),
4589 },
4590 INTERNAL,
4591 { },
4592 { { 0, 1 } },
4593 },
4594 {
4595 "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
4596 .u.insns_int = {
4597 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4598 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
4599 BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
4600 BPF_EXIT_INSN(),
4601 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4602 BPF_EXIT_INSN(),
4603 },
4604 INTERNAL,
4605 { },
4606 { { 0, 1 } },
4607 },
4608 {
4609 "JMP_JSLE_K: Signed jump: value walk 1",
4610 .u.insns_int = {
4611 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4612 BPF_LD_IMM64(R1, 3),
4613 BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
4614 BPF_ALU64_IMM(BPF_SUB, R1, 1),
4615 BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
4616 BPF_ALU64_IMM(BPF_SUB, R1, 1),
4617 BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
4618 BPF_ALU64_IMM(BPF_SUB, R1, 1),
4619 BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
4620 BPF_EXIT_INSN(), /* bad exit */
4621 BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
4622 BPF_EXIT_INSN(),
4623 },
4624 INTERNAL,
4625 { },
4626 { { 0, 1 } },
4627 },
4628 {
4629 "JMP_JSLE_K: Signed jump: value walk 2",
4630 .u.insns_int = {
4631 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4632 BPF_LD_IMM64(R1, 3),
4633 BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
4634 BPF_ALU64_IMM(BPF_SUB, R1, 2),
4635 BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
4636 BPF_ALU64_IMM(BPF_SUB, R1, 2),
4637 BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
4638 BPF_EXIT_INSN(), /* bad exit */
4639 BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
4640 BPF_EXIT_INSN(),
4641 },
4642 INTERNAL,
4643 { },
4644 { { 0, 1 } },
4645 },
4524 /* BPF_JMP | BPF_JSGE | BPF_K */ 4646 /* BPF_JMP | BPF_JSGE | BPF_K */
4525 { 4647 {
4526 "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", 4648 "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
@@ -4617,6 +4739,35 @@ static struct bpf_test tests[] = {
4617 { }, 4739 { },
4618 { { 0, 1 } }, 4740 { { 0, 1 } },
4619 }, 4741 },
4742 /* BPF_JMP | BPF_JLT | BPF_K */
4743 {
4744 "JMP_JLT_K: if (2 < 3) return 1",
4745 .u.insns_int = {
4746 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4747 BPF_LD_IMM64(R1, 2),
4748 BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
4749 BPF_EXIT_INSN(),
4750 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4751 BPF_EXIT_INSN(),
4752 },
4753 INTERNAL,
4754 { },
4755 { { 0, 1 } },
4756 },
4757 {
4758 "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
4759 .u.insns_int = {
4760 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4761 BPF_LD_IMM64(R1, 1),
4762 BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
4763 BPF_EXIT_INSN(),
4764 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4765 BPF_EXIT_INSN(),
4766 },
4767 INTERNAL,
4768 { },
4769 { { 0, 1 } },
4770 },
4620 /* BPF_JMP | BPF_JGE | BPF_K */ 4771 /* BPF_JMP | BPF_JGE | BPF_K */
4621 { 4772 {
4622 "JMP_JGE_K: if (3 >= 2) return 1", 4773 "JMP_JGE_K: if (3 >= 2) return 1",
@@ -4632,6 +4783,21 @@ static struct bpf_test tests[] = {
4632 { }, 4783 { },
4633 { { 0, 1 } }, 4784 { { 0, 1 } },
4634 }, 4785 },
4786 /* BPF_JMP | BPF_JLE | BPF_K */
4787 {
4788 "JMP_JLE_K: if (2 <= 3) return 1",
4789 .u.insns_int = {
4790 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4791 BPF_LD_IMM64(R1, 2),
4792 BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
4793 BPF_EXIT_INSN(),
4794 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4795 BPF_EXIT_INSN(),
4796 },
4797 INTERNAL,
4798 { },
4799 { { 0, 1 } },
4800 },
4635 /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ 4801 /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
4636 { 4802 {
4637 "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", 4803 "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
@@ -4662,6 +4828,36 @@ static struct bpf_test tests[] = {
4662 { }, 4828 { },
4663 { { 0, 1 } }, 4829 { { 0, 1 } },
4664 }, 4830 },
4831 /* BPF_JMP | BPF_JLT | BPF_K jump backwards */
4832 {
4833 "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
4834 .u.insns_int = {
4835 BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
4836 BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
4837 BPF_EXIT_INSN(),
4838 BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
4839 BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
4840 BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
4841 BPF_EXIT_INSN(),
4842 },
4843 INTERNAL,
4844 { },
4845 { { 0, 1 } },
4846 },
4847 {
4848 "JMP_JLE_K: if (3 <= 3) return 1",
4849 .u.insns_int = {
4850 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4851 BPF_LD_IMM64(R1, 3),
4852 BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
4853 BPF_EXIT_INSN(),
4854 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4855 BPF_EXIT_INSN(),
4856 },
4857 INTERNAL,
4858 { },
4859 { { 0, 1 } },
4860 },
4665 /* BPF_JMP | BPF_JNE | BPF_K */ 4861 /* BPF_JMP | BPF_JNE | BPF_K */
4666 { 4862 {
4667 "JMP_JNE_K: if (3 != 2) return 1", 4863 "JMP_JNE_K: if (3 != 2) return 1",
@@ -4752,6 +4948,37 @@ static struct bpf_test tests[] = {
4752 { }, 4948 { },
4753 { { 0, 1 } }, 4949 { { 0, 1 } },
4754 }, 4950 },
4951 /* BPF_JMP | BPF_JSLT | BPF_X */
4952 {
4953 "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
4954 .u.insns_int = {
4955 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4956 BPF_LD_IMM64(R1, -1),
4957 BPF_LD_IMM64(R2, -2),
4958 BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
4959 BPF_EXIT_INSN(),
4960 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4961 BPF_EXIT_INSN(),
4962 },
4963 INTERNAL,
4964 { },
4965 { { 0, 1 } },
4966 },
4967 {
4968 "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
4969 .u.insns_int = {
4970 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4971 BPF_LD_IMM64(R1, -1),
4972 BPF_LD_IMM64(R2, -1),
4973 BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
4974 BPF_EXIT_INSN(),
4975 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4976 BPF_EXIT_INSN(),
4977 },
4978 INTERNAL,
4979 { },
4980 { { 0, 1 } },
4981 },
4755 /* BPF_JMP | BPF_JSGE | BPF_X */ 4982 /* BPF_JMP | BPF_JSGE | BPF_X */
4756 { 4983 {
4757 "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", 4984 "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
@@ -4783,6 +5010,37 @@ static struct bpf_test tests[] = {
4783 { }, 5010 { },
4784 { { 0, 1 } }, 5011 { { 0, 1 } },
4785 }, 5012 },
5013 /* BPF_JMP | BPF_JSLE | BPF_X */
5014 {
5015 "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
5016 .u.insns_int = {
5017 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5018 BPF_LD_IMM64(R1, -1),
5019 BPF_LD_IMM64(R2, -2),
5020 BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
5021 BPF_EXIT_INSN(),
5022 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5023 BPF_EXIT_INSN(),
5024 },
5025 INTERNAL,
5026 { },
5027 { { 0, 1 } },
5028 },
5029 {
5030 "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
5031 .u.insns_int = {
5032 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5033 BPF_LD_IMM64(R1, -1),
5034 BPF_LD_IMM64(R2, -1),
5035 BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
5036 BPF_EXIT_INSN(),
5037 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5038 BPF_EXIT_INSN(),
5039 },
5040 INTERNAL,
5041 { },
5042 { { 0, 1 } },
5043 },
4786 /* BPF_JMP | BPF_JGT | BPF_X */ 5044 /* BPF_JMP | BPF_JGT | BPF_X */
4787 { 5045 {
4788 "JMP_JGT_X: if (3 > 2) return 1", 5046 "JMP_JGT_X: if (3 > 2) return 1",
@@ -4814,6 +5072,37 @@ static struct bpf_test tests[] = {
4814 { }, 5072 { },
4815 { { 0, 1 } }, 5073 { { 0, 1 } },
4816 }, 5074 },
5075 /* BPF_JMP | BPF_JLT | BPF_X */
5076 {
5077 "JMP_JLT_X: if (2 < 3) return 1",
5078 .u.insns_int = {
5079 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5080 BPF_LD_IMM64(R1, 3),
5081 BPF_LD_IMM64(R2, 2),
5082 BPF_JMP_REG(BPF_JLT, R2, R1, 1),
5083 BPF_EXIT_INSN(),
5084 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5085 BPF_EXIT_INSN(),
5086 },
5087 INTERNAL,
5088 { },
5089 { { 0, 1 } },
5090 },
5091 {
5092 "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
5093 .u.insns_int = {
5094 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5095 BPF_LD_IMM64(R1, -1),
5096 BPF_LD_IMM64(R2, 1),
5097 BPF_JMP_REG(BPF_JLT, R2, R1, 1),
5098 BPF_EXIT_INSN(),
5099 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5100 BPF_EXIT_INSN(),
5101 },
5102 INTERNAL,
5103 { },
5104 { { 0, 1 } },
5105 },
4817 /* BPF_JMP | BPF_JGE | BPF_X */ 5106 /* BPF_JMP | BPF_JGE | BPF_X */
4818 { 5107 {
4819 "JMP_JGE_X: if (3 >= 2) return 1", 5108 "JMP_JGE_X: if (3 >= 2) return 1",
@@ -4845,6 +5134,37 @@ static struct bpf_test tests[] = {
4845 { }, 5134 { },
4846 { { 0, 1 } }, 5135 { { 0, 1 } },
4847 }, 5136 },
5137 /* BPF_JMP | BPF_JLE | BPF_X */
5138 {
5139 "JMP_JLE_X: if (2 <= 3) return 1",
5140 .u.insns_int = {
5141 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5142 BPF_LD_IMM64(R1, 3),
5143 BPF_LD_IMM64(R2, 2),
5144 BPF_JMP_REG(BPF_JLE, R2, R1, 1),
5145 BPF_EXIT_INSN(),
5146 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5147 BPF_EXIT_INSN(),
5148 },
5149 INTERNAL,
5150 { },
5151 { { 0, 1 } },
5152 },
5153 {
5154 "JMP_JLE_X: if (3 <= 3) return 1",
5155 .u.insns_int = {
5156 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5157 BPF_LD_IMM64(R1, 3),
5158 BPF_LD_IMM64(R2, 3),
5159 BPF_JMP_REG(BPF_JLE, R1, R2, 1),
5160 BPF_EXIT_INSN(),
5161 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5162 BPF_EXIT_INSN(),
5163 },
5164 INTERNAL,
5165 { },
5166 { { 0, 1 } },
5167 },
4848 { 5168 {
4849 /* Mainly testing JIT + imm64 here. */ 5169 /* Mainly testing JIT + imm64 here. */
4850 "JMP_JGE_X: ldimm64 test 1", 5170 "JMP_JGE_X: ldimm64 test 1",
@@ -4890,6 +5210,50 @@ static struct bpf_test tests[] = {
4890 { }, 5210 { },
4891 { { 0, 1 } }, 5211 { { 0, 1 } },
4892 }, 5212 },
5213 {
5214 "JMP_JLE_X: ldimm64 test 1",
5215 .u.insns_int = {
5216 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5217 BPF_LD_IMM64(R1, 3),
5218 BPF_LD_IMM64(R2, 2),
5219 BPF_JMP_REG(BPF_JLE, R2, R1, 2),
5220 BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
5221 BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
5222 BPF_EXIT_INSN(),
5223 },
5224 INTERNAL,
5225 { },
5226 { { 0, 0xeeeeeeeeU } },
5227 },
5228 {
5229 "JMP_JLE_X: ldimm64 test 2",
5230 .u.insns_int = {
5231 BPF_ALU32_IMM(BPF_MOV, R0, 0),
5232 BPF_LD_IMM64(R1, 3),
5233 BPF_LD_IMM64(R2, 2),
5234 BPF_JMP_REG(BPF_JLE, R2, R1, 0),
5235 BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
5236 BPF_EXIT_INSN(),
5237 },
5238 INTERNAL,
5239 { },
5240 { { 0, 0xffffffffU } },
5241 },
5242 {
5243 "JMP_JLE_X: ldimm64 test 3",
5244 .u.insns_int = {
5245 BPF_ALU32_IMM(BPF_MOV, R0, 1),
5246 BPF_LD_IMM64(R1, 3),
5247 BPF_LD_IMM64(R2, 2),
5248 BPF_JMP_REG(BPF_JLE, R2, R1, 4),
5249 BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
5250 BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
5251 BPF_EXIT_INSN(),
5252 },
5253 INTERNAL,
5254 { },
5255 { { 0, 1 } },
5256 },
4893 /* BPF_JMP | BPF_JNE | BPF_X */ 5257 /* BPF_JMP | BPF_JNE | BPF_X */
4894 { 5258 {
4895 "JMP_JNE_X: if (3 != 2) return 1", 5259 "JMP_JNE_X: if (3 != 2) return 1",