aboutsummaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-22 13:15:09 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-22 13:15:09 -0500
commit3051bf36c25d5153051704291782f8d44e744d36 (patch)
tree72dfc8a1d12675c6f2981d13102df954b678f11b /tools
parent1e74a2eb1f5cc7f2f2b5aa9c9eeecbcf352220a3 (diff)
parent005c3490e9db23738d91e02788606c0fe4734723 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: "Highlights: 1) Support TX_RING in AF_PACKET TPACKET_V3 mode, from Sowmini Varadhan. 2) Simplify classifier state on sk_buff in order to shrink it a bit. From Willem de Bruijn. 3) Introduce SIPHASH and its usage for secure sequence numbers and syncookies. From Jason A. Donenfeld. 4) Reduce CPU usage for ICMP replies we are going to limit or suppress, from Jesper Dangaard Brouer. 5) Introduce Shared Memory Communications socket layer, from Ursula Braun. 6) Add RACK loss detection and allow it to actually trigger fast recovery instead of just assisting after other algorithms have triggered it. From Yuchung Cheng. 7) Add xmit_more and BQL support to mvneta driver, from Simon Guinot. 8) skb_cow_data avoidance in esp4 and esp6, from Steffen Klassert. 9) Export MPLS packet stats via netlink, from Robert Shearman. 10) Significantly improve inet port bind conflict handling, especially when an application is restarted and changes its setting of reuseport. From Josef Bacik. 11) Implement TX batching in vhost_net, from Jason Wang. 12) Extend the dummy device so that VF (virtual function) features, such as configuration, can be more easily tested. From Phil Sutter. 13) Avoid two atomic ops per page on x86 in bnx2x driver, from Eric Dumazet. 14) Add new bpf MAP, implementing a longest prefix match trie. From Daniel Mack. 15) Packet sample offloading support in mlxsw driver, from Yotam Gigi. 16) Add new aquantia driver, from David VomLehn. 17) Add bpf tracepoints, from Daniel Borkmann. 18) Add support for port mirroring to b53 and bcm_sf2 drivers, from Florian Fainelli. 19) Remove custom busy polling in many drivers, it is done in the core networking since 4.5 times. From Eric Dumazet. 20) Support XDP adjust_head in virtio_net, from John Fastabend. 21) Fix several major holes in neighbour entry confirmation, from Julian Anastasov. 22) Add XDP support to bnxt_en driver, from Michael Chan. 
23) VXLAN offloads for enic driver, from Govindarajulu Varadarajan. 24) Add IPVTAP driver (IP-VLAN based tap driver) from Sainath Grandhi. 25) Support GRO in IPSEC protocols, from Steffen Klassert" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1764 commits) Revert "ath10k: Search SMBIOS for OEM board file extension" net: socket: fix recvmmsg not returning error from sock_error bnxt_en: use eth_hw_addr_random() bpf: fix unlocking of jited image when module ronx not set arch: add ARCH_HAS_SET_MEMORY config net: napi_watchdog() can use napi_schedule_irqoff() tcp: Revert "tcp: tcp_probe: use spin_lock_bh()" net/hsr: use eth_hw_addr_random() net: mvpp2: enable building on 64-bit platforms net: mvpp2: switch to build_skb() in the RX path net: mvpp2: simplify MVPP2_PRS_RI_* definitions net: mvpp2: fix indentation of MVPP2_EXT_GLOBAL_CTRL_DEFAULT net: mvpp2: remove unused register definitions net: mvpp2: simplify mvpp2_bm_bufs_add() net: mvpp2: drop useless fields in mvpp2_bm_pool and related code net: mvpp2: remove unused 'tx_skb' field of 'struct mvpp2_tx_queue' net: mvpp2: release reference to txq_cpu[] entry after unmapping net: mvpp2: handle too large value in mvpp2_rx_time_coal_set() net: mvpp2: handle too large value handling in mvpp2_rx_pkts_coal_set() net: mvpp2: remove useless arguments in mvpp2_rx_{pkts, time}_coal_set ...
Diffstat (limited to 'tools')
-rw-r--r--tools/include/uapi/linux/bpf.h23
-rw-r--r--tools/lib/bpf/bpf.c18
-rw-r--r--tools/lib/bpf/bpf.h12
-rw-r--r--tools/lib/traceevent/event-parse.c34
-rw-r--r--tools/lib/traceevent/event-parse.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c1
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_sys.h108
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c358
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c138
-rw-r--r--tools/testing/selftests/bpf/test_maps.c162
-rw-r--r--tools/testing/selftests/bpf/test_tag.c203
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c1633
-rw-r--r--tools/testing/selftests/net/psock_lib.h39
-rw-r--r--tools/testing/selftests/net/psock_tpacket.c97
17 files changed, 2536 insertions, 313 deletions
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d2b0ac799d03..0539a0ceef38 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -63,6 +63,12 @@ struct bpf_insn {
63 __s32 imm; /* signed immediate constant */ 63 __s32 imm; /* signed immediate constant */
64}; 64};
65 65
66/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
67struct bpf_lpm_trie_key {
68 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
69 __u8 data[0]; /* Arbitrary size */
70};
71
66/* BPF syscall commands, see bpf(2) man-page for details. */ 72/* BPF syscall commands, see bpf(2) man-page for details. */
67enum bpf_cmd { 73enum bpf_cmd {
68 BPF_MAP_CREATE, 74 BPF_MAP_CREATE,
@@ -89,6 +95,7 @@ enum bpf_map_type {
89 BPF_MAP_TYPE_CGROUP_ARRAY, 95 BPF_MAP_TYPE_CGROUP_ARRAY,
90 BPF_MAP_TYPE_LRU_HASH, 96 BPF_MAP_TYPE_LRU_HASH,
91 BPF_MAP_TYPE_LRU_PERCPU_HASH, 97 BPF_MAP_TYPE_LRU_PERCPU_HASH,
98 BPF_MAP_TYPE_LPM_TRIE,
92}; 99};
93 100
94enum bpf_prog_type { 101enum bpf_prog_type {
@@ -437,6 +444,18 @@ union bpf_attr {
437 * @xdp_md: pointer to xdp_md 444 * @xdp_md: pointer to xdp_md
438 * @delta: An positive/negative integer to be added to xdp_md.data 445 * @delta: An positive/negative integer to be added to xdp_md.data
439 * Return: 0 on success or negative on error 446 * Return: 0 on success or negative on error
447 *
448 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
449 * Copy a NUL terminated string from unsafe address. In case the string
450 * length is smaller than size, the target is not padded with further NUL
451 * bytes. In case the string length is larger than size, just count-1
452 * bytes are copied and the last byte is set to NUL.
453 * @dst: destination address
454 * @size: maximum number of bytes to copy, including the trailing NUL
455 * @unsafe_ptr: unsafe address
456 * Return:
457 * > 0 length of the string including the trailing NUL on success
458 * < 0 error
440 */ 459 */
441#define __BPF_FUNC_MAPPER(FN) \ 460#define __BPF_FUNC_MAPPER(FN) \
442 FN(unspec), \ 461 FN(unspec), \
@@ -483,7 +502,8 @@ union bpf_attr {
483 FN(set_hash_invalid), \ 502 FN(set_hash_invalid), \
484 FN(get_numa_node_id), \ 503 FN(get_numa_node_id), \
485 FN(skb_change_head), \ 504 FN(skb_change_head), \
486 FN(xdp_adjust_head), 505 FN(xdp_adjust_head), \
506 FN(probe_read_str),
487 507
488/* integer value in 'imm' field of BPF_CALL instruction selects which helper 508/* integer value in 'imm' field of BPF_CALL instruction selects which helper
489 * function eBPF program intends to call 509 * function eBPF program intends to call
@@ -509,6 +529,7 @@ enum bpf_func_id {
509/* BPF_FUNC_l4_csum_replace flags. */ 529/* BPF_FUNC_l4_csum_replace flags. */
510#define BPF_F_PSEUDO_HDR (1ULL << 4) 530#define BPF_F_PSEUDO_HDR (1ULL << 4)
511#define BPF_F_MARK_MANGLED_0 (1ULL << 5) 531#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
532#define BPF_F_MARK_ENFORCE (1ULL << 6)
512 533
513/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 534/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
514#define BPF_F_INGRESS (1ULL << 0) 535#define BPF_F_INGRESS (1ULL << 0)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ae752fa4eaa7..d48b70ceb25a 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -42,13 +42,13 @@
42# endif 42# endif
43#endif 43#endif
44 44
45static __u64 ptr_to_u64(void *ptr) 45static inline __u64 ptr_to_u64(const void *ptr)
46{ 46{
47 return (__u64) (unsigned long) ptr; 47 return (__u64) (unsigned long) ptr;
48} 48}
49 49
50static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, 50static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
51 unsigned int size) 51 unsigned int size)
52{ 52{
53 return syscall(__NR_bpf, cmd, attr, size); 53 return syscall(__NR_bpf, cmd, attr, size);
54} 54}
@@ -69,8 +69,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
69 return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 69 return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
70} 70}
71 71
72int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, 72int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
73 size_t insns_cnt, char *license, 73 size_t insns_cnt, const char *license,
74 __u32 kern_version, char *log_buf, size_t log_buf_sz) 74 __u32 kern_version, char *log_buf, size_t log_buf_sz)
75{ 75{
76 int fd; 76 int fd;
@@ -98,7 +98,7 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
98 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 98 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
99} 99}
100 100
101int bpf_map_update_elem(int fd, void *key, void *value, 101int bpf_map_update_elem(int fd, const void *key, const void *value,
102 __u64 flags) 102 __u64 flags)
103{ 103{
104 union bpf_attr attr; 104 union bpf_attr attr;
@@ -112,7 +112,7 @@ int bpf_map_update_elem(int fd, void *key, void *value,
112 return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); 112 return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
113} 113}
114 114
115int bpf_map_lookup_elem(int fd, void *key, void *value) 115int bpf_map_lookup_elem(int fd, const void *key, void *value)
116{ 116{
117 union bpf_attr attr; 117 union bpf_attr attr;
118 118
@@ -124,7 +124,7 @@ int bpf_map_lookup_elem(int fd, void *key, void *value)
124 return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); 124 return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
125} 125}
126 126
127int bpf_map_delete_elem(int fd, void *key) 127int bpf_map_delete_elem(int fd, const void *key)
128{ 128{
129 union bpf_attr attr; 129 union bpf_attr attr;
130 130
@@ -135,7 +135,7 @@ int bpf_map_delete_elem(int fd, void *key)
135 return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); 135 return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
136} 136}
137 137
138int bpf_map_get_next_key(int fd, void *key, void *next_key) 138int bpf_map_get_next_key(int fd, const void *key, void *next_key)
139{ 139{
140 union bpf_attr attr; 140 union bpf_attr attr;
141 141
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 44fb7c5f8ae6..09c3dcac0496 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -29,17 +29,17 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
29 29
30/* Recommend log buffer size */ 30/* Recommend log buffer size */
31#define BPF_LOG_BUF_SIZE 65536 31#define BPF_LOG_BUF_SIZE 65536
32int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns, 32int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
33 size_t insns_cnt, char *license, 33 size_t insns_cnt, const char *license,
34 __u32 kern_version, char *log_buf, 34 __u32 kern_version, char *log_buf,
35 size_t log_buf_sz); 35 size_t log_buf_sz);
36 36
37int bpf_map_update_elem(int fd, void *key, void *value, 37int bpf_map_update_elem(int fd, const void *key, const void *value,
38 __u64 flags); 38 __u64 flags);
39 39
40int bpf_map_lookup_elem(int fd, void *key, void *value); 40int bpf_map_lookup_elem(int fd, const void *key, void *value);
41int bpf_map_delete_elem(int fd, void *key); 41int bpf_map_delete_elem(int fd, const void *key);
42int bpf_map_get_next_key(int fd, void *key, void *next_key); 42int bpf_map_get_next_key(int fd, const void *key, void *next_key);
43int bpf_obj_pin(int fd, const char *pathname); 43int bpf_obj_pin(int fd, const char *pathname);
44int bpf_obj_get(const char *pathname); 44int bpf_obj_get(const char *pathname);
45int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, 45int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 14a4f623c1a5..f2ea78021450 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -831,6 +831,7 @@ static void free_arg(struct print_arg *arg)
831 free_flag_sym(arg->symbol.symbols); 831 free_flag_sym(arg->symbol.symbols);
832 break; 832 break;
833 case PRINT_HEX: 833 case PRINT_HEX:
834 case PRINT_HEX_STR:
834 free_arg(arg->hex.field); 835 free_arg(arg->hex.field);
835 free_arg(arg->hex.size); 836 free_arg(arg->hex.size);
836 break; 837 break;
@@ -2629,10 +2630,11 @@ out_free:
2629} 2630}
2630 2631
2631static enum event_type 2632static enum event_type
2632process_hex(struct event_format *event, struct print_arg *arg, char **tok) 2633process_hex_common(struct event_format *event, struct print_arg *arg,
2634 char **tok, enum print_arg_type type)
2633{ 2635{
2634 memset(arg, 0, sizeof(*arg)); 2636 memset(arg, 0, sizeof(*arg));
2635 arg->type = PRINT_HEX; 2637 arg->type = type;
2636 2638
2637 if (alloc_and_process_delim(event, ",", &arg->hex.field)) 2639 if (alloc_and_process_delim(event, ",", &arg->hex.field))
2638 goto out; 2640 goto out;
@@ -2651,6 +2653,19 @@ out:
2651} 2653}
2652 2654
2653static enum event_type 2655static enum event_type
2656process_hex(struct event_format *event, struct print_arg *arg, char **tok)
2657{
2658 return process_hex_common(event, arg, tok, PRINT_HEX);
2659}
2660
2661static enum event_type
2662process_hex_str(struct event_format *event, struct print_arg *arg,
2663 char **tok)
2664{
2665 return process_hex_common(event, arg, tok, PRINT_HEX_STR);
2666}
2667
2668static enum event_type
2654process_int_array(struct event_format *event, struct print_arg *arg, char **tok) 2669process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
2655{ 2670{
2656 memset(arg, 0, sizeof(*arg)); 2671 memset(arg, 0, sizeof(*arg));
@@ -3009,6 +3024,10 @@ process_function(struct event_format *event, struct print_arg *arg,
3009 free_token(token); 3024 free_token(token);
3010 return process_hex(event, arg, tok); 3025 return process_hex(event, arg, tok);
3011 } 3026 }
3027 if (strcmp(token, "__print_hex_str") == 0) {
3028 free_token(token);
3029 return process_hex_str(event, arg, tok);
3030 }
3012 if (strcmp(token, "__print_array") == 0) { 3031 if (strcmp(token, "__print_array") == 0) {
3013 free_token(token); 3032 free_token(token);
3014 return process_int_array(event, arg, tok); 3033 return process_int_array(event, arg, tok);
@@ -3547,6 +3566,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
3547 case PRINT_SYMBOL: 3566 case PRINT_SYMBOL:
3548 case PRINT_INT_ARRAY: 3567 case PRINT_INT_ARRAY:
3549 case PRINT_HEX: 3568 case PRINT_HEX:
3569 case PRINT_HEX_STR:
3550 break; 3570 break;
3551 case PRINT_TYPE: 3571 case PRINT_TYPE:
3552 val = eval_num_arg(data, size, event, arg->typecast.item); 3572 val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3962,6 +3982,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
3962 } 3982 }
3963 break; 3983 break;
3964 case PRINT_HEX: 3984 case PRINT_HEX:
3985 case PRINT_HEX_STR:
3965 if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) { 3986 if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
3966 unsigned long offset; 3987 unsigned long offset;
3967 offset = pevent_read_number(pevent, 3988 offset = pevent_read_number(pevent,
@@ -3981,7 +4002,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
3981 } 4002 }
3982 len = eval_num_arg(data, size, event, arg->hex.size); 4003 len = eval_num_arg(data, size, event, arg->hex.size);
3983 for (i = 0; i < len; i++) { 4004 for (i = 0; i < len; i++) {
3984 if (i) 4005 if (i && arg->type == PRINT_HEX)
3985 trace_seq_putc(s, ' '); 4006 trace_seq_putc(s, ' ');
3986 trace_seq_printf(s, "%02x", hex[i]); 4007 trace_seq_printf(s, "%02x", hex[i]);
3987 } 4008 }
@@ -5727,6 +5748,13 @@ static void print_args(struct print_arg *args)
5727 print_args(args->hex.size); 5748 print_args(args->hex.size);
5728 printf(")"); 5749 printf(")");
5729 break; 5750 break;
5751 case PRINT_HEX_STR:
5752 printf("__print_hex_str(");
5753 print_args(args->hex.field);
5754 printf(", ");
5755 print_args(args->hex.size);
5756 printf(")");
5757 break;
5730 case PRINT_INT_ARRAY: 5758 case PRINT_INT_ARRAY:
5731 printf("__print_array("); 5759 printf("__print_array(");
5732 print_args(args->int_array.field); 5760 print_args(args->int_array.field);
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 7aae746ec2fe..74cecba87daa 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -292,6 +292,7 @@ enum print_arg_type {
292 PRINT_FUNC, 292 PRINT_FUNC,
293 PRINT_BITMASK, 293 PRINT_BITMASK,
294 PRINT_DYNAMIC_ARRAY_LEN, 294 PRINT_DYNAMIC_ARRAY_LEN,
295 PRINT_HEX_STR,
295}; 296};
296 297
297struct print_arg { 298struct print_arg {
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index c1555fd0035a..dff043a29589 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -217,6 +217,7 @@ static void define_event_symbols(struct event_format *event,
217 cur_field_name); 217 cur_field_name);
218 break; 218 break;
219 case PRINT_HEX: 219 case PRINT_HEX:
220 case PRINT_HEX_STR:
220 define_event_symbols(event, ev_name, args->hex.field); 221 define_event_symbols(event, ev_name, args->hex.field);
221 define_event_symbols(event, ev_name, args->hex.size); 222 define_event_symbols(event, ev_name, args->hex.size);
222 break; 223 break;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 089438da1f7f..581e0efd6356 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -236,6 +236,7 @@ static void define_event_symbols(struct event_format *event,
236 cur_field_name); 236 cur_field_name);
237 break; 237 break;
238 case PRINT_HEX: 238 case PRINT_HEX:
239 case PRINT_HEX_STR:
239 define_event_symbols(event, ev_name, args->hex.field); 240 define_event_symbols(event, ev_name, args->hex.field);
240 define_event_symbols(event, ev_name, args->hex.size); 241 define_event_symbols(event, ev_name, args->hex.size);
241 break; 242 break;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 071431bedde8..541d9d7fad5a 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,3 +1,5 @@
1test_verifier 1test_verifier
2test_maps 2test_maps
3test_lru_map 3test_lru_map
4test_lpm_map
5test_tag
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7a5f24543a5f..c7816fe60feb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,12 +1,25 @@
1CFLAGS += -Wall -O2 -I../../../../usr/include 1LIBDIR := ../../../lib
2BPFOBJ := $(LIBDIR)/bpf/bpf.o
2 3
3test_objs = test_verifier test_maps test_lru_map 4CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
4 5
5TEST_PROGS := test_verifier test_maps test_lru_map test_kmod.sh 6test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map
7
8TEST_PROGS := $(test_objs) test_kmod.sh
6TEST_FILES := $(test_objs) 9TEST_FILES := $(test_objs)
7 10
11.PHONY: all clean force
12
8all: $(test_objs) 13all: $(test_objs)
9 14
15# force a rebuild of BPFOBJ when its dependencies are updated
16force:
17
18$(BPFOBJ): force
19 $(MAKE) -C $(dir $(BPFOBJ))
20
21$(test_objs): $(BPFOBJ)
22
10include ../lib.mk 23include ../lib.mk
11 24
12clean: 25clean:
diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h
deleted file mode 100644
index 6b4565f2a3f2..000000000000
--- a/tools/testing/selftests/bpf/bpf_sys.h
+++ /dev/null
@@ -1,108 +0,0 @@
1#ifndef __BPF_SYS__
2#define __BPF_SYS__
3
4#include <stdint.h>
5#include <stdlib.h>
6
7#include <sys/syscall.h>
8
9#include <linux/bpf.h>
10
11static inline __u64 bpf_ptr_to_u64(const void *ptr)
12{
13 return (__u64)(unsigned long) ptr;
14}
15
16static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size)
17{
18#ifdef __NR_bpf
19 return syscall(__NR_bpf, cmd, attr, size);
20#else
21 fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
22 errno = ENOSYS;
23 return -1;
24#endif
25}
26
27static inline int bpf_map_lookup(int fd, const void *key, void *value)
28{
29 union bpf_attr attr = {};
30
31 attr.map_fd = fd;
32 attr.key = bpf_ptr_to_u64(key);
33 attr.value = bpf_ptr_to_u64(value);
34
35 return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
36}
37
38static inline int bpf_map_update(int fd, const void *key, const void *value,
39 uint64_t flags)
40{
41 union bpf_attr attr = {};
42
43 attr.map_fd = fd;
44 attr.key = bpf_ptr_to_u64(key);
45 attr.value = bpf_ptr_to_u64(value);
46 attr.flags = flags;
47
48 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
49}
50
51static inline int bpf_map_delete(int fd, const void *key)
52{
53 union bpf_attr attr = {};
54
55 attr.map_fd = fd;
56 attr.key = bpf_ptr_to_u64(key);
57
58 return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
59}
60
61static inline int bpf_map_next_key(int fd, const void *key, void *next_key)
62{
63 union bpf_attr attr = {};
64
65 attr.map_fd = fd;
66 attr.key = bpf_ptr_to_u64(key);
67 attr.next_key = bpf_ptr_to_u64(next_key);
68
69 return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
70}
71
72static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
73 uint32_t size_value, uint32_t max_elem,
74 uint32_t flags)
75{
76 union bpf_attr attr = {};
77
78 attr.map_type = type;
79 attr.key_size = size_key;
80 attr.value_size = size_value;
81 attr.max_entries = max_elem;
82 attr.map_flags = flags;
83
84 return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
85}
86
87static inline int bpf_prog_load(enum bpf_prog_type type,
88 const struct bpf_insn *insns, size_t size_insns,
89 const char *license, char *log, size_t size_log)
90{
91 union bpf_attr attr = {};
92
93 attr.prog_type = type;
94 attr.insns = bpf_ptr_to_u64(insns);
95 attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
96 attr.license = bpf_ptr_to_u64(license);
97
98 if (size_log > 0) {
99 attr.log_buf = bpf_ptr_to_u64(log);
100 attr.log_size = size_log;
101 attr.log_level = 1;
102 log[0] = 0;
103 }
104
105 return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
106}
107
108#endif /* __BPF_SYS__ */
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
new file mode 100644
index 000000000000..e97565243d59
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -0,0 +1,358 @@
1/*
2 * Randomized tests for eBPF longest-prefix-match maps
3 *
4 * This program runs randomized tests against the lpm-bpf-map. It implements a
5 * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
6 * lists. The implementation should be pretty straightforward.
7 *
8 * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
9 * the trie-based bpf-map implementation behaves the same way as tlpm.
10 */
11
12#include <assert.h>
13#include <errno.h>
14#include <inttypes.h>
15#include <linux/bpf.h>
16#include <stdio.h>
17#include <stdlib.h>
18#include <string.h>
19#include <time.h>
20#include <unistd.h>
21#include <arpa/inet.h>
22#include <sys/time.h>
23#include <sys/resource.h>
24
25#include <bpf/bpf.h>
26#include "bpf_util.h"
27
28struct tlpm_node {
29 struct tlpm_node *next;
30 size_t n_bits;
31 uint8_t key[];
32};
33
34static struct tlpm_node *tlpm_add(struct tlpm_node *list,
35 const uint8_t *key,
36 size_t n_bits)
37{
38 struct tlpm_node *node;
39 size_t n;
40
41 /* add new entry with @key/@n_bits to @list and return new head */
42
43 n = (n_bits + 7) / 8;
44 node = malloc(sizeof(*node) + n);
45 assert(node);
46
47 node->next = list;
48 node->n_bits = n_bits;
49 memcpy(node->key, key, n);
50
51 return node;
52}
53
54static void tlpm_clear(struct tlpm_node *list)
55{
56 struct tlpm_node *node;
57
58 /* free all entries in @list */
59
60 while ((node = list)) {
61 list = list->next;
62 free(node);
63 }
64}
65
66static struct tlpm_node *tlpm_match(struct tlpm_node *list,
67 const uint8_t *key,
68 size_t n_bits)
69{
70 struct tlpm_node *best = NULL;
71 size_t i;
72
73 /* Perform longest prefix-match on @key/@n_bits. That is, iterate all
74 * entries and match each prefix against @key. Remember the "best"
75 * entry we find (i.e., the longest prefix that matches) and return it
76 * to the caller when done.
77 */
78
79 for ( ; list; list = list->next) {
80 for (i = 0; i < n_bits && i < list->n_bits; ++i) {
81 if ((key[i / 8] & (1 << (7 - i % 8))) !=
82 (list->key[i / 8] & (1 << (7 - i % 8))))
83 break;
84 }
85
86 if (i >= list->n_bits) {
87 if (!best || i > best->n_bits)
88 best = list;
89 }
90 }
91
92 return best;
93}
94
95static void test_lpm_basic(void)
96{
97 struct tlpm_node *list = NULL, *t1, *t2;
98
99 /* very basic, static tests to verify tlpm works as expected */
100
101 assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
102
103 t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
104 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
105 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
106 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
107 assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
108 assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
109 assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));
110
111 t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
112 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
113 assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
114 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
115 assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
116
117 tlpm_clear(list);
118}
119
120static void test_lpm_order(void)
121{
122 struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
123 size_t i, j;
124
125 /* Verify the tlpm implementation works correctly regardless of the
126 * order of entries. Insert a random set of entries into @l1, and copy
127 * the same data in reverse order into @l2. Then verify a lookup of
128 * random keys will yield the same result in both sets.
129 */
130
131 for (i = 0; i < (1 << 12); ++i)
132 l1 = tlpm_add(l1, (uint8_t[]){
133 rand() % 0xff,
134 rand() % 0xff,
135 }, rand() % 16 + 1);
136
137 for (t1 = l1; t1; t1 = t1->next)
138 l2 = tlpm_add(l2, t1->key, t1->n_bits);
139
140 for (i = 0; i < (1 << 8); ++i) {
141 uint8_t key[] = { rand() % 0xff, rand() % 0xff };
142
143 t1 = tlpm_match(l1, key, 16);
144 t2 = tlpm_match(l2, key, 16);
145
146 assert(!t1 == !t2);
147 if (t1) {
148 assert(t1->n_bits == t2->n_bits);
149 for (j = 0; j < t1->n_bits; ++j)
150 assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
151 (t2->key[j / 8] & (1 << (7 - j % 8))));
152 }
153 }
154
155 tlpm_clear(l1);
156 tlpm_clear(l2);
157}
158
159static void test_lpm_map(int keysize)
160{
161 size_t i, j, n_matches, n_nodes, n_lookups;
162 struct tlpm_node *t, *list = NULL;
163 struct bpf_lpm_trie_key *key;
164 uint8_t *data, *value;
165 int r, map;
166
167 /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
168 * prefixes and insert it into both tlpm and bpf-lpm. Then run some
169 * randomized lookups and verify both maps return the same result.
170 */
171
172 n_matches = 0;
173 n_nodes = 1 << 8;
174 n_lookups = 1 << 16;
175
176 data = alloca(keysize);
177 memset(data, 0, keysize);
178
179 value = alloca(keysize + 1);
180 memset(value, 0, keysize + 1);
181
182 key = alloca(sizeof(*key) + keysize);
183 memset(key, 0, sizeof(*key) + keysize);
184
185 map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
186 sizeof(*key) + keysize,
187 keysize + 1,
188 4096,
189 BPF_F_NO_PREALLOC);
190 assert(map >= 0);
191
192 for (i = 0; i < n_nodes; ++i) {
193 for (j = 0; j < keysize; ++j)
194 value[j] = rand() & 0xff;
195 value[keysize] = rand() % (8 * keysize + 1);
196
197 list = tlpm_add(list, value, value[keysize]);
198
199 key->prefixlen = value[keysize];
200 memcpy(key->data, value, keysize);
201 r = bpf_map_update_elem(map, key, value, 0);
202 assert(!r);
203 }
204
205 for (i = 0; i < n_lookups; ++i) {
206 for (j = 0; j < keysize; ++j)
207 data[j] = rand() & 0xff;
208
209 t = tlpm_match(list, data, 8 * keysize);
210
211 key->prefixlen = 8 * keysize;
212 memcpy(key->data, data, keysize);
213 r = bpf_map_lookup_elem(map, key, value);
214 assert(!r || errno == ENOENT);
215 assert(!t == !!r);
216
217 if (t) {
218 ++n_matches;
219 assert(t->n_bits == value[keysize]);
220 for (j = 0; j < t->n_bits; ++j)
221 assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
222 (value[j / 8] & (1 << (7 - j % 8))));
223 }
224 }
225
226 close(map);
227 tlpm_clear(list);
228
229 /* With 255 random nodes in the map, we are pretty likely to match
230 * something on every lookup. For statistics, use this:
231 *
232 * printf(" nodes: %zu\n"
233 * "lookups: %zu\n"
234 * "matches: %zu\n", n_nodes, n_lookups, n_matches);
235 */
236}
237
238/* Test the implementation with some 'real world' examples */
239
240static void test_lpm_ipaddr(void)
241{
242 struct bpf_lpm_trie_key *key_ipv4;
243 struct bpf_lpm_trie_key *key_ipv6;
244 size_t key_size_ipv4;
245 size_t key_size_ipv6;
246 int map_fd_ipv4;
247 int map_fd_ipv6;
248 __u64 value;
249
250 key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
251 key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
252 key_ipv4 = alloca(key_size_ipv4);
253 key_ipv6 = alloca(key_size_ipv6);
254
255 map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
256 key_size_ipv4, sizeof(value),
257 100, BPF_F_NO_PREALLOC);
258 assert(map_fd_ipv4 >= 0);
259
260 map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
261 key_size_ipv6, sizeof(value),
262 100, BPF_F_NO_PREALLOC);
263 assert(map_fd_ipv6 >= 0);
264
265 /* Fill data some IPv4 and IPv6 address ranges */
266 value = 1;
267 key_ipv4->prefixlen = 16;
268 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
269 assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
270
271 value = 2;
272 key_ipv4->prefixlen = 24;
273 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
274 assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
275
276 value = 3;
277 key_ipv4->prefixlen = 24;
278 inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
279 assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
280
281 value = 5;
282 key_ipv4->prefixlen = 24;
283 inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
284 assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
285
286 value = 4;
287 key_ipv4->prefixlen = 23;
288 inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
289 assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
290
291 value = 0xdeadbeef;
292 key_ipv6->prefixlen = 64;
293 inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
294 assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);
295
296 /* Set tprefixlen to maximum for lookups */
297 key_ipv4->prefixlen = 32;
298 key_ipv6->prefixlen = 128;
299
300 /* Test some lookups that should come back with a value */
301 inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
302 assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
303 assert(value == 3);
304
305 inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
306 assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
307 assert(value == 2);
308
309 inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
310 assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
311 assert(value == 0xdeadbeef);
312
313 inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
314 assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
315 assert(value == 0xdeadbeef);
316
317 /* Test some lookups that should not match any entry */
318 inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
319 assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
320 errno == ENOENT);
321
322 inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
323 assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
324 errno == ENOENT);
325
326 inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
327 assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
328 errno == ENOENT);
329
330 close(map_fd_ipv4);
331 close(map_fd_ipv6);
332}
333
334int main(void)
335{
336 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
337 int i, ret;
338
339 /* we want predictable, pseudo random tests */
340 srand(0xf00ba1);
341
342 /* allow unlimited locked memory */
343 ret = setrlimit(RLIMIT_MEMLOCK, &limit);
344 if (ret < 0)
345 perror("Unable to lift memlock rlimit");
346
347 test_lpm_basic();
348 test_lpm_order();
349
350 /* Test with 8, 16, 24, 32, ... 128 bit prefix length */
351 for (i = 1; i <= 16; ++i)
352 test_lpm_map(i);
353
354 test_lpm_ipaddr();
355
356 printf("test_lpm: OK\n");
357 return 0;
358}
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 9f7bd1915c21..00b0aff56e2e 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -18,7 +18,7 @@
18#include <sys/wait.h> 18#include <sys/wait.h>
19#include <sys/resource.h> 19#include <sys/resource.h>
20 20
21#include "bpf_sys.h" 21#include <bpf/bpf.h>
22#include "bpf_util.h" 22#include "bpf_util.h"
23 23
24#define LOCAL_FREE_TARGET (128) 24#define LOCAL_FREE_TARGET (128)
@@ -30,11 +30,11 @@ static int create_map(int map_type, int map_flags, unsigned int size)
30{ 30{
31 int map_fd; 31 int map_fd;
32 32
33 map_fd = bpf_map_create(map_type, sizeof(unsigned long long), 33 map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
34 sizeof(unsigned long long), size, map_flags); 34 sizeof(unsigned long long), size, map_flags);
35 35
36 if (map_fd == -1) 36 if (map_fd == -1)
37 perror("bpf_map_create"); 37 perror("bpf_create_map");
38 38
39 return map_fd; 39 return map_fd;
40} 40}
@@ -45,9 +45,9 @@ static int map_subset(int map0, int map1)
45 unsigned long long value0[nr_cpus], value1[nr_cpus]; 45 unsigned long long value0[nr_cpus], value1[nr_cpus];
46 int ret; 46 int ret;
47 47
48 while (!bpf_map_next_key(map1, &next_key, &next_key)) { 48 while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
49 assert(!bpf_map_lookup(map1, &next_key, value1)); 49 assert(!bpf_map_lookup_elem(map1, &next_key, value1));
50 ret = bpf_map_lookup(map0, &next_key, value0); 50 ret = bpf_map_lookup_elem(map0, &next_key, value0);
51 if (ret) { 51 if (ret) {
52 printf("key:%llu not found from map. %s(%d)\n", 52 printf("key:%llu not found from map. %s(%d)\n",
53 next_key, strerror(errno), errno); 53 next_key, strerror(errno), errno);
@@ -119,52 +119,54 @@ static void test_lru_sanity0(int map_type, int map_flags)
119 /* insert key=1 element */ 119 /* insert key=1 element */
120 120
121 key = 1; 121 key = 1;
122 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 122 assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
123 assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); 123 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
124 BPF_NOEXIST));
124 125
125 /* BPF_NOEXIST means: add new element if it doesn't exist */ 126 /* BPF_NOEXIST means: add new element if it doesn't exist */
126 assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 && 127 assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
127 /* key=1 already exists */ 128 /* key=1 already exists */
128 errno == EEXIST); 129 && errno == EEXIST);
129 130
130 assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 && 131 assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
131 errno == EINVAL); 132 errno == EINVAL);
132 133
133 /* insert key=2 element */ 134 /* insert key=2 element */
134 135
135 /* check that key=2 is not found */ 136 /* check that key=2 is not found */
136 key = 2; 137 key = 2;
137 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && 138 assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
138 errno == ENOENT); 139 errno == ENOENT);
139 140
140 /* BPF_EXIST means: update existing element */ 141 /* BPF_EXIST means: update existing element */
141 assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 && 142 assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
142 /* key=2 is not there */ 143 /* key=2 is not there */
143 errno == ENOENT); 144 errno == ENOENT);
144 145
145 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 146 assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
146 147
147 /* insert key=3 element */ 148 /* insert key=3 element */
148 149
149 /* check that key=3 is not found */ 150 /* check that key=3 is not found */
150 key = 3; 151 key = 3;
151 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 && 152 assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
152 errno == ENOENT); 153 errno == ENOENT);
153 154
154 /* check that key=1 can be found and mark the ref bit to 155 /* check that key=1 can be found and mark the ref bit to
155 * stop LRU from removing key=1 156 * stop LRU from removing key=1
156 */ 157 */
157 key = 1; 158 key = 1;
158 assert(!bpf_map_lookup(lru_map_fd, &key, value)); 159 assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
159 assert(value[0] == 1234); 160 assert(value[0] == 1234);
160 161
161 key = 3; 162 key = 3;
162 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 163 assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
163 assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST)); 164 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
165 BPF_NOEXIST));
164 166
165 /* key=2 has been removed from the LRU */ 167 /* key=2 has been removed from the LRU */
166 key = 2; 168 key = 2;
167 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1); 169 assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1);
168 170
169 assert(map_equal(lru_map_fd, expected_map_fd)); 171 assert(map_equal(lru_map_fd, expected_map_fd));
170 172
@@ -217,14 +219,15 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
217 /* Insert 1 to tgt_free (+tgt_free keys) */ 219 /* Insert 1 to tgt_free (+tgt_free keys) */
218 end_key = 1 + tgt_free; 220 end_key = 1 + tgt_free;
219 for (key = 1; key < end_key; key++) 221 for (key = 1; key < end_key; key++)
220 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 222 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
223 BPF_NOEXIST));
221 224
222 /* Lookup 1 to tgt_free/2 */ 225 /* Lookup 1 to tgt_free/2 */
223 end_key = 1 + batch_size; 226 end_key = 1 + batch_size;
224 for (key = 1; key < end_key; key++) { 227 for (key = 1; key < end_key; key++) {
225 assert(!bpf_map_lookup(lru_map_fd, &key, value)); 228 assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
226 assert(!bpf_map_update(expected_map_fd, &key, value, 229 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
227 BPF_NOEXIST)); 230 BPF_NOEXIST));
228 } 231 }
229 232
230 /* Insert 1+tgt_free to 2*tgt_free 233 /* Insert 1+tgt_free to 2*tgt_free
@@ -234,9 +237,10 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
234 key = 1 + tgt_free; 237 key = 1 + tgt_free;
235 end_key = key + tgt_free; 238 end_key = key + tgt_free;
236 for (; key < end_key; key++) { 239 for (; key < end_key; key++) {
237 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 240 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
238 assert(!bpf_map_update(expected_map_fd, &key, value, 241 BPF_NOEXIST));
239 BPF_NOEXIST)); 242 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
243 BPF_NOEXIST));
240 } 244 }
241 245
242 assert(map_equal(lru_map_fd, expected_map_fd)); 246 assert(map_equal(lru_map_fd, expected_map_fd));
@@ -301,9 +305,10 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
301 /* Insert 1 to tgt_free (+tgt_free keys) */ 305 /* Insert 1 to tgt_free (+tgt_free keys) */
302 end_key = 1 + tgt_free; 306 end_key = 1 + tgt_free;
303 for (key = 1; key < end_key; key++) 307 for (key = 1; key < end_key; key++)
304 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 308 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
309 BPF_NOEXIST));
305 310
306 /* Any bpf_map_update will require to acquire a new node 311 /* Any bpf_map_update_elem will require to acquire a new node
307 * from LRU first. 312 * from LRU first.
308 * 313 *
309 * The local list is running out of free nodes. 314 * The local list is running out of free nodes.
@@ -316,10 +321,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
316 */ 321 */
317 key = 1; 322 key = 1;
318 if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 323 if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
319 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 324 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
320 assert(!bpf_map_delete(lru_map_fd, &key)); 325 BPF_NOEXIST));
326 assert(!bpf_map_delete_elem(lru_map_fd, &key));
321 } else { 327 } else {
322 assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST)); 328 assert(bpf_map_update_elem(lru_map_fd, &key, value,
329 BPF_EXIST));
323 } 330 }
324 331
325 /* Re-insert 1 to tgt_free/2 again and do a lookup 332 /* Re-insert 1 to tgt_free/2 again and do a lookup
@@ -328,12 +335,13 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
328 end_key = 1 + batch_size; 335 end_key = 1 + batch_size;
329 value[0] = 4321; 336 value[0] = 4321;
330 for (key = 1; key < end_key; key++) { 337 for (key = 1; key < end_key; key++) {
331 assert(bpf_map_lookup(lru_map_fd, &key, value)); 338 assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
332 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 339 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
333 assert(!bpf_map_lookup(lru_map_fd, &key, value)); 340 BPF_NOEXIST));
341 assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
334 assert(value[0] == 4321); 342 assert(value[0] == 4321);
335 assert(!bpf_map_update(expected_map_fd, &key, value, 343 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
336 BPF_NOEXIST)); 344 BPF_NOEXIST));
337 } 345 }
338 346
339 value[0] = 1234; 347 value[0] = 1234;
@@ -344,14 +352,16 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
344 /* These newly added but not referenced keys will be 352 /* These newly added but not referenced keys will be
345 * gone during the next LRU shrink. 353 * gone during the next LRU shrink.
346 */ 354 */
347 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 355 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
356 BPF_NOEXIST));
348 357
349 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */ 358 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
350 end_key = key + tgt_free; 359 end_key = key + tgt_free;
351 for (; key < end_key; key++) { 360 for (; key < end_key; key++) {
352 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 361 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
353 assert(!bpf_map_update(expected_map_fd, &key, value, 362 BPF_NOEXIST));
354 BPF_NOEXIST)); 363 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
364 BPF_NOEXIST));
355 } 365 }
356 366
357 assert(map_equal(lru_map_fd, expected_map_fd)); 367 assert(map_equal(lru_map_fd, expected_map_fd));
@@ -401,14 +411,15 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
401 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ 411 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
402 end_key = 1 + (2 * tgt_free); 412 end_key = 1 + (2 * tgt_free);
403 for (key = 1; key < end_key; key++) 413 for (key = 1; key < end_key; key++)
404 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 414 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
415 BPF_NOEXIST));
405 416
406 /* Lookup key 1 to tgt_free*3/2 */ 417 /* Lookup key 1 to tgt_free*3/2 */
407 end_key = tgt_free + batch_size; 418 end_key = tgt_free + batch_size;
408 for (key = 1; key < end_key; key++) { 419 for (key = 1; key < end_key; key++) {
409 assert(!bpf_map_lookup(lru_map_fd, &key, value)); 420 assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
410 assert(!bpf_map_update(expected_map_fd, &key, value, 421 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
411 BPF_NOEXIST)); 422 BPF_NOEXIST));
412 } 423 }
413 424
414 /* Add 1+2*tgt_free to tgt_free*5/2 425 /* Add 1+2*tgt_free to tgt_free*5/2
@@ -417,9 +428,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
417 key = 2 * tgt_free + 1; 428 key = 2 * tgt_free + 1;
418 end_key = key + batch_size; 429 end_key = key + batch_size;
419 for (; key < end_key; key++) { 430 for (; key < end_key; key++) {
420 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 431 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
421 assert(!bpf_map_update(expected_map_fd, &key, value, 432 BPF_NOEXIST));
422 BPF_NOEXIST)); 433 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
434 BPF_NOEXIST));
423 } 435 }
424 436
425 assert(map_equal(lru_map_fd, expected_map_fd)); 437 assert(map_equal(lru_map_fd, expected_map_fd));
@@ -457,27 +469,29 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
457 value[0] = 1234; 469 value[0] = 1234;
458 470
459 for (key = 1; key <= 2 * tgt_free; key++) 471 for (key = 1; key <= 2 * tgt_free; key++)
460 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 472 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
473 BPF_NOEXIST));
461 474
462 key = 1; 475 key = 1;
463 assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 476 assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
464 477
465 for (key = 1; key <= tgt_free; key++) { 478 for (key = 1; key <= tgt_free; key++) {
466 assert(!bpf_map_lookup(lru_map_fd, &key, value)); 479 assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
467 assert(!bpf_map_update(expected_map_fd, &key, value, 480 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
468 BPF_NOEXIST)); 481 BPF_NOEXIST));
469 } 482 }
470 483
471 for (; key <= 2 * tgt_free; key++) { 484 for (; key <= 2 * tgt_free; key++) {
472 assert(!bpf_map_delete(lru_map_fd, &key)); 485 assert(!bpf_map_delete_elem(lru_map_fd, &key));
473 assert(bpf_map_delete(lru_map_fd, &key)); 486 assert(bpf_map_delete_elem(lru_map_fd, &key));
474 } 487 }
475 488
476 end_key = key + 2 * tgt_free; 489 end_key = key + 2 * tgt_free;
477 for (; key < end_key; key++) { 490 for (; key < end_key; key++) {
478 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST)); 491 assert(!bpf_map_update_elem(lru_map_fd, &key, value,
479 assert(!bpf_map_update(expected_map_fd, &key, value, 492 BPF_NOEXIST));
480 BPF_NOEXIST)); 493 assert(!bpf_map_update_elem(expected_map_fd, &key, value,
494 BPF_NOEXIST));
481 } 495 }
482 496
483 assert(map_equal(lru_map_fd, expected_map_fd)); 497 assert(map_equal(lru_map_fd, expected_map_fd));
@@ -493,16 +507,16 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
493 unsigned long long key, value[nr_cpus]; 507 unsigned long long key, value[nr_cpus];
494 508
495 /* Ensure the last key inserted by previous CPU can be found */ 509 /* Ensure the last key inserted by previous CPU can be found */
496 assert(!bpf_map_lookup(map_fd, &last_key, value)); 510 assert(!bpf_map_lookup_elem(map_fd, &last_key, value));
497 511
498 value[0] = 1234; 512 value[0] = 1234;
499 513
500 key = last_key + 1; 514 key = last_key + 1;
501 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); 515 assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
502 assert(!bpf_map_lookup(map_fd, &key, value)); 516 assert(!bpf_map_lookup_elem(map_fd, &key, value));
503 517
504 /* Cannot find the last key because it was removed by LRU */ 518 /* Cannot find the last key because it was removed by LRU */
505 assert(bpf_map_lookup(map_fd, &last_key, value)); 519 assert(bpf_map_lookup_elem(map_fd, &last_key, value));
506} 520}
507 521
508/* Test map with only one element */ 522/* Test map with only one element */
@@ -523,7 +537,7 @@ static void test_lru_sanity5(int map_type, int map_flags)
523 537
524 value[0] = 1234; 538 value[0] = 1234;
525 key = 0; 539 key = 0;
526 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); 540 assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
527 541
528 while (sched_next_online(0, &next_cpu) != -1) { 542 while (sched_next_online(0, &next_cpu) != -1) {
529 pid_t pid; 543 pid_t pid;
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index eedfef8d2946..cada17ac00b8 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -21,7 +21,7 @@
21 21
22#include <linux/bpf.h> 22#include <linux/bpf.h>
23 23
24#include "bpf_sys.h" 24#include <bpf/bpf.h>
25#include "bpf_util.h" 25#include "bpf_util.h"
26 26
27static int map_flags; 27static int map_flags;
@@ -31,7 +31,7 @@ static void test_hashmap(int task, void *data)
31 long long key, next_key, value; 31 long long key, next_key, value;
32 int fd; 32 int fd;
33 33
34 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 34 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
35 2, map_flags); 35 2, map_flags);
36 if (fd < 0) { 36 if (fd < 0) {
37 printf("Failed to create hashmap '%s'!\n", strerror(errno)); 37 printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -41,69 +41,70 @@ static void test_hashmap(int task, void *data)
41 key = 1; 41 key = 1;
42 value = 1234; 42 value = 1234;
43 /* Insert key=1 element. */ 43 /* Insert key=1 element. */
44 assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 44 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
45 45
46 value = 0; 46 value = 0;
47 /* BPF_NOEXIST means add new element if it doesn't exist. */ 47 /* BPF_NOEXIST means add new element if it doesn't exist. */
48 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 48 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
49 /* key=1 already exists. */ 49 /* key=1 already exists. */
50 errno == EEXIST); 50 errno == EEXIST);
51 51
52 /* -1 is an invalid flag. */ 52 /* -1 is an invalid flag. */
53 assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL); 53 assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 &&
54 errno == EINVAL);
54 55
55 /* Check that key=1 can be found. */ 56 /* Check that key=1 can be found. */
56 assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 57 assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
57 58
58 key = 2; 59 key = 2;
59 /* Check that key=2 is not found. */ 60 /* Check that key=2 is not found. */
60 assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 61 assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
61 62
62 /* BPF_EXIST means update existing element. */ 63 /* BPF_EXIST means update existing element. */
63 assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 64 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
64 /* key=2 is not there. */ 65 /* key=2 is not there. */
65 errno == ENOENT); 66 errno == ENOENT);
66 67
67 /* Insert key=2 element. */ 68 /* Insert key=2 element. */
68 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 69 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
69 70
70 /* key=1 and key=2 were inserted, check that key=0 cannot be 71 /* key=1 and key=2 were inserted, check that key=0 cannot be
71 * inserted due to max_entries limit. 72 * inserted due to max_entries limit.
72 */ 73 */
73 key = 0; 74 key = 0;
74 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 75 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
75 errno == E2BIG); 76 errno == E2BIG);
76 77
77 /* Update existing element, though the map is full. */ 78 /* Update existing element, though the map is full. */
78 key = 1; 79 key = 1;
79 assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
80 key = 2; 81 key = 2;
81 assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
82 key = 1; 83 key = 1;
83 assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 84 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
84 85
85 /* Check that key = 0 doesn't exist. */ 86 /* Check that key = 0 doesn't exist. */
86 key = 0; 87 key = 0;
87 assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 88 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
88 89
89 /* Iterate over two elements. */ 90 /* Iterate over two elements. */
90 assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 91 assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
91 (next_key == 1 || next_key == 2)); 92 (next_key == 1 || next_key == 2));
92 assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 93 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
93 (next_key == 1 || next_key == 2)); 94 (next_key == 1 || next_key == 2));
94 assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 95 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
95 errno == ENOENT); 96 errno == ENOENT);
96 97
97 /* Delete both elements. */ 98 /* Delete both elements. */
98 key = 1; 99 key = 1;
99 assert(bpf_map_delete(fd, &key) == 0); 100 assert(bpf_map_delete_elem(fd, &key) == 0);
100 key = 2; 101 key = 2;
101 assert(bpf_map_delete(fd, &key) == 0); 102 assert(bpf_map_delete_elem(fd, &key) == 0);
102 assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 103 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
103 104
104 key = 0; 105 key = 0;
105 /* Check that map is empty. */ 106 /* Check that map is empty. */
106 assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 107 assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
107 errno == ENOENT); 108 errno == ENOENT);
108 109
109 close(fd); 110 close(fd);
@@ -117,7 +118,7 @@ static void test_hashmap_percpu(int task, void *data)
117 int expected_key_mask = 0; 118 int expected_key_mask = 0;
118 int fd, i; 119 int fd, i;
119 120
120 fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), 121 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
121 sizeof(value[0]), 2, map_flags); 122 sizeof(value[0]), 2, map_flags);
122 if (fd < 0) { 123 if (fd < 0) {
123 printf("Failed to create hashmap '%s'!\n", strerror(errno)); 124 printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -130,53 +131,54 @@ static void test_hashmap_percpu(int task, void *data)
130 key = 1; 131 key = 1;
131 /* Insert key=1 element. */ 132 /* Insert key=1 element. */
132 assert(!(expected_key_mask & key)); 133 assert(!(expected_key_mask & key));
133 assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0); 134 assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
134 expected_key_mask |= key; 135 expected_key_mask |= key;
135 136
136 /* BPF_NOEXIST means add new element if it doesn't exist. */ 137 /* BPF_NOEXIST means add new element if it doesn't exist. */
137 assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 138 assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
138 /* key=1 already exists. */ 139 /* key=1 already exists. */
139 errno == EEXIST); 140 errno == EEXIST);
140 141
141 /* -1 is an invalid flag. */ 142 /* -1 is an invalid flag. */
142 assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL); 143 assert(bpf_map_update_elem(fd, &key, value, -1) == -1 &&
144 errno == EINVAL);
143 145
144 /* Check that key=1 can be found. Value could be 0 if the lookup 146 /* Check that key=1 can be found. Value could be 0 if the lookup
145 * was run from a different CPU. 147 * was run from a different CPU.
146 */ 148 */
147 value[0] = 1; 149 value[0] = 1;
148 assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100); 150 assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100);
149 151
150 key = 2; 152 key = 2;
151 /* Check that key=2 is not found. */ 153 /* Check that key=2 is not found. */
152 assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT); 154 assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT);
153 155
154 /* BPF_EXIST means update existing element. */ 156 /* BPF_EXIST means update existing element. */
155 assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 && 157 assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 &&
156 /* key=2 is not there. */ 158 /* key=2 is not there. */
157 errno == ENOENT); 159 errno == ENOENT);
158 160
159 /* Insert key=2 element. */ 161 /* Insert key=2 element. */
160 assert(!(expected_key_mask & key)); 162 assert(!(expected_key_mask & key));
161 assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0); 163 assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
162 expected_key_mask |= key; 164 expected_key_mask |= key;
163 165
164 /* key=1 and key=2 were inserted, check that key=0 cannot be 166 /* key=1 and key=2 were inserted, check that key=0 cannot be
165 * inserted due to max_entries limit. 167 * inserted due to max_entries limit.
166 */ 168 */
167 key = 0; 169 key = 0;
168 assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 && 170 assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
169 errno == E2BIG); 171 errno == E2BIG);
170 172
171 /* Check that key = 0 doesn't exist. */ 173 /* Check that key = 0 doesn't exist. */
172 assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 174 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
173 175
174 /* Iterate over two elements. */ 176 /* Iterate over two elements. */
175 while (!bpf_map_next_key(fd, &key, &next_key)) { 177 while (!bpf_map_get_next_key(fd, &key, &next_key)) {
176 assert((expected_key_mask & next_key) == next_key); 178 assert((expected_key_mask & next_key) == next_key);
177 expected_key_mask &= ~next_key; 179 expected_key_mask &= ~next_key;
178 180
179 assert(bpf_map_lookup(fd, &next_key, value) == 0); 181 assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
180 182
181 for (i = 0; i < nr_cpus; i++) 183 for (i = 0; i < nr_cpus; i++)
182 assert(value[i] == i + 100); 184 assert(value[i] == i + 100);
@@ -187,18 +189,18 @@ static void test_hashmap_percpu(int task, void *data)
187 189
188 /* Update with BPF_EXIST. */ 190 /* Update with BPF_EXIST. */
189 key = 1; 191 key = 1;
190 assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0); 192 assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
191 193
192 /* Delete both elements. */ 194 /* Delete both elements. */
193 key = 1; 195 key = 1;
194 assert(bpf_map_delete(fd, &key) == 0); 196 assert(bpf_map_delete_elem(fd, &key) == 0);
195 key = 2; 197 key = 2;
196 assert(bpf_map_delete(fd, &key) == 0); 198 assert(bpf_map_delete_elem(fd, &key) == 0);
197 assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT); 199 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
198 200
199 key = 0; 201 key = 0;
200 /* Check that map is empty. */ 202 /* Check that map is empty. */
201 assert(bpf_map_next_key(fd, &key, &next_key) == -1 && 203 assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
202 errno == ENOENT); 204 errno == ENOENT);
203 205
204 close(fd); 206 close(fd);
@@ -209,7 +211,7 @@ static void test_arraymap(int task, void *data)
209 int key, next_key, fd; 211 int key, next_key, fd;
210 long long value; 212 long long value;
211 213
212 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 214 fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
213 2, 0); 215 2, 0);
214 if (fd < 0) { 216 if (fd < 0) {
215 printf("Failed to create arraymap '%s'!\n", strerror(errno)); 217 printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -219,40 +221,40 @@ static void test_arraymap(int task, void *data)
219 key = 1; 221 key = 1;
220 value = 1234; 222 value = 1234;
221 /* Insert key=1 element. */ 223 /* Insert key=1 element. */
222 assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0); 224 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
223 225
224 value = 0; 226 value = 0;
225 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 227 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
226 errno == EEXIST); 228 errno == EEXIST);
227 229
228 /* Check that key=1 can be found. */ 230 /* Check that key=1 can be found. */
229 assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234); 231 assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
230 232
231 key = 0; 233 key = 0;
232 /* Check that key=0 is also found and zero initialized. */ 234 /* Check that key=0 is also found and zero initialized. */
233 assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 235 assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
234 236
235 /* key=0 and key=1 were inserted, check that key=2 cannot be inserted 237 /* key=0 and key=1 were inserted, check that key=2 cannot be inserted
236 * due to max_entries limit. 238 * due to max_entries limit.
237 */ 239 */
238 key = 2; 240 key = 2;
239 assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 && 241 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
240 errno == E2BIG); 242 errno == E2BIG);
241 243
242 /* Check that key = 2 doesn't exist. */ 244 /* Check that key = 2 doesn't exist. */
243 assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 245 assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
244 246
245 /* Iterate over two elements. */ 247 /* Iterate over two elements. */
246 assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 248 assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
247 next_key == 0); 249 next_key == 0);
248 assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 250 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
249 next_key == 1); 251 next_key == 1);
250 assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 252 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
251 errno == ENOENT); 253 errno == ENOENT);
252 254
253 /* Delete shouldn't succeed. */ 255 /* Delete shouldn't succeed. */
254 key = 1; 256 key = 1;
255 assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 257 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
256 258
257 close(fd); 259 close(fd);
258} 260}
@@ -263,7 +265,7 @@ static void test_arraymap_percpu(int task, void *data)
263 int key, next_key, fd, i; 265 int key, next_key, fd, i;
264 long values[nr_cpus]; 266 long values[nr_cpus];
265 267
266 fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 268 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
267 sizeof(values[0]), 2, 0); 269 sizeof(values[0]), 2, 0);
268 if (fd < 0) { 270 if (fd < 0) {
269 printf("Failed to create arraymap '%s'!\n", strerror(errno)); 271 printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -275,39 +277,39 @@ static void test_arraymap_percpu(int task, void *data)
275 277
276 key = 1; 278 key = 1;
277 /* Insert key=1 element. */ 279 /* Insert key=1 element. */
278 assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 280 assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
279 281
280 values[0] = 0; 282 values[0] = 0;
281 assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 && 283 assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
282 errno == EEXIST); 284 errno == EEXIST);
283 285
284 /* Check that key=1 can be found. */ 286 /* Check that key=1 can be found. */
285 assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100); 287 assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100);
286 288
287 key = 0; 289 key = 0;
288 /* Check that key=0 is also found and zero initialized. */ 290 /* Check that key=0 is also found and zero initialized. */
289 assert(bpf_map_lookup(fd, &key, values) == 0 && 291 assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
290 values[0] == 0 && values[nr_cpus - 1] == 0); 292 values[0] == 0 && values[nr_cpus - 1] == 0);
291 293
292 /* Check that key=2 cannot be inserted due to max_entries limit. */ 294 /* Check that key=2 cannot be inserted due to max_entries limit. */
293 key = 2; 295 key = 2;
294 assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 && 296 assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 &&
295 errno == E2BIG); 297 errno == E2BIG);
296 298
297 /* Check that key = 2 doesn't exist. */ 299 /* Check that key = 2 doesn't exist. */
298 assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT); 300 assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT);
299 301
300 /* Iterate over two elements. */ 302 /* Iterate over two elements. */
301 assert(bpf_map_next_key(fd, &key, &next_key) == 0 && 303 assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
302 next_key == 0); 304 next_key == 0);
303 assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 && 305 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
304 next_key == 1); 306 next_key == 1);
305 assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 && 307 assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
306 errno == ENOENT); 308 errno == ENOENT);
307 309
308 /* Delete shouldn't succeed. */ 310 /* Delete shouldn't succeed. */
309 key = 1; 311 key = 1;
310 assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL); 312 assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
311 313
312 close(fd); 314 close(fd);
313} 315}
@@ -319,7 +321,7 @@ static void test_arraymap_percpu_many_keys(void)
319 long values[nr_cpus]; 321 long values[nr_cpus];
320 int key, fd, i; 322 int key, fd, i;
321 323
322 fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), 324 fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
323 sizeof(values[0]), nr_keys, 0); 325 sizeof(values[0]), nr_keys, 0);
324 if (fd < 0) { 326 if (fd < 0) {
325 printf("Failed to create per-cpu arraymap '%s'!\n", 327 printf("Failed to create per-cpu arraymap '%s'!\n",
@@ -331,13 +333,13 @@ static void test_arraymap_percpu_many_keys(void)
331 values[i] = i + 10; 333 values[i] = i + 10;
332 334
333 for (key = 0; key < nr_keys; key++) 335 for (key = 0; key < nr_keys; key++)
334 assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0); 336 assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
335 337
336 for (key = 0; key < nr_keys; key++) { 338 for (key = 0; key < nr_keys; key++) {
337 for (i = 0; i < nr_cpus; i++) 339 for (i = 0; i < nr_cpus; i++)
338 values[i] = 0; 340 values[i] = 0;
339 341
340 assert(bpf_map_lookup(fd, &key, values) == 0); 342 assert(bpf_map_lookup_elem(fd, &key, values) == 0);
341 343
342 for (i = 0; i < nr_cpus; i++) 344 for (i = 0; i < nr_cpus; i++)
343 assert(values[i] == i + 10); 345 assert(values[i] == i + 10);
@@ -357,7 +359,7 @@ static void test_map_large(void)
357 } key; 359 } key;
358 int fd, i, value; 360 int fd, i, value;
359 361
360 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 362 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
361 MAP_SIZE, map_flags); 363 MAP_SIZE, map_flags);
362 if (fd < 0) { 364 if (fd < 0) {
363 printf("Failed to create large map '%s'!\n", strerror(errno)); 365 printf("Failed to create large map '%s'!\n", strerror(errno));
@@ -368,22 +370,22 @@ static void test_map_large(void)
368 key = (struct bigkey) { .c = i }; 370 key = (struct bigkey) { .c = i };
369 value = i; 371 value = i;
370 372
371 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 373 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
372 } 374 }
373 375
374 key.c = -1; 376 key.c = -1;
375 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 377 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
376 errno == E2BIG); 378 errno == E2BIG);
377 379
378 /* Iterate through all elements. */ 380 /* Iterate through all elements. */
379 for (i = 0; i < MAP_SIZE; i++) 381 for (i = 0; i < MAP_SIZE; i++)
380 assert(bpf_map_next_key(fd, &key, &key) == 0); 382 assert(bpf_map_get_next_key(fd, &key, &key) == 0);
381 assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 383 assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
382 384
383 key.c = 0; 385 key.c = 0;
384 assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0); 386 assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
385 key.a = 1; 387 key.a = 1;
386 assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT); 388 assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
387 389
388 close(fd); 390 close(fd);
389} 391}
@@ -437,10 +439,12 @@ static void do_work(int fn, void *data)
437 key = value = i; 439 key = value = i;
438 440
439 if (do_update) { 441 if (do_update) {
440 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0); 442 assert(bpf_map_update_elem(fd, &key, &value,
441 assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0); 443 BPF_NOEXIST) == 0);
444 assert(bpf_map_update_elem(fd, &key, &value,
445 BPF_EXIST) == 0);
442 } else { 446 } else {
443 assert(bpf_map_delete(fd, &key) == 0); 447 assert(bpf_map_delete_elem(fd, &key) == 0);
444 } 448 }
445 } 449 }
446} 450}
@@ -450,7 +454,7 @@ static void test_map_parallel(void)
450 int i, fd, key = 0, value = 0; 454 int i, fd, key = 0, value = 0;
451 int data[2]; 455 int data[2];
452 456
453 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 457 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
454 MAP_SIZE, map_flags); 458 MAP_SIZE, map_flags);
455 if (fd < 0) { 459 if (fd < 0) {
456 printf("Failed to create map for parallel test '%s'!\n", 460 printf("Failed to create map for parallel test '%s'!\n",
@@ -468,20 +472,20 @@ static void test_map_parallel(void)
468 run_parallel(TASKS, do_work, data); 472 run_parallel(TASKS, do_work, data);
469 473
470 /* Check that key=0 is already there. */ 474 /* Check that key=0 is already there. */
471 assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 && 475 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
472 errno == EEXIST); 476 errno == EEXIST);
473 477
474 /* Check that all elements were inserted. */ 478 /* Check that all elements were inserted. */
475 key = -1; 479 key = -1;
476 for (i = 0; i < MAP_SIZE; i++) 480 for (i = 0; i < MAP_SIZE; i++)
477 assert(bpf_map_next_key(fd, &key, &key) == 0); 481 assert(bpf_map_get_next_key(fd, &key, &key) == 0);
478 assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 482 assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
479 483
480 /* Another check for all elements */ 484 /* Another check for all elements */
481 for (i = 0; i < MAP_SIZE; i++) { 485 for (i = 0; i < MAP_SIZE; i++) {
482 key = MAP_SIZE - i - 1; 486 key = MAP_SIZE - i - 1;
483 487
484 assert(bpf_map_lookup(fd, &key, &value) == 0 && 488 assert(bpf_map_lookup_elem(fd, &key, &value) == 0 &&
485 value == key); 489 value == key);
486 } 490 }
487 491
@@ -491,7 +495,7 @@ static void test_map_parallel(void)
491 495
492 /* Nothing should be left. */ 496 /* Nothing should be left. */
493 key = -1; 497 key = -1;
494 assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT); 498 assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
495} 499}
496 500
497static void run_all_tests(void) 501static void run_all_tests(void)
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
new file mode 100644
index 000000000000..de409fc50c35
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -0,0 +1,203 @@
1#include <stdint.h>
2#include <stdio.h>
3#include <stdlib.h>
4#include <ctype.h>
5#include <time.h>
6#include <errno.h>
7#include <unistd.h>
8#include <string.h>
9#include <sched.h>
10#include <limits.h>
11#include <assert.h>
12
13#include <sys/socket.h>
14#include <sys/resource.h>
15
16#include <linux/filter.h>
17#include <linux/bpf.h>
18#include <linux/if_alg.h>
19
20#include <bpf/bpf.h>
21
22#include "../../../include/linux/filter.h"
23
24static struct bpf_insn prog[BPF_MAXINSNS];
25
26static void bpf_gen_imm_prog(unsigned int insns, int fd_map)
27{
28 int i;
29
30 srand(time(NULL));
31 for (i = 0; i < insns; i++)
32 prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand());
33 prog[i - 1] = BPF_EXIT_INSN();
34}
35
36static void bpf_gen_map_prog(unsigned int insns, int fd_map)
37{
38 int i, j = 0;
39
40 for (i = 0; i + 1 < insns; i += 2) {
41 struct bpf_insn tmp[] = {
42 BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map)
43 };
44
45 memcpy(&prog[i], tmp, sizeof(tmp));
46 }
47 if (insns % 2 == 0)
48 prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42);
49 prog[insns - 1] = BPF_EXIT_INSN();
50}
51
52static int bpf_try_load_prog(int insns, int fd_map,
53 void (*bpf_filler)(unsigned int insns,
54 int fd_map))
55{
56 int fd_prog;
57
58 bpf_filler(insns, fd_map);
59 fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
60 NULL, 0);
61 assert(fd_prog > 0);
62 if (fd_map > 0)
63 bpf_filler(insns, 0);
64 return fd_prog;
65}
66
67static int __hex2bin(char ch)
68{
69 if ((ch >= '0') && (ch <= '9'))
70 return ch - '0';
71 ch = tolower(ch);
72 if ((ch >= 'a') && (ch <= 'f'))
73 return ch - 'a' + 10;
74 return -1;
75}
76
77static int hex2bin(uint8_t *dst, const char *src, size_t count)
78{
79 while (count--) {
80 int hi = __hex2bin(*src++);
81 int lo = __hex2bin(*src++);
82
83 if ((hi < 0) || (lo < 0))
84 return -1;
85 *dst++ = (hi << 4) | lo;
86 }
87 return 0;
88}
89
90static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len)
91{
92 const int prefix_len = sizeof("prog_tag:\t") - 1;
93 char buff[256];
94 int ret = -1;
95 FILE *fp;
96
97 snprintf(buff, sizeof(buff), "/proc/%d/fdinfo/%d", getpid(),
98 fd_prog);
99 fp = fopen(buff, "r");
100 assert(fp);
101
102 while (fgets(buff, sizeof(buff), fp)) {
103 if (strncmp(buff, "prog_tag:\t", prefix_len))
104 continue;
105 ret = hex2bin(tag, buff + prefix_len, len);
106 break;
107 }
108
109 fclose(fp);
110 assert(!ret);
111}
112
113static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
114{
115 static const struct sockaddr_alg alg = {
116 .salg_family = AF_ALG,
117 .salg_type = "hash",
118 .salg_name = "sha1",
119 };
120 int fd_base, fd_alg, ret;
121 ssize_t size;
122
123 fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0);
124 assert(fd_base > 0);
125
126 ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg));
127 assert(!ret);
128
129 fd_alg = accept(fd_base, NULL, 0);
130 assert(fd_alg > 0);
131
132 insns *= sizeof(struct bpf_insn);
133 size = write(fd_alg, prog, insns);
134 assert(size == insns);
135
136 size = read(fd_alg, tag, len);
137 assert(size == len);
138
139 close(fd_alg);
140 close(fd_base);
141}
142
143static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len)
144{
145 int i;
146
147 printf("%s", prefix);
148 for (i = 0; i < len; i++)
149 printf("%02x", tag[i]);
150 printf("\n");
151}
152
153static void tag_exit_report(int insns, int fd_map, uint8_t *ftag,
154 uint8_t *atag, uint32_t len)
155{
156 printf("Program tag mismatch for %d insns%s!\n", insns,
157 fd_map < 0 ? "" : " with map");
158
159 tag_dump(" fdinfo result: ", ftag, len);
160 tag_dump(" af_alg result: ", atag, len);
161 exit(1);
162}
163
164static void do_test(uint32_t *tests, int start_insns, int fd_map,
165 void (*bpf_filler)(unsigned int insns, int fd))
166{
167 int i, fd_prog;
168
169 for (i = start_insns; i <= BPF_MAXINSNS; i++) {
170 uint8_t ftag[8], atag[sizeof(ftag)];
171
172 fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler);
173 tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));
174 tag_from_alg(i, atag, sizeof(atag));
175 if (memcmp(ftag, atag, sizeof(ftag)))
176 tag_exit_report(i, fd_map, ftag, atag, sizeof(ftag));
177
178 close(fd_prog);
179 sched_yield();
180 (*tests)++;
181 }
182}
183
184int main(void)
185{
186 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
187 uint32_t tests = 0;
188 int i, fd_map;
189
190 setrlimit(RLIMIT_MEMLOCK, &rinf);
191 fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
192 sizeof(int), 1, BPF_F_NO_PREALLOC);
193 assert(fd_map > 0);
194
195 for (i = 0; i < 5; i++) {
196 do_test(&tests, 2, -1, bpf_gen_imm_prog);
197 do_test(&tests, 3, fd_map, bpf_gen_map_prog);
198 }
199
200 printf("test_tag: OK (%u tests)\n", tests);
201 close(fd_map);
202 return 0;
203}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 853d7e43434a..e1f5b9eea1e8 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,7 +8,9 @@
8 * License as published by the Free Software Foundation. 8 * License as published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <stdint.h>
11#include <stdio.h> 12#include <stdio.h>
13#include <stdlib.h>
12#include <unistd.h> 14#include <unistd.h>
13#include <errno.h> 15#include <errno.h>
14#include <string.h> 16#include <string.h>
@@ -16,6 +18,7 @@
16#include <stdbool.h> 18#include <stdbool.h>
17#include <sched.h> 19#include <sched.h>
18 20
21#include <sys/capability.h>
19#include <sys/resource.h> 22#include <sys/resource.h>
20 23
21#include <linux/unistd.h> 24#include <linux/unistd.h>
@@ -23,9 +26,9 @@
23#include <linux/bpf_perf_event.h> 26#include <linux/bpf_perf_event.h>
24#include <linux/bpf.h> 27#include <linux/bpf.h>
25 28
26#include "../../../include/linux/filter.h" 29#include <bpf/bpf.h>
27 30
28#include "bpf_sys.h" 31#include "../../../include/linux/filter.h"
29 32
30#ifndef ARRAY_SIZE 33#ifndef ARRAY_SIZE
31# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 34# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -859,15 +862,451 @@ static struct bpf_test tests[] = {
859 .result = REJECT, 862 .result = REJECT,
860 }, 863 },
861 { 864 {
862 "check non-u32 access to cb", 865 "check cb access: byte",
863 .insns = { 866 .insns = {
864 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1, 867 BPF_MOV64_IMM(BPF_REG_0, 0),
868 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
869 offsetof(struct __sk_buff, cb[0])),
870 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
871 offsetof(struct __sk_buff, cb[0]) + 1),
872 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
873 offsetof(struct __sk_buff, cb[0]) + 2),
874 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
875 offsetof(struct __sk_buff, cb[0]) + 3),
876 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
877 offsetof(struct __sk_buff, cb[1])),
878 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
879 offsetof(struct __sk_buff, cb[1]) + 1),
880 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
881 offsetof(struct __sk_buff, cb[1]) + 2),
882 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
883 offsetof(struct __sk_buff, cb[1]) + 3),
884 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
885 offsetof(struct __sk_buff, cb[2])),
886 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
887 offsetof(struct __sk_buff, cb[2]) + 1),
888 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
889 offsetof(struct __sk_buff, cb[2]) + 2),
890 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
891 offsetof(struct __sk_buff, cb[2]) + 3),
892 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
893 offsetof(struct __sk_buff, cb[3])),
894 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
895 offsetof(struct __sk_buff, cb[3]) + 1),
896 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
897 offsetof(struct __sk_buff, cb[3]) + 2),
898 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
899 offsetof(struct __sk_buff, cb[3]) + 3),
900 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
901 offsetof(struct __sk_buff, cb[4])),
902 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
903 offsetof(struct __sk_buff, cb[4]) + 1),
904 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
905 offsetof(struct __sk_buff, cb[4]) + 2),
906 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
907 offsetof(struct __sk_buff, cb[4]) + 3),
908 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, cb[0])),
910 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
911 offsetof(struct __sk_buff, cb[0]) + 1),
912 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, cb[0]) + 2),
914 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
915 offsetof(struct __sk_buff, cb[0]) + 3),
916 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
917 offsetof(struct __sk_buff, cb[1])),
918 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
919 offsetof(struct __sk_buff, cb[1]) + 1),
920 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
921 offsetof(struct __sk_buff, cb[1]) + 2),
922 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
923 offsetof(struct __sk_buff, cb[1]) + 3),
924 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
925 offsetof(struct __sk_buff, cb[2])),
926 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
927 offsetof(struct __sk_buff, cb[2]) + 1),
928 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
929 offsetof(struct __sk_buff, cb[2]) + 2),
930 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
931 offsetof(struct __sk_buff, cb[2]) + 3),
932 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
933 offsetof(struct __sk_buff, cb[3])),
934 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
935 offsetof(struct __sk_buff, cb[3]) + 1),
936 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
937 offsetof(struct __sk_buff, cb[3]) + 2),
938 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
939 offsetof(struct __sk_buff, cb[3]) + 3),
940 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
941 offsetof(struct __sk_buff, cb[4])),
942 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
943 offsetof(struct __sk_buff, cb[4]) + 1),
944 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
945 offsetof(struct __sk_buff, cb[4]) + 2),
946 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
947 offsetof(struct __sk_buff, cb[4]) + 3),
948 BPF_EXIT_INSN(),
949 },
950 .result = ACCEPT,
951 },
952 {
953 "check cb access: byte, oob 1",
954 .insns = {
955 BPF_MOV64_IMM(BPF_REG_0, 0),
956 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
957 offsetof(struct __sk_buff, cb[4]) + 4),
958 BPF_EXIT_INSN(),
959 },
960 .errstr = "invalid bpf_context access",
961 .result = REJECT,
962 },
963 {
964 "check cb access: byte, oob 2",
965 .insns = {
966 BPF_MOV64_IMM(BPF_REG_0, 0),
967 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
968 offsetof(struct __sk_buff, cb[0]) - 1),
969 BPF_EXIT_INSN(),
970 },
971 .errstr = "invalid bpf_context access",
972 .result = REJECT,
973 },
974 {
975 "check cb access: byte, oob 3",
976 .insns = {
977 BPF_MOV64_IMM(BPF_REG_0, 0),
978 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
979 offsetof(struct __sk_buff, cb[4]) + 4),
980 BPF_EXIT_INSN(),
981 },
982 .errstr = "invalid bpf_context access",
983 .result = REJECT,
984 },
985 {
986 "check cb access: byte, oob 4",
987 .insns = {
988 BPF_MOV64_IMM(BPF_REG_0, 0),
989 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
990 offsetof(struct __sk_buff, cb[0]) - 1),
991 BPF_EXIT_INSN(),
992 },
993 .errstr = "invalid bpf_context access",
994 .result = REJECT,
995 },
996 {
997 "check cb access: byte, wrong type",
998 .insns = {
999 BPF_MOV64_IMM(BPF_REG_0, 0),
1000 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1001 offsetof(struct __sk_buff, cb[0])),
1002 BPF_EXIT_INSN(),
1003 },
1004 .errstr = "invalid bpf_context access",
1005 .result = REJECT,
1006 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1007 },
1008 {
1009 "check cb access: half",
1010 .insns = {
1011 BPF_MOV64_IMM(BPF_REG_0, 0),
1012 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1013 offsetof(struct __sk_buff, cb[0])),
1014 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1015 offsetof(struct __sk_buff, cb[0]) + 2),
1016 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1017 offsetof(struct __sk_buff, cb[1])),
1018 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1019 offsetof(struct __sk_buff, cb[1]) + 2),
1020 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1021 offsetof(struct __sk_buff, cb[2])),
1022 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1023 offsetof(struct __sk_buff, cb[2]) + 2),
1024 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1025 offsetof(struct __sk_buff, cb[3])),
1026 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1027 offsetof(struct __sk_buff, cb[3]) + 2),
1028 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1029 offsetof(struct __sk_buff, cb[4])),
1030 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1031 offsetof(struct __sk_buff, cb[4]) + 2),
1032 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1033 offsetof(struct __sk_buff, cb[0])),
1034 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1035 offsetof(struct __sk_buff, cb[0]) + 2),
1036 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1037 offsetof(struct __sk_buff, cb[1])),
1038 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1039 offsetof(struct __sk_buff, cb[1]) + 2),
1040 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1041 offsetof(struct __sk_buff, cb[2])),
1042 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1043 offsetof(struct __sk_buff, cb[2]) + 2),
1044 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1045 offsetof(struct __sk_buff, cb[3])),
1046 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1047 offsetof(struct __sk_buff, cb[3]) + 2),
1048 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1049 offsetof(struct __sk_buff, cb[4])),
1050 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1051 offsetof(struct __sk_buff, cb[4]) + 2),
1052 BPF_EXIT_INSN(),
1053 },
1054 .result = ACCEPT,
1055 },
1056 {
1057 "check cb access: half, unaligned",
1058 .insns = {
1059 BPF_MOV64_IMM(BPF_REG_0, 0),
1060 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1061 offsetof(struct __sk_buff, cb[0]) + 1),
1062 BPF_EXIT_INSN(),
1063 },
1064 .errstr = "misaligned access",
1065 .result = REJECT,
1066 },
1067 {
1068 "check cb access: half, oob 1",
1069 .insns = {
1070 BPF_MOV64_IMM(BPF_REG_0, 0),
1071 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1072 offsetof(struct __sk_buff, cb[4]) + 4),
1073 BPF_EXIT_INSN(),
1074 },
1075 .errstr = "invalid bpf_context access",
1076 .result = REJECT,
1077 },
1078 {
1079 "check cb access: half, oob 2",
1080 .insns = {
1081 BPF_MOV64_IMM(BPF_REG_0, 0),
1082 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1083 offsetof(struct __sk_buff, cb[0]) - 2),
1084 BPF_EXIT_INSN(),
1085 },
1086 .errstr = "invalid bpf_context access",
1087 .result = REJECT,
1088 },
1089 {
1090 "check cb access: half, oob 3",
1091 .insns = {
1092 BPF_MOV64_IMM(BPF_REG_0, 0),
1093 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1094 offsetof(struct __sk_buff, cb[4]) + 4),
1095 BPF_EXIT_INSN(),
1096 },
1097 .errstr = "invalid bpf_context access",
1098 .result = REJECT,
1099 },
1100 {
1101 "check cb access: half, oob 4",
1102 .insns = {
1103 BPF_MOV64_IMM(BPF_REG_0, 0),
1104 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1105 offsetof(struct __sk_buff, cb[0]) - 2),
1106 BPF_EXIT_INSN(),
1107 },
1108 .errstr = "invalid bpf_context access",
1109 .result = REJECT,
1110 },
1111 {
1112 "check cb access: half, wrong type",
1113 .insns = {
1114 BPF_MOV64_IMM(BPF_REG_0, 0),
1115 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1116 offsetof(struct __sk_buff, cb[0])),
1117 BPF_EXIT_INSN(),
1118 },
1119 .errstr = "invalid bpf_context access",
1120 .result = REJECT,
1121 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1122 },
1123 {
1124 "check cb access: word",
1125 .insns = {
1126 BPF_MOV64_IMM(BPF_REG_0, 0),
1127 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1128 offsetof(struct __sk_buff, cb[0])),
1129 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1130 offsetof(struct __sk_buff, cb[1])),
1131 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1132 offsetof(struct __sk_buff, cb[2])),
1133 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1134 offsetof(struct __sk_buff, cb[3])),
1135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1136 offsetof(struct __sk_buff, cb[4])),
1137 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1138 offsetof(struct __sk_buff, cb[0])),
1139 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1140 offsetof(struct __sk_buff, cb[1])),
1141 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1142 offsetof(struct __sk_buff, cb[2])),
1143 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1144 offsetof(struct __sk_buff, cb[3])),
1145 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1146 offsetof(struct __sk_buff, cb[4])),
1147 BPF_EXIT_INSN(),
1148 },
1149 .result = ACCEPT,
1150 },
1151 {
1152 "check cb access: word, unaligned 1",
1153 .insns = {
1154 BPF_MOV64_IMM(BPF_REG_0, 0),
1155 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1156 offsetof(struct __sk_buff, cb[0]) + 2),
1157 BPF_EXIT_INSN(),
1158 },
1159 .errstr = "misaligned access",
1160 .result = REJECT,
1161 },
1162 {
1163 "check cb access: word, unaligned 2",
1164 .insns = {
1165 BPF_MOV64_IMM(BPF_REG_0, 0),
1166 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1167 offsetof(struct __sk_buff, cb[4]) + 1),
1168 BPF_EXIT_INSN(),
1169 },
1170 .errstr = "misaligned access",
1171 .result = REJECT,
1172 },
1173 {
1174 "check cb access: word, unaligned 3",
1175 .insns = {
1176 BPF_MOV64_IMM(BPF_REG_0, 0),
1177 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1178 offsetof(struct __sk_buff, cb[4]) + 2),
1179 BPF_EXIT_INSN(),
1180 },
1181 .errstr = "misaligned access",
1182 .result = REJECT,
1183 },
1184 {
1185 "check cb access: word, unaligned 4",
1186 .insns = {
1187 BPF_MOV64_IMM(BPF_REG_0, 0),
1188 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1189 offsetof(struct __sk_buff, cb[4]) + 3),
1190 BPF_EXIT_INSN(),
1191 },
1192 .errstr = "misaligned access",
1193 .result = REJECT,
1194 },
1195 {
1196 "check cb access: double",
1197 .insns = {
1198 BPF_MOV64_IMM(BPF_REG_0, 0),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1200 offsetof(struct __sk_buff, cb[0])),
1201 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1202 offsetof(struct __sk_buff, cb[2])),
1203 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1204 offsetof(struct __sk_buff, cb[0])),
1205 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1206 offsetof(struct __sk_buff, cb[2])),
1207 BPF_EXIT_INSN(),
1208 },
1209 .result = ACCEPT,
1210 },
1211 {
1212 "check cb access: double, unaligned 1",
1213 .insns = {
1214 BPF_MOV64_IMM(BPF_REG_0, 0),
1215 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1216 offsetof(struct __sk_buff, cb[1])),
1217 BPF_EXIT_INSN(),
1218 },
1219 .errstr = "misaligned access",
1220 .result = REJECT,
1221 },
1222 {
1223 "check cb access: double, unaligned 2",
1224 .insns = {
1225 BPF_MOV64_IMM(BPF_REG_0, 0),
1226 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1227 offsetof(struct __sk_buff, cb[3])),
1228 BPF_EXIT_INSN(),
1229 },
1230 .errstr = "misaligned access",
1231 .result = REJECT,
1232 },
1233 {
1234 "check cb access: double, oob 1",
1235 .insns = {
1236 BPF_MOV64_IMM(BPF_REG_0, 0),
1237 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1238 offsetof(struct __sk_buff, cb[4])),
1239 BPF_EXIT_INSN(),
1240 },
1241 .errstr = "invalid bpf_context access",
1242 .result = REJECT,
1243 },
1244 {
1245 "check cb access: double, oob 2",
1246 .insns = {
1247 BPF_MOV64_IMM(BPF_REG_0, 0),
1248 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1249 offsetof(struct __sk_buff, cb[4]) + 8),
1250 BPF_EXIT_INSN(),
1251 },
1252 .errstr = "invalid bpf_context access",
1253 .result = REJECT,
1254 },
1255 {
1256 "check cb access: double, oob 3",
1257 .insns = {
1258 BPF_MOV64_IMM(BPF_REG_0, 0),
1259 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1260 offsetof(struct __sk_buff, cb[0]) - 8),
1261 BPF_EXIT_INSN(),
1262 },
1263 .errstr = "invalid bpf_context access",
1264 .result = REJECT,
1265 },
1266 {
1267 "check cb access: double, oob 4",
1268 .insns = {
1269 BPF_MOV64_IMM(BPF_REG_0, 0),
1270 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1271 offsetof(struct __sk_buff, cb[4])),
1272 BPF_EXIT_INSN(),
1273 },
1274 .errstr = "invalid bpf_context access",
1275 .result = REJECT,
1276 },
1277 {
1278 "check cb access: double, oob 5",
1279 .insns = {
1280 BPF_MOV64_IMM(BPF_REG_0, 0),
1281 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1282 offsetof(struct __sk_buff, cb[4]) + 8),
1283 BPF_EXIT_INSN(),
1284 },
1285 .errstr = "invalid bpf_context access",
1286 .result = REJECT,
1287 },
1288 {
1289 "check cb access: double, oob 6",
1290 .insns = {
1291 BPF_MOV64_IMM(BPF_REG_0, 0),
1292 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1293 offsetof(struct __sk_buff, cb[0]) - 8),
1294 BPF_EXIT_INSN(),
1295 },
1296 .errstr = "invalid bpf_context access",
1297 .result = REJECT,
1298 },
1299 {
1300 "check cb access: double, wrong type",
1301 .insns = {
1302 BPF_MOV64_IMM(BPF_REG_0, 0),
1303 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
865 offsetof(struct __sk_buff, cb[0])), 1304 offsetof(struct __sk_buff, cb[0])),
866 BPF_EXIT_INSN(), 1305 BPF_EXIT_INSN(),
867 }, 1306 },
868 .errstr = "invalid bpf_context access", 1307 .errstr = "invalid bpf_context access",
869 .errstr_unpriv = "R1 leaks addr",
870 .result = REJECT, 1308 .result = REJECT,
1309 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
871 }, 1310 },
872 { 1311 {
873 "check out of range skb->cb access", 1312 "check out of range skb->cb access",
@@ -1890,6 +2329,107 @@ static struct bpf_test tests[] = {
1890 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2329 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1891 }, 2330 },
1892 { 2331 {
2332 "direct packet access: test11 (shift, good access)",
2333 .insns = {
2334 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2335 offsetof(struct __sk_buff, data)),
2336 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2337 offsetof(struct __sk_buff, data_end)),
2338 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2340 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2341 BPF_MOV64_IMM(BPF_REG_3, 144),
2342 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2344 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2345 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2346 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2347 BPF_MOV64_IMM(BPF_REG_0, 1),
2348 BPF_EXIT_INSN(),
2349 BPF_MOV64_IMM(BPF_REG_0, 0),
2350 BPF_EXIT_INSN(),
2351 },
2352 .result = ACCEPT,
2353 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2354 },
2355 {
2356 "direct packet access: test12 (and, good access)",
2357 .insns = {
2358 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2359 offsetof(struct __sk_buff, data)),
2360 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2361 offsetof(struct __sk_buff, data_end)),
2362 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2364 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2365 BPF_MOV64_IMM(BPF_REG_3, 144),
2366 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2367 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2368 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2370 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2371 BPF_MOV64_IMM(BPF_REG_0, 1),
2372 BPF_EXIT_INSN(),
2373 BPF_MOV64_IMM(BPF_REG_0, 0),
2374 BPF_EXIT_INSN(),
2375 },
2376 .result = ACCEPT,
2377 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2378 },
2379 {
2380 "direct packet access: test13 (branches, good access)",
2381 .insns = {
2382 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2383 offsetof(struct __sk_buff, data)),
2384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2385 offsetof(struct __sk_buff, data_end)),
2386 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2388 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2389 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2390 offsetof(struct __sk_buff, mark)),
2391 BPF_MOV64_IMM(BPF_REG_4, 1),
2392 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2393 BPF_MOV64_IMM(BPF_REG_3, 14),
2394 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2395 BPF_MOV64_IMM(BPF_REG_3, 24),
2396 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2398 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2399 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2400 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2401 BPF_MOV64_IMM(BPF_REG_0, 1),
2402 BPF_EXIT_INSN(),
2403 BPF_MOV64_IMM(BPF_REG_0, 0),
2404 BPF_EXIT_INSN(),
2405 },
2406 .result = ACCEPT,
2407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2408 },
2409 {
2410 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2411 .insns = {
2412 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2413 offsetof(struct __sk_buff, data)),
2414 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2415 offsetof(struct __sk_buff, data_end)),
2416 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2418 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2419 BPF_MOV64_IMM(BPF_REG_5, 12),
2420 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2421 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2422 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2423 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2424 BPF_MOV64_IMM(BPF_REG_0, 1),
2425 BPF_EXIT_INSN(),
2426 BPF_MOV64_IMM(BPF_REG_0, 0),
2427 BPF_EXIT_INSN(),
2428 },
2429 .result = ACCEPT,
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2431 },
2432 {
1893 "helper access to packet: test1, valid packet_ptr range", 2433 "helper access to packet: test1, valid packet_ptr range",
1894 .insns = { 2434 .insns = {
1895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2905,6 +3445,1012 @@ static struct bpf_test tests[] = {
2905 .result = REJECT, 3445 .result = REJECT,
2906 .errstr = "invalid bpf_context access", 3446 .errstr = "invalid bpf_context access",
2907 }, 3447 },
3448 {
3449 "helper access to map: full range",
3450 .insns = {
3451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3453 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3454 BPF_LD_MAP_FD(BPF_REG_1, 0),
3455 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3458 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
3459 BPF_MOV64_IMM(BPF_REG_3, 0),
3460 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3461 BPF_EXIT_INSN(),
3462 },
3463 .fixup_map2 = { 3 },
3464 .result = ACCEPT,
3465 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3466 },
3467 {
3468 "helper access to map: partial range",
3469 .insns = {
3470 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3471 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3472 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3473 BPF_LD_MAP_FD(BPF_REG_1, 0),
3474 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3475 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3477 BPF_MOV64_IMM(BPF_REG_2, 8),
3478 BPF_MOV64_IMM(BPF_REG_3, 0),
3479 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3480 BPF_EXIT_INSN(),
3481 },
3482 .fixup_map2 = { 3 },
3483 .result = ACCEPT,
3484 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3485 },
3486 {
3487 "helper access to map: empty range",
3488 .insns = {
3489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3491 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3492 BPF_LD_MAP_FD(BPF_REG_1, 0),
3493 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3494 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3495 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3496 BPF_MOV64_IMM(BPF_REG_2, 0),
3497 BPF_MOV64_IMM(BPF_REG_3, 0),
3498 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3499 BPF_EXIT_INSN(),
3500 },
3501 .fixup_map2 = { 3 },
3502 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
3503 .result = REJECT,
3504 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3505 },
3506 {
3507 "helper access to map: out-of-bound range",
3508 .insns = {
3509 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3511 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3512 BPF_LD_MAP_FD(BPF_REG_1, 0),
3513 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3514 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3516 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
3517 BPF_MOV64_IMM(BPF_REG_3, 0),
3518 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3519 BPF_EXIT_INSN(),
3520 },
3521 .fixup_map2 = { 3 },
3522 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
3523 .result = REJECT,
3524 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3525 },
3526 {
3527 "helper access to map: negative range",
3528 .insns = {
3529 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3531 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3532 BPF_LD_MAP_FD(BPF_REG_1, 0),
3533 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3534 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3535 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3536 BPF_MOV64_IMM(BPF_REG_2, -8),
3537 BPF_MOV64_IMM(BPF_REG_3, 0),
3538 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3539 BPF_EXIT_INSN(),
3540 },
3541 .fixup_map2 = { 3 },
3542 .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
3543 .result = REJECT,
3544 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3545 },
3546 {
3547 "helper access to adjusted map (via const imm): full range",
3548 .insns = {
3549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3551 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3552 BPF_LD_MAP_FD(BPF_REG_1, 0),
3553 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3554 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3557 offsetof(struct test_val, foo)),
3558 BPF_MOV64_IMM(BPF_REG_2,
3559 sizeof(struct test_val) -
3560 offsetof(struct test_val, foo)),
3561 BPF_MOV64_IMM(BPF_REG_3, 0),
3562 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3563 BPF_EXIT_INSN(),
3564 },
3565 .fixup_map2 = { 3 },
3566 .result = ACCEPT,
3567 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3568 },
3569 {
3570 "helper access to adjusted map (via const imm): partial range",
3571 .insns = {
3572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3574 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3575 BPF_LD_MAP_FD(BPF_REG_1, 0),
3576 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3580 offsetof(struct test_val, foo)),
3581 BPF_MOV64_IMM(BPF_REG_2, 8),
3582 BPF_MOV64_IMM(BPF_REG_3, 0),
3583 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3584 BPF_EXIT_INSN(),
3585 },
3586 .fixup_map2 = { 3 },
3587 .result = ACCEPT,
3588 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3589 },
3590 {
3591 "helper access to adjusted map (via const imm): empty range",
3592 .insns = {
3593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3595 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3596 BPF_LD_MAP_FD(BPF_REG_1, 0),
3597 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3599 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3601 offsetof(struct test_val, foo)),
3602 BPF_MOV64_IMM(BPF_REG_2, 0),
3603 BPF_MOV64_IMM(BPF_REG_3, 0),
3604 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3605 BPF_EXIT_INSN(),
3606 },
3607 .fixup_map2 = { 3 },
3608 .errstr = "R1 min value is outside of the array range",
3609 .result = REJECT,
3610 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3611 },
3612 {
3613 "helper access to adjusted map (via const imm): out-of-bound range",
3614 .insns = {
3615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3617 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3618 BPF_LD_MAP_FD(BPF_REG_1, 0),
3619 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3623 offsetof(struct test_val, foo)),
3624 BPF_MOV64_IMM(BPF_REG_2,
3625 sizeof(struct test_val) -
3626 offsetof(struct test_val, foo) + 8),
3627 BPF_MOV64_IMM(BPF_REG_3, 0),
3628 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3629 BPF_EXIT_INSN(),
3630 },
3631 .fixup_map2 = { 3 },
3632 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3633 .result = REJECT,
3634 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3635 },
3636 {
3637 "helper access to adjusted map (via const imm): negative range (> adjustment)",
3638 .insns = {
3639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3641 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3642 BPF_LD_MAP_FD(BPF_REG_1, 0),
3643 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3647 offsetof(struct test_val, foo)),
3648 BPF_MOV64_IMM(BPF_REG_2, -8),
3649 BPF_MOV64_IMM(BPF_REG_3, 0),
3650 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3651 BPF_EXIT_INSN(),
3652 },
3653 .fixup_map2 = { 3 },
3654 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3655 .result = REJECT,
3656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3657 },
3658 {
3659 "helper access to adjusted map (via const imm): negative range (< adjustment)",
3660 .insns = {
3661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3664 BPF_LD_MAP_FD(BPF_REG_1, 0),
3665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
3669 offsetof(struct test_val, foo)),
3670 BPF_MOV64_IMM(BPF_REG_2, -1),
3671 BPF_MOV64_IMM(BPF_REG_3, 0),
3672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3673 BPF_EXIT_INSN(),
3674 },
3675 .fixup_map2 = { 3 },
3676 .errstr = "R1 min value is outside of the array range",
3677 .result = REJECT,
3678 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3679 },
3680 {
3681 "helper access to adjusted map (via const reg): full range",
3682 .insns = {
3683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3685 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3686 BPF_LD_MAP_FD(BPF_REG_1, 0),
3687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3690 BPF_MOV64_IMM(BPF_REG_3,
3691 offsetof(struct test_val, foo)),
3692 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3693 BPF_MOV64_IMM(BPF_REG_2,
3694 sizeof(struct test_val) -
3695 offsetof(struct test_val, foo)),
3696 BPF_MOV64_IMM(BPF_REG_3, 0),
3697 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3698 BPF_EXIT_INSN(),
3699 },
3700 .fixup_map2 = { 3 },
3701 .result = ACCEPT,
3702 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3703 },
3704 {
3705 "helper access to adjusted map (via const reg): partial range",
3706 .insns = {
3707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3709 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3710 BPF_LD_MAP_FD(BPF_REG_1, 0),
3711 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3712 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3714 BPF_MOV64_IMM(BPF_REG_3,
3715 offsetof(struct test_val, foo)),
3716 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3717 BPF_MOV64_IMM(BPF_REG_2, 8),
3718 BPF_MOV64_IMM(BPF_REG_3, 0),
3719 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3720 BPF_EXIT_INSN(),
3721 },
3722 .fixup_map2 = { 3 },
3723 .result = ACCEPT,
3724 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3725 },
3726 {
3727 "helper access to adjusted map (via const reg): empty range",
3728 .insns = {
3729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3731 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3732 BPF_LD_MAP_FD(BPF_REG_1, 0),
3733 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3736 BPF_MOV64_IMM(BPF_REG_3, 0),
3737 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3738 BPF_MOV64_IMM(BPF_REG_2, 0),
3739 BPF_MOV64_IMM(BPF_REG_3, 0),
3740 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3741 BPF_EXIT_INSN(),
3742 },
3743 .fixup_map2 = { 3 },
3744 .errstr = "R1 min value is outside of the array range",
3745 .result = REJECT,
3746 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3747 },
3748 {
3749 "helper access to adjusted map (via const reg): out-of-bound range",
3750 .insns = {
3751 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3752 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3753 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3754 BPF_LD_MAP_FD(BPF_REG_1, 0),
3755 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3758 BPF_MOV64_IMM(BPF_REG_3,
3759 offsetof(struct test_val, foo)),
3760 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3761 BPF_MOV64_IMM(BPF_REG_2,
3762 sizeof(struct test_val) -
3763 offsetof(struct test_val, foo) + 8),
3764 BPF_MOV64_IMM(BPF_REG_3, 0),
3765 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3766 BPF_EXIT_INSN(),
3767 },
3768 .fixup_map2 = { 3 },
3769 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
3770 .result = REJECT,
3771 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3772 },
3773 {
3774 "helper access to adjusted map (via const reg): negative range (> adjustment)",
3775 .insns = {
3776 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3778 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3779 BPF_LD_MAP_FD(BPF_REG_1, 0),
3780 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3783 BPF_MOV64_IMM(BPF_REG_3,
3784 offsetof(struct test_val, foo)),
3785 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3786 BPF_MOV64_IMM(BPF_REG_2, -8),
3787 BPF_MOV64_IMM(BPF_REG_3, 0),
3788 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3789 BPF_EXIT_INSN(),
3790 },
3791 .fixup_map2 = { 3 },
3792 .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
3793 .result = REJECT,
3794 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3795 },
3796 {
3797 "helper access to adjusted map (via const reg): negative range (< adjustment)",
3798 .insns = {
3799 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3801 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3802 BPF_LD_MAP_FD(BPF_REG_1, 0),
3803 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3806 BPF_MOV64_IMM(BPF_REG_3,
3807 offsetof(struct test_val, foo)),
3808 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3809 BPF_MOV64_IMM(BPF_REG_2, -1),
3810 BPF_MOV64_IMM(BPF_REG_3, 0),
3811 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3812 BPF_EXIT_INSN(),
3813 },
3814 .fixup_map2 = { 3 },
3815 .errstr = "R1 min value is outside of the array range",
3816 .result = REJECT,
3817 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3818 },
3819 {
3820 "helper access to adjusted map (via variable): full range",
3821 .insns = {
3822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3824 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3825 BPF_LD_MAP_FD(BPF_REG_1, 0),
3826 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3828 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3829 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3830 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3831 offsetof(struct test_val, foo), 4),
3832 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3833 BPF_MOV64_IMM(BPF_REG_2,
3834 sizeof(struct test_val) -
3835 offsetof(struct test_val, foo)),
3836 BPF_MOV64_IMM(BPF_REG_3, 0),
3837 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3838 BPF_EXIT_INSN(),
3839 },
3840 .fixup_map2 = { 3 },
3841 .result = ACCEPT,
3842 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3843 },
3844 {
3845 "helper access to adjusted map (via variable): partial range",
3846 .insns = {
3847 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3849 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3850 BPF_LD_MAP_FD(BPF_REG_1, 0),
3851 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3853 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3854 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3855 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3856 offsetof(struct test_val, foo), 4),
3857 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3858 BPF_MOV64_IMM(BPF_REG_2, 8),
3859 BPF_MOV64_IMM(BPF_REG_3, 0),
3860 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3861 BPF_EXIT_INSN(),
3862 },
3863 .fixup_map2 = { 3 },
3864 .result = ACCEPT,
3865 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3866 },
3867 {
3868 "helper access to adjusted map (via variable): empty range",
3869 .insns = {
3870 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3872 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3873 BPF_LD_MAP_FD(BPF_REG_1, 0),
3874 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3875 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3876 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3877 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3878 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3879 offsetof(struct test_val, foo), 4),
3880 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3881 BPF_MOV64_IMM(BPF_REG_2, 0),
3882 BPF_MOV64_IMM(BPF_REG_3, 0),
3883 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3884 BPF_EXIT_INSN(),
3885 },
3886 .fixup_map2 = { 3 },
3887 .errstr = "R1 min value is outside of the array range",
3888 .result = REJECT,
3889 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3890 },
3891 {
3892 "helper access to adjusted map (via variable): no max check",
3893 .insns = {
3894 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3896 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3897 BPF_LD_MAP_FD(BPF_REG_1, 0),
3898 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3899 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3901 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3902 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3903 BPF_MOV64_IMM(BPF_REG_2, 0),
3904 BPF_MOV64_IMM(BPF_REG_3, 0),
3905 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3906 BPF_EXIT_INSN(),
3907 },
3908 .fixup_map2 = { 3 },
3909 .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
3910 .result = REJECT,
3911 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3912 },
3913 {
3914 "helper access to adjusted map (via variable): wrong max check",
3915 .insns = {
3916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3918 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3919 BPF_LD_MAP_FD(BPF_REG_1, 0),
3920 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3922 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
3923 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
3924 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
3925 offsetof(struct test_val, foo), 4),
3926 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
3927 BPF_MOV64_IMM(BPF_REG_2,
3928 sizeof(struct test_val) -
3929 offsetof(struct test_val, foo) + 1),
3930 BPF_MOV64_IMM(BPF_REG_3, 0),
3931 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3932 BPF_EXIT_INSN(),
3933 },
3934 .fixup_map2 = { 3 },
3935 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
3936 .result = REJECT,
3937 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3938 },
3939 {
3940 "map element value is preserved across register spilling",
3941 .insns = {
3942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3944 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3945 BPF_LD_MAP_FD(BPF_REG_1, 0),
3946 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3947 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3948 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3951 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3952 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3953 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3954 BPF_EXIT_INSN(),
3955 },
3956 .fixup_map2 = { 3 },
3957 .errstr_unpriv = "R0 leaks addr",
3958 .result = ACCEPT,
3959 .result_unpriv = REJECT,
3960 },
3961 {
3962 "map element value (adjusted) is preserved across register spilling",
3963 .insns = {
3964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3966 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
3967 BPF_LD_MAP_FD(BPF_REG_1, 0),
3968 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
3969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
3971 offsetof(struct test_val, foo)),
3972 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
3973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
3975 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3976 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
3977 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
3978 BPF_EXIT_INSN(),
3979 },
3980 .fixup_map2 = { 3 },
3981 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3982 .result = ACCEPT,
3983 .result_unpriv = REJECT,
3984 },
3985 {
3986 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
3987 .insns = {
3988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
3990 BPF_MOV64_IMM(BPF_REG_0, 0),
3991 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
3992 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
3993 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
3994 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
3995 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
3996 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
3997 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
3998 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3999 BPF_MOV64_IMM(BPF_REG_2, 16),
4000 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4001 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4002 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4003 BPF_MOV64_IMM(BPF_REG_4, 0),
4004 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4005 BPF_MOV64_IMM(BPF_REG_3, 0),
4006 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4007 BPF_MOV64_IMM(BPF_REG_0, 0),
4008 BPF_EXIT_INSN(),
4009 },
4010 .result = ACCEPT,
4011 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4012 },
4013 {
4014 "helper access to variable memory: stack, bitwise AND, zero included",
4015 .insns = {
4016 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4018 BPF_MOV64_IMM(BPF_REG_2, 16),
4019 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4020 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4021 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4022 BPF_MOV64_IMM(BPF_REG_3, 0),
4023 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4024 BPF_EXIT_INSN(),
4025 },
4026 .errstr = "invalid stack type R1 off=-64 access_size=0",
4027 .result = REJECT,
4028 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4029 },
4030 {
4031 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
4032 .insns = {
4033 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4035 BPF_MOV64_IMM(BPF_REG_2, 16),
4036 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4037 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4038 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
4039 BPF_MOV64_IMM(BPF_REG_4, 0),
4040 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4041 BPF_MOV64_IMM(BPF_REG_3, 0),
4042 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4043 BPF_MOV64_IMM(BPF_REG_0, 0),
4044 BPF_EXIT_INSN(),
4045 },
4046 .errstr = "invalid stack type R1 off=-64 access_size=65",
4047 .result = REJECT,
4048 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4049 },
4050 {
4051 "helper access to variable memory: stack, JMP, correct bounds",
4052 .insns = {
4053 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4055 BPF_MOV64_IMM(BPF_REG_0, 0),
4056 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4058 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4062 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4063 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4064 BPF_MOV64_IMM(BPF_REG_2, 16),
4065 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4066 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4067 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
4068 BPF_MOV64_IMM(BPF_REG_4, 0),
4069 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4070 BPF_MOV64_IMM(BPF_REG_3, 0),
4071 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4072 BPF_MOV64_IMM(BPF_REG_0, 0),
4073 BPF_EXIT_INSN(),
4074 },
4075 .result = ACCEPT,
4076 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4077 },
4078 {
4079 "helper access to variable memory: stack, JMP (signed), correct bounds",
4080 .insns = {
4081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4083 BPF_MOV64_IMM(BPF_REG_0, 0),
4084 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4085 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4086 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4087 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4088 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4089 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4090 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4091 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4092 BPF_MOV64_IMM(BPF_REG_2, 16),
4093 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4094 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4095 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
4096 BPF_MOV64_IMM(BPF_REG_4, 0),
4097 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
4098 BPF_MOV64_IMM(BPF_REG_3, 0),
4099 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4100 BPF_MOV64_IMM(BPF_REG_0, 0),
4101 BPF_EXIT_INSN(),
4102 },
4103 .result = ACCEPT,
4104 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4105 },
4106 {
4107 "helper access to variable memory: stack, JMP, bounds + offset",
4108 .insns = {
4109 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4111 BPF_MOV64_IMM(BPF_REG_2, 16),
4112 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4113 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4114 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
4115 BPF_MOV64_IMM(BPF_REG_4, 0),
4116 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4118 BPF_MOV64_IMM(BPF_REG_3, 0),
4119 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4120 BPF_MOV64_IMM(BPF_REG_0, 0),
4121 BPF_EXIT_INSN(),
4122 },
4123 .errstr = "invalid stack type R1 off=-64 access_size=65",
4124 .result = REJECT,
4125 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4126 },
4127 {
4128 "helper access to variable memory: stack, JMP, wrong max",
4129 .insns = {
4130 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4131 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4132 BPF_MOV64_IMM(BPF_REG_2, 16),
4133 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4134 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4135 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
4136 BPF_MOV64_IMM(BPF_REG_4, 0),
4137 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4138 BPF_MOV64_IMM(BPF_REG_3, 0),
4139 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4140 BPF_MOV64_IMM(BPF_REG_0, 0),
4141 BPF_EXIT_INSN(),
4142 },
4143 .errstr = "invalid stack type R1 off=-64 access_size=65",
4144 .result = REJECT,
4145 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4146 },
4147 {
4148 "helper access to variable memory: stack, JMP, no max check",
4149 .insns = {
4150 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4152 BPF_MOV64_IMM(BPF_REG_2, 16),
4153 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4154 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4155 BPF_MOV64_IMM(BPF_REG_4, 0),
4156 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4157 BPF_MOV64_IMM(BPF_REG_3, 0),
4158 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4159 BPF_MOV64_IMM(BPF_REG_0, 0),
4160 BPF_EXIT_INSN(),
4161 },
4162 .errstr = "R2 unbounded memory access",
4163 .result = REJECT,
4164 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4165 },
4166 {
4167 "helper access to variable memory: stack, JMP, no min check",
4168 .insns = {
4169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4171 BPF_MOV64_IMM(BPF_REG_2, 16),
4172 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4173 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4174 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
4175 BPF_MOV64_IMM(BPF_REG_3, 0),
4176 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4177 BPF_MOV64_IMM(BPF_REG_0, 0),
4178 BPF_EXIT_INSN(),
4179 },
4180 .errstr = "invalid stack type R1 off=-64 access_size=0",
4181 .result = REJECT,
4182 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4183 },
4184 {
4185 "helper access to variable memory: stack, JMP (signed), no min check",
4186 .insns = {
4187 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4189 BPF_MOV64_IMM(BPF_REG_2, 16),
4190 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
4191 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
4192 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
4193 BPF_MOV64_IMM(BPF_REG_3, 0),
4194 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4195 BPF_MOV64_IMM(BPF_REG_0, 0),
4196 BPF_EXIT_INSN(),
4197 },
4198 .errstr = "R2 min value is negative",
4199 .result = REJECT,
4200 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4201 },
4202 {
4203 "helper access to variable memory: map, JMP, correct bounds",
4204 .insns = {
4205 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4207 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4208 BPF_LD_MAP_FD(BPF_REG_1, 0),
4209 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4212 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4213 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4214 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4215 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4216 sizeof(struct test_val), 4),
4217 BPF_MOV64_IMM(BPF_REG_4, 0),
4218 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4219 BPF_MOV64_IMM(BPF_REG_3, 0),
4220 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4221 BPF_MOV64_IMM(BPF_REG_0, 0),
4222 BPF_EXIT_INSN(),
4223 },
4224 .fixup_map2 = { 3 },
4225 .result = ACCEPT,
4226 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4227 },
4228 {
4229 "helper access to variable memory: map, JMP, wrong max",
4230 .insns = {
4231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4233 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4234 BPF_LD_MAP_FD(BPF_REG_1, 0),
4235 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4236 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4237 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4238 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4239 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4240 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4241 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4242 sizeof(struct test_val) + 1, 4),
4243 BPF_MOV64_IMM(BPF_REG_4, 0),
4244 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4245 BPF_MOV64_IMM(BPF_REG_3, 0),
4246 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4247 BPF_MOV64_IMM(BPF_REG_0, 0),
4248 BPF_EXIT_INSN(),
4249 },
4250 .fixup_map2 = { 3 },
4251 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
4252 .result = REJECT,
4253 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4254 },
4255 {
4256 "helper access to variable memory: map adjusted, JMP, correct bounds",
4257 .insns = {
4258 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4260 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4261 BPF_LD_MAP_FD(BPF_REG_1, 0),
4262 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4266 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4267 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4268 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4269 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4270 sizeof(struct test_val) - 20, 4),
4271 BPF_MOV64_IMM(BPF_REG_4, 0),
4272 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4273 BPF_MOV64_IMM(BPF_REG_3, 0),
4274 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4275 BPF_MOV64_IMM(BPF_REG_0, 0),
4276 BPF_EXIT_INSN(),
4277 },
4278 .fixup_map2 = { 3 },
4279 .result = ACCEPT,
4280 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4281 },
4282 {
4283 "helper access to variable memory: map adjusted, JMP, wrong max",
4284 .insns = {
4285 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4287 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4288 BPF_LD_MAP_FD(BPF_REG_1, 0),
4289 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4290 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4291 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
4293 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4294 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4295 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4296 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
4297 sizeof(struct test_val) - 19, 4),
4298 BPF_MOV64_IMM(BPF_REG_4, 0),
4299 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
4300 BPF_MOV64_IMM(BPF_REG_3, 0),
4301 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4302 BPF_MOV64_IMM(BPF_REG_0, 0),
4303 BPF_EXIT_INSN(),
4304 },
4305 .fixup_map2 = { 3 },
4306 .errstr = "R1 min value is outside of the array range",
4307 .result = REJECT,
4308 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4309 },
4310 {
4311 "helper access to variable memory: size > 0 not allowed on NULL",
4312 .insns = {
4313 BPF_MOV64_IMM(BPF_REG_1, 0),
4314 BPF_MOV64_IMM(BPF_REG_2, 0),
4315 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4316 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4317 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
4318 BPF_MOV64_IMM(BPF_REG_3, 0),
4319 BPF_MOV64_IMM(BPF_REG_4, 0),
4320 BPF_MOV64_IMM(BPF_REG_5, 0),
4321 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4322 BPF_EXIT_INSN(),
4323 },
4324 .errstr = "R1 type=imm expected=fp",
4325 .result = REJECT,
4326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4327 },
4328 {
4329 "helper access to variable memory: size = 0 not allowed on != NULL",
4330 .insns = {
4331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
4333 BPF_MOV64_IMM(BPF_REG_2, 0),
4334 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
4335 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
4336 BPF_MOV64_IMM(BPF_REG_3, 0),
4337 BPF_MOV64_IMM(BPF_REG_4, 0),
4338 BPF_MOV64_IMM(BPF_REG_5, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
4340 BPF_EXIT_INSN(),
4341 },
4342 .errstr = "invalid stack type R1 off=-8 access_size=0",
4343 .result = REJECT,
4344 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4345 },
4346 {
4347 "helper access to variable memory: 8 bytes leak",
4348 .insns = {
4349 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4351 BPF_MOV64_IMM(BPF_REG_0, 0),
4352 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4354 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4355 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4356 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4357 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4359 BPF_MOV64_IMM(BPF_REG_2, 0),
4360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
4361 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
4362 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
4363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4364 BPF_MOV64_IMM(BPF_REG_3, 0),
4365 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4366 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4367 BPF_EXIT_INSN(),
4368 },
4369 .errstr = "invalid indirect read from stack off -64+32 size 64",
4370 .result = REJECT,
4371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4372 },
4373 {
4374 "helper access to variable memory: 8 bytes no leak (init memory)",
4375 .insns = {
4376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4377 BPF_MOV64_IMM(BPF_REG_0, 0),
4378 BPF_MOV64_IMM(BPF_REG_0, 0),
4379 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
4380 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
4381 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
4382 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
4383 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
4384 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
4385 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
4386 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
4388 BPF_MOV64_IMM(BPF_REG_2, 0),
4389 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
4390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
4391 BPF_MOV64_IMM(BPF_REG_3, 0),
4392 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4393 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
4394 BPF_EXIT_INSN(),
4395 },
4396 .result = ACCEPT,
4397 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4398 },
4399 {
4400 "invalid and of negative number",
4401 .insns = {
4402 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4403 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4405 BPF_LD_MAP_FD(BPF_REG_1, 0),
4406 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4407 BPF_FUNC_map_lookup_elem),
4408 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4409 BPF_MOV64_IMM(BPF_REG_1, 6),
4410 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
4411 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4412 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4413 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4414 offsetof(struct test_val, foo)),
4415 BPF_EXIT_INSN(),
4416 },
4417 .fixup_map2 = { 3 },
4418 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4419 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4420 .result = REJECT,
4421 .result_unpriv = REJECT,
4422 },
4423 {
4424 "invalid range check",
4425 .insns = {
4426 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4427 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4429 BPF_LD_MAP_FD(BPF_REG_1, 0),
4430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4431 BPF_FUNC_map_lookup_elem),
4432 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
4433 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4434 BPF_MOV64_IMM(BPF_REG_9, 1),
4435 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
4436 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
4437 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
4438 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
4439 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
4440 BPF_MOV32_IMM(BPF_REG_3, 1),
4441 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
4442 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
4443 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
4444 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
4445 BPF_MOV64_REG(BPF_REG_0, 0),
4446 BPF_EXIT_INSN(),
4447 },
4448 .fixup_map2 = { 3 },
4449 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4450 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4451 .result = REJECT,
4452 .result_unpriv = REJECT,
4453 }
2908}; 4454};
2909 4455
2910static int probe_filter_length(const struct bpf_insn *fp) 4456static int probe_filter_length(const struct bpf_insn *fp)
@@ -2921,7 +4467,7 @@ static int create_map(uint32_t size_value, uint32_t max_elem)
2921{ 4467{
2922 int fd; 4468 int fd;
2923 4469
2924 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long), 4470 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
2925 size_value, max_elem, BPF_F_NO_PREALLOC); 4471 size_value, max_elem, BPF_F_NO_PREALLOC);
2926 if (fd < 0) 4472 if (fd < 0)
2927 printf("Failed to create hash map '%s'!\n", strerror(errno)); 4473 printf("Failed to create hash map '%s'!\n", strerror(errno));
@@ -2933,7 +4479,7 @@ static int create_prog_array(void)
2933{ 4479{
2934 int fd; 4480 int fd;
2935 4481
2936 fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 4482 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
2937 sizeof(int), 4, 0); 4483 sizeof(int), 4, 0);
2938 if (fd < 0) 4484 if (fd < 0)
2939 printf("Failed to create prog array '%s'!\n", strerror(errno)); 4485 printf("Failed to create prog array '%s'!\n", strerror(errno));
@@ -2991,9 +4537,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
2991 4537
2992 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 4538 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
2993 4539
2994 fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 4540 fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
2995 prog, prog_len * sizeof(struct bpf_insn), 4541 prog, prog_len, "GPL", 0, bpf_vlog,
2996 "GPL", bpf_vlog, sizeof(bpf_vlog)); 4542 sizeof(bpf_vlog));
2997 4543
2998 expected_ret = unpriv && test->result_unpriv != UNDEF ? 4544 expected_ret = unpriv && test->result_unpriv != UNDEF ?
2999 test->result_unpriv : test->result; 4545 test->result_unpriv : test->result;
@@ -3031,6 +4577,55 @@ fail_log:
3031 goto close_fds; 4577 goto close_fds;
3032} 4578}
3033 4579
4580static bool is_admin(void)
4581{
4582 cap_t caps;
4583 cap_flag_value_t sysadmin = CAP_CLEAR;
4584 const cap_value_t cap_val = CAP_SYS_ADMIN;
4585
4586 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4587 perror("cap_get_flag");
4588 return false;
4589 }
4590 caps = cap_get_proc();
4591 if (!caps) {
4592 perror("cap_get_proc");
4593 return false;
4594 }
4595 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
4596 perror("cap_get_flag");
4597 if (cap_free(caps))
4598 perror("cap_free");
4599 return (sysadmin == CAP_SET);
4600}
4601
4602static int set_admin(bool admin)
4603{
4604 cap_t caps;
4605 const cap_value_t cap_val = CAP_SYS_ADMIN;
4606 int ret = -1;
4607
4608 caps = cap_get_proc();
4609 if (!caps) {
4610 perror("cap_get_proc");
4611 return -1;
4612 }
4613 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
4614 admin ? CAP_SET : CAP_CLEAR)) {
4615 perror("cap_set_flag");
4616 goto out;
4617 }
4618 if (cap_set_proc(caps)) {
4619 perror("cap_set_proc");
4620 goto out;
4621 }
4622 ret = 0;
4623out:
4624 if (cap_free(caps))
4625 perror("cap_free");
4626 return ret;
4627}
4628
3034static int do_test(bool unpriv, unsigned int from, unsigned int to) 4629static int do_test(bool unpriv, unsigned int from, unsigned int to)
3035{ 4630{
3036 int i, passes = 0, errors = 0; 4631 int i, passes = 0, errors = 0;
@@ -3041,11 +4636,19 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
3041 /* Program types that are not supported by non-root we 4636 /* Program types that are not supported by non-root we
3042 * skip right away. 4637 * skip right away.
3043 */ 4638 */
3044 if (unpriv && test->prog_type) 4639 if (!test->prog_type) {
3045 continue; 4640 if (!unpriv)
4641 set_admin(false);
4642 printf("#%d/u %s ", i, test->descr);
4643 do_test_single(test, true, &passes, &errors);
4644 if (!unpriv)
4645 set_admin(true);
4646 }
3046 4647
3047 printf("#%d %s ", i, test->descr); 4648 if (!unpriv) {
3048 do_test_single(test, unpriv, &passes, &errors); 4649 printf("#%d/p %s ", i, test->descr);
4650 do_test_single(test, false, &passes, &errors);
4651 }
3049 } 4652 }
3050 4653
3051 printf("Summary: %d PASSED, %d FAILED\n", passes, errors); 4654 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
@@ -3057,7 +4660,7 @@ int main(int argc, char **argv)
3057 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 4660 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
3058 struct rlimit rlim = { 1 << 20, 1 << 20 }; 4661 struct rlimit rlim = { 1 << 20, 1 << 20 };
3059 unsigned int from = 0, to = ARRAY_SIZE(tests); 4662 unsigned int from = 0, to = ARRAY_SIZE(tests);
3060 bool unpriv = geteuid() != 0; 4663 bool unpriv = !is_admin();
3061 4664
3062 if (argc == 3) { 4665 if (argc == 3) {
3063 unsigned int l = atoi(argv[argc - 2]); 4666 unsigned int l = atoi(argv[argc - 2]);
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 24bc7ec1be7d..a77da88bf946 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -40,14 +40,39 @@
40 40
41static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum) 41static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
42{ 42{
43 /* the filter below checks for all of the following conditions that
44 * are based on the contents of create_payload()
45 * ether type 0x800 and
46 * ip proto udp and
47 * skb->len == DATA_LEN and
48 * udp[38] == 'a' or udp[38] == 'b'
49 * It can be generated from the following bpf_asm input:
50 * ldh [12]
51 * jne #0x800, drop ; ETH_P_IP
52 * ldb [23]
53 * jneq #17, drop ; IPPROTO_UDP
54 * ld len ; ld skb->len
55 * jlt #100, drop ; DATA_LEN
56 * ldb [80]
57 * jeq #97, pass ; DATA_CHAR
58 * jne #98, drop ; DATA_CHAR_1
59 * pass:
60 * ret #-1
61 * drop:
62 * ret #0
63 */
43 struct sock_filter bpf_filter[] = { 64 struct sock_filter bpf_filter[] = {
44 { 0x80, 0, 0, 0x00000000 }, /* LD pktlen */ 65 { 0x28, 0, 0, 0x0000000c },
45 { 0x35, 0, 4, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/ 66 { 0x15, 0, 8, 0x00000800 },
46 { 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */ 67 { 0x30, 0, 0, 0x00000017 },
47 { 0x15, 1, 0, DATA_CHAR }, /* JEQ DATA_CHAR [t goto match]*/ 68 { 0x15, 0, 6, 0x00000011 },
48 { 0x15, 0, 1, DATA_CHAR_1}, /* JEQ DATA_CHAR_1 [t goto match]*/ 69 { 0x80, 0, 0, 0000000000 },
49 { 0x06, 0, 0, 0x00000060 }, /* RET match */ 70 { 0x35, 0, 4, 0x00000064 },
50 { 0x06, 0, 0, 0x00000000 }, /* RET no match */ 71 { 0x30, 0, 0, 0x00000050 },
72 { 0x15, 1, 0, 0x00000061 },
73 { 0x15, 0, 1, 0x00000062 },
74 { 0x06, 0, 0, 0xffffffff },
75 { 0x06, 0, 0, 0000000000 },
51 }; 76 };
52 struct sock_fprog bpf_prog; 77 struct sock_fprog bpf_prog;
53 78
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
index 24adf709bd9d..7f6cd9fdacf3 100644
--- a/tools/testing/selftests/net/psock_tpacket.c
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -110,7 +110,7 @@ static unsigned int total_packets, total_bytes;
110 110
111static int pfsocket(int ver) 111static int pfsocket(int ver)
112{ 112{
113 int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); 113 int ret, sock = socket(PF_PACKET, SOCK_RAW, 0);
114 if (sock == -1) { 114 if (sock == -1) {
115 perror("socket"); 115 perror("socket");
116 exit(1); 116 exit(1);
@@ -239,7 +239,6 @@ static void walk_v1_v2_rx(int sock, struct ring *ring)
239 bug_on(ring->type != PACKET_RX_RING); 239 bug_on(ring->type != PACKET_RX_RING);
240 240
241 pair_udp_open(udp_sock, PORT_BASE); 241 pair_udp_open(udp_sock, PORT_BASE);
242 pair_udp_setfilter(sock);
243 242
244 memset(&pfd, 0, sizeof(pfd)); 243 memset(&pfd, 0, sizeof(pfd));
245 pfd.fd = sock; 244 pfd.fd = sock;
@@ -311,20 +310,33 @@ static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr)
311 __sync_synchronize(); 310 __sync_synchronize();
312} 311}
313 312
314static inline int __v1_v2_tx_kernel_ready(void *base, int version) 313static inline int __v3_tx_kernel_ready(struct tpacket3_hdr *hdr)
314{
315 return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
316}
317
318static inline void __v3_tx_user_ready(struct tpacket3_hdr *hdr)
319{
320 hdr->tp_status = TP_STATUS_SEND_REQUEST;
321 __sync_synchronize();
322}
323
324static inline int __tx_kernel_ready(void *base, int version)
315{ 325{
316 switch (version) { 326 switch (version) {
317 case TPACKET_V1: 327 case TPACKET_V1:
318 return __v1_tx_kernel_ready(base); 328 return __v1_tx_kernel_ready(base);
319 case TPACKET_V2: 329 case TPACKET_V2:
320 return __v2_tx_kernel_ready(base); 330 return __v2_tx_kernel_ready(base);
331 case TPACKET_V3:
332 return __v3_tx_kernel_ready(base);
321 default: 333 default:
322 bug_on(1); 334 bug_on(1);
323 return 0; 335 return 0;
324 } 336 }
325} 337}
326 338
327static inline void __v1_v2_tx_user_ready(void *base, int version) 339static inline void __tx_user_ready(void *base, int version)
328{ 340{
329 switch (version) { 341 switch (version) {
330 case TPACKET_V1: 342 case TPACKET_V1:
@@ -333,6 +345,9 @@ static inline void __v1_v2_tx_user_ready(void *base, int version)
333 case TPACKET_V2: 345 case TPACKET_V2:
334 __v2_tx_user_ready(base); 346 __v2_tx_user_ready(base);
335 break; 347 break;
348 case TPACKET_V3:
349 __v3_tx_user_ready(base);
350 break;
336 } 351 }
337} 352}
338 353
@@ -348,7 +363,22 @@ static void __v1_v2_set_packet_loss_discard(int sock)
348 } 363 }
349} 364}
350 365
351static void walk_v1_v2_tx(int sock, struct ring *ring) 366static inline void *get_next_frame(struct ring *ring, int n)
367{
368 uint8_t *f0 = ring->rd[0].iov_base;
369
370 switch (ring->version) {
371 case TPACKET_V1:
372 case TPACKET_V2:
373 return ring->rd[n].iov_base;
374 case TPACKET_V3:
375 return f0 + (n * ring->req3.tp_frame_size);
376 default:
377 bug_on(1);
378 }
379}
380
381static void walk_tx(int sock, struct ring *ring)
352{ 382{
353 struct pollfd pfd; 383 struct pollfd pfd;
354 int rcv_sock, ret; 384 int rcv_sock, ret;
@@ -360,9 +390,19 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
360 .sll_family = PF_PACKET, 390 .sll_family = PF_PACKET,
361 .sll_halen = ETH_ALEN, 391 .sll_halen = ETH_ALEN,
362 }; 392 };
393 int nframes;
394
395 /* TPACKET_V{1,2} sets up the ring->rd* related variables based
396 * on frames (e.g., rd_num is tp_frame_nr) whereas V3 sets these
397 * up based on blocks (e.g, rd_num is tp_block_nr)
398 */
399 if (ring->version <= TPACKET_V2)
400 nframes = ring->rd_num;
401 else
402 nframes = ring->req3.tp_frame_nr;
363 403
364 bug_on(ring->type != PACKET_TX_RING); 404 bug_on(ring->type != PACKET_TX_RING);
365 bug_on(ring->rd_num < NUM_PACKETS); 405 bug_on(nframes < NUM_PACKETS);
366 406
367 rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); 407 rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
368 if (rcv_sock == -1) { 408 if (rcv_sock == -1) {
@@ -388,10 +428,11 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
388 create_payload(packet, &packet_len); 428 create_payload(packet, &packet_len);
389 429
390 while (total_packets > 0) { 430 while (total_packets > 0) {
391 while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, 431 void *next = get_next_frame(ring, frame_num);
392 ring->version) && 432
433 while (__tx_kernel_ready(next, ring->version) &&
393 total_packets > 0) { 434 total_packets > 0) {
394 ppd.raw = ring->rd[frame_num].iov_base; 435 ppd.raw = next;
395 436
396 switch (ring->version) { 437 switch (ring->version) {
397 case TPACKET_V1: 438 case TPACKET_V1:
@@ -413,14 +454,27 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
413 packet_len); 454 packet_len);
414 total_bytes += ppd.v2->tp_h.tp_snaplen; 455 total_bytes += ppd.v2->tp_h.tp_snaplen;
415 break; 456 break;
457 case TPACKET_V3: {
458 struct tpacket3_hdr *tx = next;
459
460 tx->tp_snaplen = packet_len;
461 tx->tp_len = packet_len;
462 tx->tp_next_offset = 0;
463
464 memcpy((uint8_t *)tx + TPACKET3_HDRLEN -
465 sizeof(struct sockaddr_ll), packet,
466 packet_len);
467 total_bytes += tx->tp_snaplen;
468 break;
469 }
416 } 470 }
417 471
418 status_bar_update(); 472 status_bar_update();
419 total_packets--; 473 total_packets--;
420 474
421 __v1_v2_tx_user_ready(ppd.raw, ring->version); 475 __tx_user_ready(next, ring->version);
422 476
423 frame_num = (frame_num + 1) % ring->rd_num; 477 frame_num = (frame_num + 1) % nframes;
424 } 478 }
425 479
426 poll(&pfd, 1, 1); 480 poll(&pfd, 1, 1);
@@ -460,7 +514,7 @@ static void walk_v1_v2(int sock, struct ring *ring)
460 if (ring->type == PACKET_RX_RING) 514 if (ring->type == PACKET_RX_RING)
461 walk_v1_v2_rx(sock, ring); 515 walk_v1_v2_rx(sock, ring);
462 else 516 else
463 walk_v1_v2_tx(sock, ring); 517 walk_tx(sock, ring);
464} 518}
465 519
466static uint64_t __v3_prev_block_seq_num = 0; 520static uint64_t __v3_prev_block_seq_num = 0;
@@ -546,7 +600,6 @@ static void walk_v3_rx(int sock, struct ring *ring)
546 bug_on(ring->type != PACKET_RX_RING); 600 bug_on(ring->type != PACKET_RX_RING);
547 601
548 pair_udp_open(udp_sock, PORT_BASE); 602 pair_udp_open(udp_sock, PORT_BASE);
549 pair_udp_setfilter(sock);
550 603
551 memset(&pfd, 0, sizeof(pfd)); 604 memset(&pfd, 0, sizeof(pfd));
552 pfd.fd = sock; 605 pfd.fd = sock;
@@ -583,7 +636,7 @@ static void walk_v3(int sock, struct ring *ring)
583 if (ring->type == PACKET_RX_RING) 636 if (ring->type == PACKET_RX_RING)
584 walk_v3_rx(sock, ring); 637 walk_v3_rx(sock, ring);
585 else 638 else
586 bug_on(1); 639 walk_tx(sock, ring);
587} 640}
588 641
589static void __v1_v2_fill(struct ring *ring, unsigned int blocks) 642static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
@@ -602,12 +655,13 @@ static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
602 ring->flen = ring->req.tp_frame_size; 655 ring->flen = ring->req.tp_frame_size;
603} 656}
604 657
605static void __v3_fill(struct ring *ring, unsigned int blocks) 658static void __v3_fill(struct ring *ring, unsigned int blocks, int type)
606{ 659{
607 ring->req3.tp_retire_blk_tov = 64; 660 if (type == PACKET_RX_RING) {
608 ring->req3.tp_sizeof_priv = 0; 661 ring->req3.tp_retire_blk_tov = 64;
609 ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; 662 ring->req3.tp_sizeof_priv = 0;
610 663 ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
664 }
611 ring->req3.tp_block_size = getpagesize() << 2; 665 ring->req3.tp_block_size = getpagesize() << 2;
612 ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; 666 ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
613 ring->req3.tp_block_nr = blocks; 667 ring->req3.tp_block_nr = blocks;
@@ -641,7 +695,7 @@ static void setup_ring(int sock, struct ring *ring, int version, int type)
641 break; 695 break;
642 696
643 case TPACKET_V3: 697 case TPACKET_V3:
644 __v3_fill(ring, blocks); 698 __v3_fill(ring, blocks, type);
645 ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, 699 ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
646 sizeof(ring->req3)); 700 sizeof(ring->req3));
647 break; 701 break;
@@ -685,6 +739,8 @@ static void bind_ring(int sock, struct ring *ring)
685{ 739{
686 int ret; 740 int ret;
687 741
742 pair_udp_setfilter(sock);
743
688 ring->ll.sll_family = PF_PACKET; 744 ring->ll.sll_family = PF_PACKET;
689 ring->ll.sll_protocol = htons(ETH_P_ALL); 745 ring->ll.sll_protocol = htons(ETH_P_ALL);
690 ring->ll.sll_ifindex = if_nametoindex("lo"); 746 ring->ll.sll_ifindex = if_nametoindex("lo");
@@ -796,6 +852,7 @@ int main(void)
796 ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); 852 ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING);
797 853
798 ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); 854 ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING);
855 ret |= test_tpacket(TPACKET_V3, PACKET_TX_RING);
799 856
800 if (ret) 857 if (ret)
801 return 1; 858 return 1;