summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin KaFai Lau <kafai@fb.com>2019-04-26 19:39:44 -0400
committerAlexei Starovoitov <ast@kernel.org>2019-04-27 12:07:05 -0400
commita19f89f3667c950ad13c1560e4abd8aa8526b6b1 (patch)
treee4c776cef6196ebc9599dc4939529453ff028bba
parent948d930e3d531e81dc6a2c864bda25618dfe7ff0 (diff)
bpf: Support BPF_MAP_TYPE_SK_STORAGE in bpf map probing
This patch supports probing for the new BPF_MAP_TYPE_SK_STORAGE. BPF_MAP_TYPE_SK_STORAGE enforces BTF usage, so the new probe also needs to create and load a BTF. Signed-off-by: Martin KaFai Lau <kafai@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--tools/bpf/bpftool/map.c1
-rw-r--r--tools/lib/bpf/libbpf_probes.c74
2 files changed, 74 insertions, 1 deletions
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e6dcb3653a77..e951d45c0131 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -46,6 +46,7 @@ const char * const map_type_name[] = {
46 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage", 46 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
47 [BPF_MAP_TYPE_QUEUE] = "queue", 47 [BPF_MAP_TYPE_QUEUE] = "queue",
48 [BPF_MAP_TYPE_STACK] = "stack", 48 [BPF_MAP_TYPE_STACK] = "stack",
49 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
49}; 50};
50 51
51const size_t map_type_name_size = ARRAY_SIZE(map_type_name); 52const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 80ee922f290c..a2c64a9ce1a6 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -9,6 +9,7 @@
9#include <net/if.h> 9#include <net/if.h>
10#include <sys/utsname.h> 10#include <sys/utsname.h>
11 11
12#include <linux/btf.h>
12#include <linux/filter.h> 13#include <linux/filter.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14 15
@@ -131,11 +132,65 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
131 return errno != EINVAL && errno != EOPNOTSUPP; 132 return errno != EINVAL && errno != EOPNOTSUPP;
132} 133}
133 134
135static int load_btf(void)
136{
137#define BTF_INFO_ENC(kind, kind_flag, vlen) \
138 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
139#define BTF_TYPE_ENC(name, info, size_or_type) \
140 (name), (info), (size_or_type)
141#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
142 ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
143#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
144 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
145 BTF_INT_ENC(encoding, bits_offset, bits)
146#define BTF_MEMBER_ENC(name, type, bits_offset) \
147 (name), (type), (bits_offset)
148
149 const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
150 /* struct bpf_spin_lock {
151 * int val;
152 * };
153 * struct val {
154 * int cnt;
155 * struct bpf_spin_lock l;
156 * };
157 */
158 __u32 btf_raw_types[] = {
159 /* int */
160 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
161 /* struct bpf_spin_lock */ /* [2] */
162 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
163 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
164 /* struct val */ /* [3] */
165 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
166 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
167 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
168 };
169 struct btf_header btf_hdr = {
170 .magic = BTF_MAGIC,
171 .version = BTF_VERSION,
172 .hdr_len = sizeof(struct btf_header),
173 .type_len = sizeof(btf_raw_types),
174 .str_off = sizeof(btf_raw_types),
175 .str_len = sizeof(btf_str_sec),
176 };
177 __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
178 sizeof(btf_str_sec)];
179
180 memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
181 memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
182 memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
183 btf_str_sec, sizeof(btf_str_sec));
184
185 return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
186}
187
134bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) 188bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
135{ 189{
136 int key_size, value_size, max_entries, map_flags; 190 int key_size, value_size, max_entries, map_flags;
191 __u32 btf_key_type_id = 0, btf_value_type_id = 0;
137 struct bpf_create_map_attr attr = {}; 192 struct bpf_create_map_attr attr = {};
138 int fd = -1, fd_inner; 193 int fd = -1, btf_fd = -1, fd_inner;
139 194
140 key_size = sizeof(__u32); 195 key_size = sizeof(__u32);
141 value_size = sizeof(__u32); 196 value_size = sizeof(__u32);
@@ -161,6 +216,16 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
161 case BPF_MAP_TYPE_STACK: 216 case BPF_MAP_TYPE_STACK:
162 key_size = 0; 217 key_size = 0;
163 break; 218 break;
219 case BPF_MAP_TYPE_SK_STORAGE:
220 btf_key_type_id = 1;
221 btf_value_type_id = 3;
222 value_size = 8;
223 max_entries = 0;
224 map_flags = BPF_F_NO_PREALLOC;
225 btf_fd = load_btf();
226 if (btf_fd < 0)
227 return false;
228 break;
164 case BPF_MAP_TYPE_UNSPEC: 229 case BPF_MAP_TYPE_UNSPEC:
165 case BPF_MAP_TYPE_HASH: 230 case BPF_MAP_TYPE_HASH:
166 case BPF_MAP_TYPE_ARRAY: 231 case BPF_MAP_TYPE_ARRAY:
@@ -206,11 +271,18 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
206 attr.max_entries = max_entries; 271 attr.max_entries = max_entries;
207 attr.map_flags = map_flags; 272 attr.map_flags = map_flags;
208 attr.map_ifindex = ifindex; 273 attr.map_ifindex = ifindex;
274 if (btf_fd >= 0) {
275 attr.btf_fd = btf_fd;
276 attr.btf_key_type_id = btf_key_type_id;
277 attr.btf_value_type_id = btf_value_type_id;
278 }
209 279
210 fd = bpf_create_map_xattr(&attr); 280 fd = bpf_create_map_xattr(&attr);
211 } 281 }
212 if (fd >= 0) 282 if (fd >= 0)
213 close(fd); 283 close(fd);
284 if (btf_fd >= 0)
285 close(btf_fd);
214 286
215 return fd >= 0; 287 return fd >= 0;
216} 288}