author     Jakub Kicinski <jakub.kicinski@netronome.com>   2018-01-11 23:29:12 -0500
committer  Daniel Borkmann <daniel@iogearbox.net>          2018-01-14 17:36:30 -0500
commit     ff3d43f7568c82b335d7df2d40a31447c3fce10c (patch)
tree       85ebe3e5875454a89cec26315b838f8dd7ea3f40 /drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
parent     d48ae231c5e13d98e3664443c6342c2011f5df2b (diff)
nfp: bpf: implement helpers for FW map ops
Implement calls for FW map communication.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/bpf/cmsg.c')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c  210
1 file changed, 209 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 46753ee9f7c5..71e6586acc36 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/bpf.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/jiffies.h>
@@ -79,6 +80,28 @@ static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
 		bpf->tag_alloc_last++;
 }
 
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+	skb_put(skb, size);
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
+
+	size = sizeof(struct cmsg_req_map_op);
+	size += sizeof(struct cmsg_key_value_pair) * n;
+
+	return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
 static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
 {
 	struct cmsg_hdr *hdr;
@@ -159,7 +182,7 @@ nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
 	return skb;
 }
 
-struct sk_buff *
+static struct sk_buff *
 nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
 			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
 {
@@ -206,6 +229,191 @@ err_free:
 	return ERR_PTR(-EIO);
 }
 
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+			 struct cmsg_reply_map_simple *reply)
+{
+	static const int res_table[] = {
+		[CMSG_RC_SUCCESS]	= 0,
+		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
+		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
+		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
+		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
+		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
+		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
+		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
+	};
+	u32 rc;
+
+	rc = be32_to_cpu(reply->rc);
+	if (rc >= ARRAY_SIZE(res_table)) {
+		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+		return -EIO;
+	}
+
+	return res_table[rc];
+}
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+	struct cmsg_reply_map_alloc_tbl *reply;
+	struct cmsg_req_map_alloc_tbl *req;
+	struct sk_buff *skb;
+	u32 tid;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->key_size = cpu_to_be32(map->key_size);
+	req->value_size = cpu_to_be32(map->value_size);
+	req->max_entries = cpu_to_be32(map->max_entries);
+	req->map_type = cpu_to_be32(map->map_type);
+	req->map_flags = 0;
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+				       sizeof(*reply));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	tid = be32_to_cpu(reply->tid);
+	dev_consume_skb_any(skb);
+
+	return tid;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+	struct cmsg_reply_map_free_tbl *reply;
+	struct cmsg_req_map_free_tbl *req;
+	struct sk_buff *skb;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb) {
+		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+		return;
+	}
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+				       sizeof(*reply));
+	if (IS_ERR(skb)) {
+		cmsg_warn(bpf, "leaking map - I/O error\n");
+		return;
+	}
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+	dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+		      enum nfp_bpf_cmsg_type op,
+		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+	struct nfp_app_bpf *bpf = nfp_map->bpf;
+	struct bpf_map *map = &offmap->map;
+	struct cmsg_reply_map_op *reply;
+	struct cmsg_req_map_op *req;
+	struct sk_buff *skb;
+	int err;
+
+	/* FW messages have no space for more than 32 bits of flags */
+	if (flags >> 32)
+		return -EOPNOTSUPP;
+
+	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+	req->count = cpu_to_be32(1);
+	req->flags = cpu_to_be32(flags);
+
+	/* Copy inputs */
+	if (key)
+		memcpy(&req->elem[0].key, key, map->key_size);
+	if (value)
+		memcpy(&req->elem[0].value, value, map->value_size);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+				       sizeof(*reply) + sizeof(*reply->elem));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	/* Copy outputs */
+	if (out_key)
+		memcpy(out_key, &reply->elem[0].key, map->key_size);
+	if (out_value)
+		memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+	dev_consume_skb_any(skb);
+
+	return 0;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value, u64 flags)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+				     key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+				     key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+				     key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+				void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+				     NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+			       void *key, void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+				     key, NULL, 0, next_key, NULL);
+}
+
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_app_bpf *bpf = app->priv;
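For context, here is a minimal sketch of how these helpers could be hooked into the kernel's map offload callbacks (struct bpf_map_dev_ops, added by the map offload infrastructure work in the same series). The wrapper name nfp_bpf_map_get_next_key and this exact wiring are illustrative assumptions for this sketch, not part of the patch above:

/* Illustrative sketch only - not part of this commit.  Assumes the
 * bpf_map_dev_ops / bpf_offloaded_map interface from the concurrent
 * map offload infrastructure patches.
 */
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	/* A NULL key from the BPF core means "return the first key". */
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_ctrl_lookup_entry,
	.map_update_elem	= nfp_bpf_ctrl_update_entry,
	.map_delete_elem	= nfp_bpf_ctrl_del_entry,
};

Note that nfp_bpf_ctrl_alloc_map() returns long long int so a single return value can carry either the FW-assigned 32-bit table id or a negative errno; a caller would stash the id in nfp_bpf_map->tid before registering callbacks like the ones sketched above.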