diff options
author | Jesper Dangaard Brouer <brouer@redhat.com> | 2018-05-24 10:45:46 -0400 |
---|---|---|
committer | Alexei Starovoitov <ast@kernel.org> | 2018-05-24 21:36:14 -0400 |
commit | 67f29e07e131ffa13ea158c259a513f474c7df27 (patch) | |
tree | 1ca129546e7e79d65b91fab1e8be77c8cbc66db9 /kernel/bpf/devmap.c | |
parent | f80acbd233382619f597f785f8c238084dc62e21 (diff) |
bpf: devmap introduce dev_map_enqueue
Functionality is the same, but the ndo_xdp_xmit call is now
simply invoked from inside the devmap.c code.
V2: Fix compile issue reported by kbuild test robot <lkp@intel.com>
V5: Cleanups requested by Daniel
- Newlines before func definition
- Use BUILD_BUG_ON checks
- Remove unnecessary use return value store in dev_map_enqueue
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/devmap.c')
-rw-r--r-- | kernel/bpf/devmap.c | 34 |
1 file changed, 28 insertions, 6 deletions
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 565f9ece9115..06c400e7e4ff 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c | |||
@@ -48,13 +48,15 @@ | |||
48 | * calls will fail at this point. | 48 | * calls will fail at this point. |
49 | */ | 49 | */ |
50 | #include <linux/bpf.h> | 50 | #include <linux/bpf.h> |
51 | #include <net/xdp.h> | ||
51 | #include <linux/filter.h> | 52 | #include <linux/filter.h> |
53 | #include <trace/events/xdp.h> | ||
52 | 54 | ||
53 | #define DEV_CREATE_FLAG_MASK \ | 55 | #define DEV_CREATE_FLAG_MASK \ |
54 | (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) | 56 | (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) |
55 | 57 | ||
56 | struct bpf_dtab_netdev { | 58 | struct bpf_dtab_netdev { |
57 | struct net_device *dev; | 59 | struct net_device *dev; /* must be first member, due to tracepoint */ |
58 | struct bpf_dtab *dtab; | 60 | struct bpf_dtab *dtab; |
59 | unsigned int bit; | 61 | unsigned int bit; |
60 | struct rcu_head rcu; | 62 | struct rcu_head rcu; |
@@ -240,21 +242,38 @@ void __dev_map_flush(struct bpf_map *map) | |||
240 | * update happens in parallel here a dev_put wont happen until after reading the | 242 | * update happens in parallel here a dev_put wont happen until after reading the |
241 | * ifindex. | 243 | * ifindex. |
242 | */ | 244 | */ |
243 | struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) | 245 | struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key) |
244 | { | 246 | { |
245 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); | 247 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
246 | struct bpf_dtab_netdev *dev; | 248 | struct bpf_dtab_netdev *obj; |
247 | 249 | ||
248 | if (key >= map->max_entries) | 250 | if (key >= map->max_entries) |
249 | return NULL; | 251 | return NULL; |
250 | 252 | ||
251 | dev = READ_ONCE(dtab->netdev_map[key]); | 253 | obj = READ_ONCE(dtab->netdev_map[key]); |
252 | return dev ? dev->dev : NULL; | 254 | return obj; |
255 | } | ||
256 | |||
257 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp) | ||
258 | { | ||
259 | struct net_device *dev = dst->dev; | ||
260 | struct xdp_frame *xdpf; | ||
261 | |||
262 | if (!dev->netdev_ops->ndo_xdp_xmit) | ||
263 | return -EOPNOTSUPP; | ||
264 | |||
265 | xdpf = convert_to_xdp_frame(xdp); | ||
266 | if (unlikely(!xdpf)) | ||
267 | return -EOVERFLOW; | ||
268 | |||
269 | /* TODO: implement a bulking/enqueue step later */ | ||
270 | return dev->netdev_ops->ndo_xdp_xmit(dev, xdpf); | ||
253 | } | 271 | } |
254 | 272 | ||
255 | static void *dev_map_lookup_elem(struct bpf_map *map, void *key) | 273 | static void *dev_map_lookup_elem(struct bpf_map *map, void *key) |
256 | { | 274 | { |
257 | struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key); | 275 | struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); |
276 | struct net_device *dev = dev = obj ? obj->dev : NULL; | ||
258 | 277 | ||
259 | return dev ? &dev->ifindex : NULL; | 278 | return dev ? &dev->ifindex : NULL; |
260 | } | 279 | } |
@@ -405,6 +424,9 @@ static struct notifier_block dev_map_notifier = { | |||
405 | 424 | ||
406 | static int __init dev_map_init(void) | 425 | static int __init dev_map_init(void) |
407 | { | 426 | { |
427 | /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */ | ||
428 | BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != | ||
429 | offsetof(struct _bpf_dtab_netdev, dev)); | ||
408 | register_netdevice_notifier(&dev_map_notifier); | 430 | register_netdevice_notifier(&dev_map_notifier); |
409 | return 0; | 431 | return 0; |
410 | } | 432 | } |