Diffstat (limited to 'drivers/net/macvtap.c')
-rw-r--r--  drivers/net/macvtap.c | 60
1 file changed, 32 insertions(+), 28 deletions(-)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 4a34bcb6549f..64409af0da31 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -70,6 +70,11 @@ static const struct proto_ops macvtap_socket_ops;
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+        return rcu_dereference(dev->rx_handler_data);
+}
+
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
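For readers new to the rx_handler API: dev->rx_handler_data is the opaque pointer supplied as the third argument of netdev_rx_handler_register() (done in macvtap_newlink() further down), and rx_handlers run under rcu_read_lock(), which is why a plain rcu_dereference() suffices in the helper above. A minimal read-side sketch, purely illustrative (the example_priv structure and handler are made up, not part of this patch):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct example_priv {                           /* hypothetical per-device state */
        unsigned long frames_seen;
};

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
        /* rx_handlers are invoked under rcu_read_lock() by the core. */
        struct example_priv *p = rcu_dereference((*pskb)->dev->rx_handler_data);

        if (p)
                p->frames_seen++;
        return RX_HANDLER_PASS;                 /* never consume in this sketch */
}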
@@ -271,24 +276,27 @@ static void macvtap_del_queues(struct net_device *dev)
                sock_put(&qlist[j]->sk);
 }
 
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
 {
-        struct macvlan_dev *vlan = netdev_priv(dev);
-        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+        struct sk_buff *skb = *pskb;
+        struct net_device *dev = skb->dev;
+        struct macvlan_dev *vlan;
+        struct macvtap_queue *q;
         netdev_features_t features = TAP_FEATURES;
 
+        vlan = macvtap_get_vlan_rcu(dev);
+        if (!vlan)
+                return RX_HANDLER_PASS;
+
+        q = macvtap_get_queue(dev, skb);
         if (!q)
-                goto drop;
+                return RX_HANDLER_PASS;
 
         if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                 goto drop;
 
-        skb->dev = dev;
+        skb_push(skb, ETH_HLEN);
+
         /* Apply the forward feature mask so that we perform segmentation
          * according to users wishes. This only works if VNET_HDR is
          * enabled.
@@ -320,22 +328,13 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 
 wake_up:
         wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-        return NET_RX_SUCCESS;
+        return RX_HANDLER_CONSUMED;
 
 drop:
+        /* Count errors/drops only here, thus don't care about args. */
+        macvlan_count_rx(vlan, 0, 0, 0);
         kfree_skb(skb);
-        return NET_RX_DROP;
-}
-
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
-{
-        skb_push(skb, ETH_HLEN);
-        return macvtap_forward(skb->dev, skb);
+        return RX_HANDLER_CONSUMED;
 }
 
 static int macvtap_get_minor(struct macvlan_dev *vlan)
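The rewritten handler follows the usual rx_handler contract: return RX_HANDLER_PASS to hand the frame back to the normal receive path, or RX_HANDLER_CONSUMED once the handler has taken ownership of the skb, whether it queued it or dropped it, which is why both the wake_up and drop paths above now return RX_HANDLER_CONSUMED. The skb_push(skb, ETH_HLEN) replaces the old macvtap_receive() helper: eth_type_trans() has already pulled the Ethernet header by the time an rx_handler runs, and the tap reader expects a full L2 frame. A self-contained sketch of that contract, not taken from the patch:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t sketch_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        /* Not interesting here: let the stack keep processing the frame. */
        if (skb->protocol != htons(ETH_P_IP))
                return RX_HANDLER_PASS;

        /*
         * From this point the handler owns the skb; it must queue or free
         * it and report CONSUMED so the core never touches it again.
         */
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}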
@@ -385,6 +384,8 @@ static int macvtap_newlink(struct net *src_net,
                            struct nlattr *data[])
 {
         struct macvlan_dev *vlan = netdev_priv(dev);
+        int err;
+
         INIT_LIST_HEAD(&vlan->queue_list);
 
         /* Since macvlan supports all offloads by default, make
@@ -392,16 +393,20 @@ static int macvtap_newlink(struct net *src_net,
          */
         vlan->tap_features = TUN_OFFLOADS;
 
+        err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+        if (err)
+                return err;
+
         /* Don't put anything that may fail after macvlan_common_newlink
          * because we can't undo what it does.
          */
-        return macvlan_common_newlink(src_net, dev, tb, data,
-                                      macvtap_receive, macvtap_forward);
+        return macvlan_common_newlink(src_net, dev, tb, data);
 }
 
 static void macvtap_dellink(struct net_device *dev,
                             struct list_head *head)
 {
+        netdev_rx_handler_unregister(dev);
         macvtap_del_queues(dev);
         macvlan_dellink(dev, head);
 }
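The netdev_rx_handler_register() call has to come before macvlan_common_newlink() because, per the existing comment, nothing that may fail can be placed after it; the matching netdev_rx_handler_unregister() is the first step of macvtap_dellink(). Both calls run with RTNL held, and registration fails if the device already has an rx_handler attached, e.g. when it is enslaved to a bridge. A rough sketch of the pairing, with hypothetical names and a handler such as the one sketched above:

#include <linux/netdevice.h>

static rx_handler_result_t sketch_handle_frame(struct sk_buff **pskb);

static int sketch_attach(struct net_device *dev, void *priv)
{
        int err;

        /* Caller holds RTNL; fails (-EBUSY) if another handler is attached. */
        err = netdev_rx_handler_register(dev, sketch_handle_frame, priv);
        if (err)
                return err;

        /* Any later failure path must call netdev_rx_handler_unregister(). */
        return 0;
}

static void sketch_detach(struct net_device *dev)
{
        netdev_rx_handler_unregister(dev);      /* also under RTNL */
}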
@@ -725,9 +730,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
         }
         if (vlan) {
-                local_bh_disable();
-                macvlan_start_xmit(skb, vlan->dev);
-                local_bh_enable();
+                skb->dev = vlan->dev;
+                dev_queue_xmit(skb);
         } else {
                 kfree_skb(skb);
         }
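On the transmit side, frames written by userspace are no longer pushed through macvlan_start_xmit() with bottom halves disabled by hand; setting skb->dev and calling dev_queue_xmit() hands them to the regular transmit path, which takes care of its own locking and qdisc handling. The generic pattern, as an illustrative sketch (the function name is made up):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;         /* dev_queue_xmit() transmits via skb->dev */
        return dev_queue_xmit(skb);
}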