author     Vlad Yasevich <vyasevic@redhat.com>   2013-06-25 16:04:22 -0400
committer  David S. Miller <davem@davemloft.net>  2013-06-25 19:45:23 -0400
commit     3e4f8b787370978733ca6cae452720a4f0c296b8 (patch)
tree       dea29bd9127db364d49599759f3a63df4a4babf7 /drivers/net/macvtap.c
parent     2be5c76794b0e570aa87b012df5ac864ce668a74 (diff)
macvtap: Perform GSO on forwarding path.

When macvtap forwards an skb to its tap, it needs to check whether GSO
needs to be performed. This is sometimes necessary when the HW device
performed GRO but the guest reading from the tap does not support it
(e.g., Windows 7).

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
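In outline, the patch masks the skb's computed offload features with the
feature set the tap reader has accepted before deciding whether to
segment. A minimal sketch of that decision, using the same kernel
helpers as the patch below (tap_needs_sw_gso is a hypothetical name;
tap_features stands in for vlan->tap_features):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: does this skb need software GSO before it is
 * handed to the tap reader?  A guest that never enabled TSO (e.g.
 * Windows 7) leaves the TSO bits clear in tap_features, so a
 * GRO-coalesced TCP skb makes netif_needs_gso() return true here.
 */
static bool tap_needs_sw_gso(struct sk_buff *skb,
			     netdev_features_t tap_features)
{
	netdev_features_t features;

	features = netif_skb_features(skb) & tap_features;
	return netif_needs_gso(skb, features);
}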
Diffstat (limited to 'drivers/net/macvtap.c')
-rw-r--r--   drivers/net/macvtap.c   32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 7eab01975ed1..5bfaecdd2354 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -276,14 +276,44 @@ static void macvtap_del_queues(struct net_device *dev)
  */
 static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
+	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+	netdev_features_t features;
 	if (!q)
 		goto drop;
 
 	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
 		goto drop;
 
-	skb_queue_tail(&q->sk.sk_receive_queue, skb);
+	skb->dev = dev;
+	/* Apply the forward feature mask so that we perform segmentation
+	 * according to users wishes.
+	 */
+	features = netif_skb_features(skb) & vlan->tap_features;
+	if (netif_needs_gso(skb, features)) {
+		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+		if (IS_ERR(segs))
+			goto drop;
+
+		if (!segs) {
+			skb_queue_tail(&q->sk.sk_receive_queue, skb);
+			goto wake_up;
+		}
+
+		kfree_skb(skb);
+		while (segs) {
+			struct sk_buff *nskb = segs->next;
+
+			segs->next = NULL;
+			skb_queue_tail(&q->sk.sk_receive_queue, segs);
+			segs = nskb;
+		}
+	} else {
+		skb_queue_tail(&q->sk.sk_receive_queue, skb);
+	}
+
+wake_up:
 	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
 	return NET_RX_SUCCESS;
 
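For reference, the segment-and-queue pattern from the hunk above, pulled
out as a self-contained sketch. queue_gso_segments is a hypothetical
name; the __skb_gso_segment() contract is as the patch itself relies on
it: ERR_PTR on failure, NULL when no segmentation was needed, otherwise
a list of segments chained through skb->next.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper mirroring the patch: segment skb against
 * "features" and queue the results.  Consumes skb on success; on
 * error the caller still owns skb and is expected to drop it.
 */
static int queue_gso_segments(struct sk_buff_head *queue,
			      struct sk_buff *skb,
			      netdev_features_t features)
{
	struct sk_buff *segs = __skb_gso_segment(skb, features, false);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	if (!segs) {
		/* No segmentation needed: queue the skb unchanged. */
		skb_queue_tail(queue, skb);
		return 0;
	}

	/* The data now lives in the segment list; free the original. */
	kfree_skb(skb);
	while (segs) {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		skb_queue_tail(queue, segs);
		segs = nskb;
	}
	return 0;
}

Note that a NULL return from __skb_gso_segment() is not an error: it
means the skb did not need segmenting under the given feature mask,
which is why the patch queues the original skb and jumps straight to
wake_up.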