author	Paul Durrant <Paul.Durrant@citrix.com>	2013-10-16 12:50:32 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-17 15:35:17 -0400
commit	82cada22a0bbec6a7afb573ef5fb6c512aaa2739 (patch)
tree	509d5a4bea1dc636885572a4c5d85e9e7bbce32b	/drivers/net/xen-netback/netback.c
parent	a94685876859be30446357db6d6c4a9c951305b4 (diff)
xen-netback: enable IPv6 TCP GSO to the guest
This patch adds code to handle SKB_GSO_TCPV6 skbs and construct appropriate
extra or prefix segments to pass the large packet to the frontend. New
xenstore flags, feature-gso-tcpv6 and feature-gso-tcpv6-prefix, are sampled
to determine if the frontend is capable of handling such packets.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
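For context, a minimal sketch of how the backend side might sample these
xenstore flags at connect time and fold them into the per-GSO-type bit masks
tested throughout the diff below. The helper function here is a hypothetical
illustration, not part of this patch; xenbus_scanf, XBT_NIL and the
XEN_NETIF_GSO_TYPE_* constants are existing interfaces, and gso_mask /
gso_prefix_mask are the vif fields this patch relies on.

/* Illustrative sketch only: the connect-path helper is an assumption.
 * It reads the frontend's advertised GSO features from xenstore and
 * records them as bits indexed by XEN_NETIF_GSO_TYPE_*, which is the
 * representation the (1 << gso_type) & vif->gso_mask tests below expect. */
static void sample_frontend_gso_features(struct xenvif *vif,
					 struct xenbus_device *dev)
{
	int val;

	/* Existing IPv4 flag: frontend accepts TCPv4 GSO extra segments. */
	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-gso-tcpv4", "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= 1 << XEN_NETIF_GSO_TYPE_TCPV4;

	/* New flag: TCPv6 GSO delivered via an extra (trailing) segment. */
	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-gso-tcpv6", "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= 1 << XEN_NETIF_GSO_TYPE_TCPV6;

	/* New flag: TCPv6 GSO delivered via a prefix segment instead. */
	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-gso-tcpv6-prefix", "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= 1 << XEN_NETIF_GSO_TYPE_TCPV6;
}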
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	48
1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0e327d46a139..828fdab4f1a4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -142,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
 	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
 	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso || vif->gso_prefix)
+	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
 		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
 	return max;
@@ -314,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
 	meta = npo->meta + npo->meta_prod++;
+	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 	meta->gso_size = 0;
 	meta->size = 0;
 	meta->id = req->id;
@@ -336,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
 	unsigned long bytes;
+	int gso_type;
 
 	/* Data must not cross a page boundary. */
 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -394,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 
 	/* Leave a gap for the GSO descriptor. */
-	if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+	else
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+	if (*head && ((1 << gso_type) & vif->gso_mask))
 		vif->rx.req_cons++;
 
 	*head = 0; /* There must be something in this buffer now. */
@@ -425,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	unsigned char *data;
 	int head = 1;
 	int old_meta_prod;
+	int gso_type;
+	int gso_size;
 
 	old_meta_prod = npo->meta_prod;
 
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+		gso_size = skb_shinfo(skb)->gso_size;
+	} else {
+		gso_type = XEN_NETIF_GSO_TYPE_NONE;
+		gso_size = 0;
+	}
+
 	/* Set up a GSO prefix descriptor, if necessary */
-	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 		meta = npo->meta + npo->meta_prod++;
-		meta->gso_size = skb_shinfo(skb)->gso_size;
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
 		meta->size = 0;
 		meta->id = req->id;
 	}
@@ -440,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 	meta = npo->meta + npo->meta_prod++;
 
-	if (!vif->gso_prefix)
-		meta->gso_size = skb_shinfo(skb)->gso_size;
-	else
+	if ((1 << gso_type) & vif->gso_mask) {
+		meta->gso_type = gso_type;
+		meta->gso_size = gso_size;
+	} else {
+		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 		meta->gso_size = 0;
+	}
 
 	meta->size = 0;
 	meta->id = req->id;
@@ -589,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
 		vif = netdev_priv(skb->dev);
 
-		if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
 						 vif->rx.rsp_prod_pvt++);
 
@@ -626,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
 					vif->meta[npo.meta_cons].size,
 					flags);
 
-		if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+		if ((1 << vif->meta[npo.meta_cons].gso_type) &
+		    vif->gso_mask) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&vif->rx,
@@ -634,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
+			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
 
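For reference, a sketch of how a frontend consuming this ring might dispatch
on the GSO type now carried per-packet in the extra segment (before this
patch the backend hardcoded XEN_NETIF_GSO_TYPE_TCPV4). The function below is
an illustrative assumption, not code taken from xen-netfront.

/* Hedged sketch: frontend-side handling of the GSO extra segment.
 * With this patch the backend fills in u.gso.type per packet, so a
 * frontend must dispatch on it rather than assume TCPv4. */
static int handle_gso_extra(struct sk_buff *skb,
			    struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size)
		return -EINVAL;	/* a zero MSS is invalid */

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		return -EINVAL;	/* unknown type: drop the packet */
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_segs = 0;	/* let the stack recompute */

	return 0;
}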