Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c | 813
1 file changed, 558 insertions(+), 255 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 438d0c09b7e6..3f021e054ba1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <linux/if_vlan.h>
 #include <linux/udp.h>
+#include <linux/highmem.h>
 
 #include <net/tcp.h>
 
@@ -54,6 +55,13 @@
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
+/* When guest ring is filled up, qdisc queues the packets for us, but we have
+ * to timeout them, otherwise other guests' packets can get stuck there
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
 /*
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
-	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 			       u8 status);
 
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
+#define callback_param(vif, pending_idx) \
+	(vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer in pending_tx_info array
+ */
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+	u16 pending_idx = ubuf->desc;
+	struct pending_tx_info *temp =
+		container_of(ubuf, struct pending_tx_info, callback_struct);
+	return container_of(temp - pending_idx,
+			    struct xenvif,
+			    pending_tx_info[0]);
+}
+
 /* This is a miniumum size for the linear area to avoid lots of
  * calls to __pskb_pull_tail() as we set up checksum offsets. The
  * value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
-	return MAX_PENDING_REQS -
-		vif->pending_prod + vif->pending_cons;
-}
-
 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
 	RING_IDX prod, cons;
@@ -192,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * into multiple copies tend to give large frags their
 	 * own buffers as before.
 	 */
-	if ((offset + size > MAX_BUFFER_OFFSET) &&
-	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
+	BUG_ON(size > MAX_BUFFER_OFFSET);
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
 		return true;
 
 	return false;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 				 struct netrx_pending_operations *npo,
 				 struct page *page, unsigned long size,
-				 unsigned long offset, int *head)
+				 unsigned long offset, int *head,
+				 struct xenvif *foreign_vif,
+				 grant_ref_t foreign_gref)
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		copy_gop->flags = GNTCOPY_dest_gref;
 		copy_gop->len = bytes;
 
-		copy_gop->source.domid = DOMID_SELF;
-		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+		if (foreign_vif) {
+			copy_gop->source.domid = foreign_vif->domid;
+			copy_gop->source.u.ref = foreign_gref;
+			copy_gop->flags |= GNTCOPY_source_gref;
+		} else {
+			copy_gop->source.domid = DOMID_SELF;
+			copy_gop->source.u.gmfn =
+				virt_to_mfn(page_address(page));
+		}
 		copy_gop->source.offset = offset;
 
 		copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
+	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+	struct xenvif *foreign_vif = NULL;
 
 	old_meta_prod = npo->meta_prod;
 
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	npo->copy_off = 0;
 	npo->copy_gref = req->gref;
 
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+	    (ubuf->callback == &xenvif_zerocopy_callback)) {
+		int i = 0;
+		foreign_vif = ubuf_to_vif(ubuf);
+
+		do {
+			u16 pending_idx = ubuf->desc;
+			foreign_grefs[i++] =
+				foreign_vif->pending_tx_info[pending_idx].req.gref;
+			ubuf = (struct ubuf_info *) ubuf->ctx;
+		} while (ubuf);
+	}
+
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 			len = skb_tail_pointer(skb) - data;
 
 		xenvif_gop_frag_copy(vif, skb, npo,
-				     virt_to_page(data), len, offset, &head);
+				     virt_to_page(data), len, offset, &head,
+				     NULL,
+				     0);
 		data += len;
 	}
 
390 416
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
393 skb_frag_page(&skb_shinfo(skb)->frags[i]), 419 skb_frag_page(&skb_shinfo(skb)->frags[i]),
394 skb_frag_size(&skb_shinfo(skb)->frags[i]), 420 skb_frag_size(&skb_shinfo(skb)->frags[i]),
395 skb_shinfo(skb)->frags[i].page_offset, 421 skb_shinfo(skb)->frags[i].page_offset,
396 &head); 422 &head,
423 foreign_vif,
424 foreign_grefs[i]);
397 } 425 }
398 426
399 return npo->meta_prod - old_meta_prod; 427 return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
 	}
 }
 
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
 	int meta_slots_used;
 };
 
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	LIST_HEAD(notify);
 	int ret;
 	unsigned long offset;
-	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
 
 	struct netrx_pending_operations npo = {
@@ -482,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		RING_IDX max_slots_needed;
+		RING_IDX old_req_cons;
+		RING_IDX ring_slots_used;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -493,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 					       PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
@@ -510,9 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
 		} else
 			vif->rx_last_skb_slots = 0;
 
-		sco = (struct skb_cb_overlay *)skb->cb;
-		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(sco->meta_slots_used > max_slots_needed);
+		old_req_cons = vif->rx.req_cons;
+		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
+		ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+		BUG_ON(ring_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}
@@ -526,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-		sco = (struct skb_cb_overlay *)skb->cb;
 
 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
 		    vif->gso_prefix_mask) {
@@ -537,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 			resp->offset = vif->meta[npo.meta_cons].gso_size;
 			resp->id = vif->meta[npo.meta_cons].id;
-			resp->status = sco->meta_slots_used;
+			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
 
 			npo.meta_cons++;
-			sco->meta_slots_used--;
+			XENVIF_RX_CB(skb)->meta_slots_used--;
 		}
 
 
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
 
-		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+		status = xenvif_check_gop(vif,
+					  XENVIF_RX_CB(skb)->meta_slots_used,
+					  &npo);
 
-		if (sco->meta_slots_used == 1)
+		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
 			flags = 0;
 		else
 			flags = XEN_NETRXF_more_data;
@@ -586,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 		xenvif_add_frag_responses(vif, status,
 					  vif->meta + npo.meta_cons + 1,
-					  sco->meta_slots_used);
+					  XENVIF_RX_CB(skb)->meta_slots_used);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
 		need_to_notify |= !!ret;
 
-		npo.meta_cons += sco->meta_slots_used;
+		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
@@ -642,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
 			  struct xen_netif_tx_request *txp, RING_IDX end)
 {
 	RING_IDX cons = vif->tx.req_cons;
+	unsigned long flags;
 
 	do {
+		spin_lock_irqsave(&vif->response_lock, flags);
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+		spin_unlock_irqrestore(&vif->response_lock, flags);
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -655,7 +711,8 @@ static void xenvif_tx_err(struct xenvif *vif,
 static void xenvif_fatal_tx_err(struct xenvif *vif)
 {
 	netdev_err(vif->dev, "fatal error; disabling device\n");
-	xenvif_carrier_off(vif);
+	vif->disabled = true;
+	xenvif_kick_thread(vif);
 }
 
 static int xenvif_count_requests(struct xenvif *vif,
@@ -756,180 +813,168 @@ static int xenvif_count_requests(struct xenvif *vif,
 	return slots;
 }
 
-static struct page *xenvif_alloc_page(struct xenvif *vif,
-				      u16 pending_idx)
+
+struct xenvif_tx_cb {
+	u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_gop(struct xenvif *vif,
+					u16 pending_idx,
+					struct xen_netif_tx_request *txp,
+					struct gnttab_map_grant_ref *gop)
 {
-	struct page *page;
+	vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+	gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+			  GNTMAP_host_map | GNTMAP_readonly,
+			  txp->gref, vif->domid);
+
+	memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+	       sizeof(*txp));
+}
 
-	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-	if (!page)
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+	struct sk_buff *skb =
+		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+			  GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(skb == NULL))
 		return NULL;
-	vif->mmap_pages[pending_idx] = page;
 
-	return page;
+	/* Packets passed to netif_rx() must have some headroom. */
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+	/* Initialize it here to avoid later surprises */
+	skb_shinfo(skb)->destructor_arg = NULL;
+
+	return skb;
 }
 
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
-					       struct sk_buff *skb,
-					       struct xen_netif_tx_request *txp,
-					       struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+							struct sk_buff *skb,
+							struct xen_netif_tx_request *txp,
+							struct gnttab_map_grant_ref *gop)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = *((u16 *)skb->data);
-	u16 head_idx = 0;
-	int slot, start;
-	struct page *page;
-	pending_ring_idx_t index, start_idx = 0;
-	uint16_t dst_offset;
-	unsigned int nr_slots;
-	struct pending_tx_info *first = NULL;
+	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	int start;
+	pending_ring_idx_t index;
+	unsigned int nr_slots, frag_overflow = 0;
 
 	/* At this point shinfo->nr_frags is in fact the number of
 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 	 */
+	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+		shinfo->nr_frags = MAX_SKB_FRAGS;
+	}
 	nr_slots = shinfo->nr_frags;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
-	/* Coalesce tx requests, at this point the packet passed in
-	 * should be <= 64K. Any packets larger than 64K have been
-	 * handled in xenvif_count_requests().
-	 */
-	for (shinfo->nr_frags = slot = start; slot < nr_slots;
-	     shinfo->nr_frags++) {
-		struct pending_tx_info *pending_tx_info =
-			vif->pending_tx_info;
+	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+	     shinfo->nr_frags++, txp++, gop++) {
+		index = pending_index(vif->pending_cons++);
+		pending_idx = vif->pending_ring[index];
+		xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+	}
 
-		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-		if (!page)
-			goto err;
-
-		dst_offset = 0;
-		first = NULL;
-		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
-			gop->flags = GNTCOPY_source_gref;
-
-			gop->source.u.ref = txp->gref;
-			gop->source.domid = vif->domid;
-			gop->source.offset = txp->offset;
-
-			gop->dest.domid = DOMID_SELF;
-
-			gop->dest.offset = dst_offset;
-			gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
-			if (dst_offset + txp->size > PAGE_SIZE) {
-				/* This page can only merge a portion
-				 * of tx request. Do not increment any
-				 * pointer / counter here. The txp
-				 * will be dealt with in future
-				 * rounds, eventually hitting the
-				 * `else` branch.
-				 */
-				gop->len = PAGE_SIZE - dst_offset;
-				txp->offset += gop->len;
-				txp->size -= gop->len;
-				dst_offset += gop->len; /* quit loop */
-			} else {
-				/* This tx request can be merged in the page */
-				gop->len = txp->size;
-				dst_offset += gop->len;
-
-				index = pending_index(vif->pending_cons++);
-
-				pending_idx = vif->pending_ring[index];
-
-				memcpy(&pending_tx_info[pending_idx].req, txp,
-				       sizeof(*txp));
-
-				/* Poison these fields, corresponding
-				 * fields for head tx req will be set
-				 * to correct values after the loop.
-				 */
-				vif->mmap_pages[pending_idx] = (void *)(~0UL);
-				pending_tx_info[pending_idx].head =
-					INVALID_PENDING_RING_IDX;
-
-				if (!first) {
-					first = &pending_tx_info[pending_idx];
-					start_idx = index;
-					head_idx = pending_idx;
-				}
-
-				txp++;
-				slot++;
-			}
-
-			gop++;
+	if (frag_overflow) {
+		struct sk_buff *nskb = xenvif_alloc_skb(0);
+		if (unlikely(nskb == NULL)) {
+			if (net_ratelimit())
+				netdev_err(vif->dev,
+					   "Can't allocate the frag_list skb.\n");
+			return NULL;
+		}
+
+		shinfo = skb_shinfo(nskb);
+		frags = shinfo->frags;
+
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		     shinfo->nr_frags++, txp++, gop++) {
+			index = pending_index(vif->pending_cons++);
+			pending_idx = vif->pending_ring[index];
+			xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+			frag_set_pending_idx(&frags[shinfo->nr_frags],
+					     pending_idx);
 		}
 
-		first->req.offset = 0;
-		first->req.size = dst_offset;
-		first->head = start_idx;
-		vif->mmap_pages[head_idx] = page;
-		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+		skb_shinfo(skb)->frag_list = nskb;
 	}
 
-	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
 	return gop;
-err:
-	/* Unwind, freeing all pages and sending error responses. */
-	while (shinfo->nr_frags-- > start) {
-		xenvif_idx_release(vif,
-				frag_get_pending_idx(&frags[shinfo->nr_frags]),
-				XEN_NETIF_RSP_ERROR);
+}
+
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+					   u16 pending_idx,
+					   grant_handle_t handle)
+{
+	if (unlikely(vif->grant_tx_handle[pending_idx] !=
+		     NETBACK_INVALID_HANDLE)) {
+		netdev_err(vif->dev,
+			   "Trying to overwrite active handle! pending_idx: %x\n",
+			   pending_idx);
+		BUG();
 	}
-	/* The head too, if necessary. */
-	if (start)
-		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+	vif->grant_tx_handle[pending_idx] = handle;
+}
 
-	return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+					     u16 pending_idx)
+{
+	if (unlikely(vif->grant_tx_handle[pending_idx] ==
+		     NETBACK_INVALID_HANDLE)) {
+		netdev_err(vif->dev,
+			   "Trying to unmap invalid handle! pending_idx: %x\n",
+			   pending_idx);
+		BUG();
+	}
+	vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 }
 
 static int xenvif_tx_check_gop(struct xenvif *vif,
 			       struct sk_buff *skb,
-			       struct gnttab_copy **gopp)
+			       struct gnttab_map_grant_ref **gopp)
 {
-	struct gnttab_copy *gop = *gopp;
-	u16 pending_idx = *((u16 *)skb->data);
+	struct gnttab_map_grant_ref *gop = *gopp;
+	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	struct pending_tx_info *tx_info;
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
-	u16 peek; /* peek into next tx request */
+	struct sk_buff *first_skb = NULL;
 
 	/* Check status of header. */
 	err = gop->status;
 	if (unlikely(err))
 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+	else
+		xenvif_grant_handle_set(vif, pending_idx , gop->handle);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
+check_frags:
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t head;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 		tx_info = &vif->pending_tx_info[pending_idx];
-		head = tx_info->head;
 
 		/* Check error status: if okay then remember grant handle. */
-		do {
-			newerr = (++gop)->status;
-			if (newerr)
-				break;
-			peek = vif->pending_ring[pending_index(++head)];
-		} while (!pending_tx_is_head(vif, peek));
+		newerr = (++gop)->status;
 
 		if (likely(!newerr)) {
+			xenvif_grant_handle_set(vif, pending_idx , gop->handle);
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xenvif_idx_release(vif, pending_idx,
-						   XEN_NETIF_RSP_OKAY);
+				xenvif_idx_unmap(vif, pending_idx);
 			continue;
 		}
 
@@ -939,20 +984,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-
 		/* First error: invalidate header and preceding fragments. */
-		pending_idx = *((u16 *)skb->data);
-		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+		if (!first_skb)
+			pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		else
+			pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		xenvif_idx_unmap(vif, pending_idx);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xenvif_idx_release(vif, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
+			xenvif_idx_unmap(vif, pending_idx);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
 		err = newerr;
 	}
 
+	if (skb_has_frag_list(skb)) {
+		first_skb = skb;
+		skb = shinfo->frag_list;
+		shinfo = skb_shinfo(skb);
+		nr_frags = shinfo->nr_frags;
+		start = 0;
+
+		goto check_frags;
+	}
+
+	/* There was a mapping error in the frag_list skb. We have to unmap
+	 * the first skb's frags
+	 */
+	if (first_skb && err) {
+		int j;
+		shinfo = skb_shinfo(first_skb);
+		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+		for (j = start; j < shinfo->nr_frags; j++) {
+			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+			xenvif_idx_unmap(vif, pending_idx);
+		}
+	}
+
 	*gopp = gop + 1;
 	return err;
 }
@@ -962,6 +1032,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i;
+	u16 prev_pending_idx = INVALID_PENDING_IDX;
+
+	if (skb_shinfo(skb)->destructor_arg)
+		prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 
 	for (i = 0; i < nr_frags; i++) {
 		skb_frag_t *frag = shinfo->frags + i;
@@ -971,6 +1045,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 
 		pending_idx = frag_get_pending_idx(frag);
 
+		/* If this is not the first frag, chain it to the previous*/
+		if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+			skb_shinfo(skb)->destructor_arg =
+				&callback_param(vif, pending_idx);
+		else if (likely(pending_idx != prev_pending_idx))
+			callback_param(vif, prev_pending_idx).ctx =
+				&callback_param(vif, pending_idx);
+
+		callback_param(vif, pending_idx).ctx = NULL;
+		prev_pending_idx = pending_idx;
+
 		txp = &vif->pending_tx_info[pending_idx].req;
 		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -978,10 +1063,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 		skb->data_len += txp->size;
 		skb->truesize += txp->size;
 
-		/* Take an extra reference to offset xenvif_idx_release */
+		/* Take an extra reference to offset network stack's put_page */
 		get_page(vif->mmap_pages[pending_idx]);
-		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
+	/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
+	 * overlaps with "index", and "mapping" is not set. I think mapping
+	 * should be set. If delivered to local stack, it would drop this
+	 * skb in sk_filter unless the socket has the right to use it.
+	 */
+	skb->pfmemalloc = false;
 }
 
 static int xenvif_get_extras(struct xenvif *vif,
@@ -1101,16 +1191,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 
 static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
-	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+	struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
 	struct sk_buff *skb;
 	int ret;
 
-	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS) &&
-	       (skb_queue_len(&vif->tx_queue) < budget)) {
+	while (skb_queue_len(&vif->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
-		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
 		RING_IDX idx;
@@ -1126,7 +1213,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 				   vif->tx.sring->req_prod, vif->tx.req_cons,
 				   XEN_NETIF_TX_RING_SIZE);
 			xenvif_fatal_tx_err(vif);
-			continue;
+			break;
 		}
 
 		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
@@ -1186,8 +1273,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
 			PKT_PROT_LEN : txreq.size;
 
-		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
-				GFP_ATOMIC | __GFP_NOWARN);
+		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
@@ -1195,9 +1281,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 			break;
 		}
 
-		/* Packets passed to netif_rx() must have some headroom. */
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1209,31 +1292,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 			}
 		}
 
-		/* XXX could copy straight to head */
-		page = xenvif_alloc_page(vif, pending_idx);
-		if (!page) {
-			kfree_skb(skb);
-			xenvif_tx_err(vif, &txreq, idx);
-			break;
-		}
-
-		gop->source.u.ref = txreq.gref;
-		gop->source.domid = vif->domid;
-		gop->source.offset = txreq.offset;
-
-		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-		gop->dest.domid = DOMID_SELF;
-		gop->dest.offset = txreq.offset;
-
-		gop->len = txreq.size;
-		gop->flags = GNTCOPY_source_gref;
+		xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
 
 		gop++;
 
-		memcpy(&vif->pending_tx_info[pending_idx].req,
-		       &txreq, sizeof(txreq));
-		vif->pending_tx_info[pending_idx].head = index;
-		*((u16 *)skb->data) = pending_idx;
+		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
 		__skb_put(skb, data_len);
 
@@ -1261,17 +1324,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 
 		vif->tx.req_cons = idx;
 
-		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+		if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
 			break;
 	}
 
-	return gop - vif->tx_copy_ops;
+	return gop - vif->tx_map_ops;
 }
 
+/* Consolidate skb with a frag_list into a brand new one with local pages on
+ * frags. Returns 0 or -ENOMEM if can't allocate new pages.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+	unsigned int offset = skb_headlen(skb);
+	skb_frag_t frags[MAX_SKB_FRAGS];
+	int i;
+	struct ubuf_info *uarg;
+	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+	vif->tx_zerocopy_sent += 2;
+	vif->tx_frag_overflow++;
+
+	xenvif_fill_frags(vif, nskb);
+	/* Subtract frags size, we will correct it later */
+	skb->truesize -= skb->data_len;
+	skb->len += nskb->len;
+	skb->data_len += nskb->len;
+
+	/* create a brand new frags array and coalesce there */
+	for (i = 0; offset < skb->len; i++) {
+		struct page *page;
+		unsigned int len;
+
+		BUG_ON(i >= MAX_SKB_FRAGS);
+		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+		if (!page) {
+			int j;
+			skb->truesize += skb->data_len;
+			for (j = 0; j < i; j++)
+				put_page(frags[j].page.p);
+			return -ENOMEM;
+		}
+
+		if (offset + PAGE_SIZE < skb->len)
+			len = PAGE_SIZE;
+		else
+			len = skb->len - offset;
+		if (skb_copy_bits(skb, offset, page_address(page), len))
+			BUG();
+
+		offset += len;
+		frags[i].page.p = page;
+		frags[i].page_offset = 0;
+		skb_frag_size_set(&frags[i], len);
+	}
+	/* swap out with old one */
+	memcpy(skb_shinfo(skb)->frags,
+	       frags,
+	       i * sizeof(skb_frag_t));
+	skb_shinfo(skb)->nr_frags = i;
+	skb->truesize += i * PAGE_SIZE;
+
+	/* remove traces of mapped pages and frag_list */
+	skb_frag_list_init(skb);
+	uarg = skb_shinfo(skb)->destructor_arg;
+	uarg->callback(uarg, true);
+	skb_shinfo(skb)->destructor_arg = NULL;
+
+	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	kfree_skb(nskb);
+
+	return 0;
+}
 
 static int xenvif_tx_submit(struct xenvif *vif)
 {
-	struct gnttab_copy *gop = vif->tx_copy_ops;
+	struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
 	struct sk_buff *skb;
 	int work_done = 0;
 
@@ -1280,7 +1408,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
 		u16 pending_idx;
 		unsigned data_len;
 
-		pending_idx = *((u16 *)skb->data);
+		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 		txp = &vif->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
@@ -1295,14 +1423,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
 		memcpy(skb->data,
 		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
 		       data_len);
+		callback_param(vif, pending_idx).ctx = NULL;
 		if (data_len < txp->size) {
 			/* Append the packet payload as a fragment. */
 			txp->offset += data_len;
 			txp->size -= data_len;
+			skb_shinfo(skb)->destructor_arg =
+				&callback_param(vif, pending_idx);
 		} else {
 			/* Schedule a response immediately. */
-			xenvif_idx_release(vif, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
+			xenvif_idx_unmap(vif, pending_idx);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1312,6 +1442,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
 		xenvif_fill_frags(vif, skb);
 
+		if (unlikely(skb_has_frag_list(skb))) {
+			if (xenvif_handle_frag_list(vif, skb)) {
+				if (net_ratelimit())
+					netdev_err(vif->dev,
+						   "Not enough memory to consolidate frag_list!\n");
+				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+				kfree_skb(skb);
+				continue;
+			}
+		}
+
 		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
 			int target = min_t(int, skb->len, PKT_PROT_LEN);
 			__pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1324,6 +1465,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
 		if (checksum_setup(vif, skb)) {
 			netdev_dbg(vif->dev,
 				   "Can't setup checksum in net_tx_action\n");
+			/* We have to set this flag to trigger the callback */
+			if (skb_shinfo(skb)->destructor_arg)
+				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 			kfree_skb(skb);
 			continue;
 		}
@@ -1349,17 +1493,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
 		work_done++;
 
+		/* Set this flag right before netif_receive_skb, otherwise
+		 * someone might think this packet already left netback, and
+		 * do a skb_copy_ubufs while we are still in control of the
+		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
+		 */
+		if (skb_shinfo(skb)->destructor_arg) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			vif->tx_zerocopy_sent++;
+		}
+
 		netif_receive_skb(skb);
 	}
 
 	return work_done;
 }
 
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+	unsigned long flags;
+	pending_ring_idx_t index;
+	struct xenvif *vif = ubuf_to_vif(ubuf);
+
+	/* This is the only place where we grab this lock, to protect callbacks
+	 * from each other.
+	 */
+	spin_lock_irqsave(&vif->callback_lock, flags);
+	do {
+		u16 pending_idx = ubuf->desc;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+		BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+			MAX_PENDING_REQS);
+		index = pending_index(vif->dealloc_prod);
+		vif->dealloc_ring[index] = pending_idx;
+		/* Sync with xenvif_tx_dealloc_action:
+		 * insert idx then incr producer.
+		 */
+		smp_wmb();
+		vif->dealloc_prod++;
+	} while (ubuf);
+	wake_up(&vif->dealloc_wq);
+	spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+	if (likely(zerocopy_success))
+		vif->tx_zerocopy_success++;
+	else
+		vif->tx_zerocopy_fail++;
+}
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+	struct gnttab_unmap_grant_ref *gop;
+	pending_ring_idx_t dc, dp;
+	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+	unsigned int i = 0;
+
+	dc = vif->dealloc_cons;
+	gop = vif->tx_unmap_ops;
+
+	/* Free up any grants we have finished using */
+	do {
+		dp = vif->dealloc_prod;
+
+		/* Ensure we see all indices enqueued by all
+		 * xenvif_zerocopy_callback().
+		 */
+		smp_rmb();
+
+		while (dc != dp) {
+			BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+			pending_idx =
+				vif->dealloc_ring[pending_index(dc++)];
+
+			pending_idx_release[gop-vif->tx_unmap_ops] =
+				pending_idx;
+			vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+				vif->mmap_pages[pending_idx];
+			gnttab_set_unmap_op(gop,
+					    idx_to_kaddr(vif, pending_idx),
+					    GNTMAP_host_map,
+					    vif->grant_tx_handle[pending_idx]);
+			xenvif_grant_handle_reset(vif, pending_idx);
+			++gop;
+		}
+
+	} while (dp != vif->dealloc_prod);
+
+	vif->dealloc_cons = dc;
+
+	if (gop - vif->tx_unmap_ops > 0) {
+		int ret;
+		ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+					NULL,
+					vif->pages_to_unmap,
+					gop - vif->tx_unmap_ops);
+		if (ret) {
+			netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+				   gop - vif->tx_unmap_ops, ret);
+			for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+				if (gop[i].status != GNTST_okay)
+					netdev_err(vif->dev,
+						   " host_addr: %llx handle: %x status: %d\n",
+						   gop[i].host_addr,
+						   gop[i].handle,
+						   gop[i].status);
+			}
+			BUG();
+		}
+	}
+
+	for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+		xenvif_idx_release(vif, pending_idx_release[i],
+				   XEN_NETIF_RSP_OKAY);
+}
+
+
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif *vif, int budget)
 {
 	unsigned nr_gops;
-	int work_done;
+	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(vif)))
 		return 0;
@@ -1369,7 +1622,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
 	if (nr_gops == 0)
 		return 0;
 
-	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+	ret = gnttab_map_refs(vif->tx_map_ops,
+			      NULL,
+			      vif->pages_to_map,
+			      nr_gops);
+	BUG_ON(ret);
 
 	work_done = xenvif_tx_submit(vif);
 
@@ -1380,45 +1637,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
 			       u8 status)
 {
 	struct pending_tx_info *pending_tx_info;
-	pending_ring_idx_t head;
-	u16 peek; /* peek into next tx request */
-
-	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
-	/* Already complete? */
-	if (vif->mmap_pages[pending_idx] == NULL)
-		return;
+	pending_ring_idx_t index;
+	unsigned long flags;
 
 	pending_tx_info = &vif->pending_tx_info[pending_idx];
-
-	head = pending_tx_info->head;
-
-	BUG_ON(!pending_tx_is_head(vif, head));
-	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
-	do {
-		pending_ring_idx_t index;
-		pending_ring_idx_t idx = pending_index(head);
-		u16 info_idx = vif->pending_ring[idx];
-
-		pending_tx_info = &vif->pending_tx_info[info_idx];
-		make_tx_response(vif, &pending_tx_info->req, status);
-
-		/* Setting any number other than
-		 * INVALID_PENDING_RING_IDX indicates this slot is
-		 * starting a new packet / ending a previous packet.
-		 */
-		pending_tx_info->head = 0;
-
-		index = pending_index(vif->pending_prod++);
-		vif->pending_ring[index] = vif->pending_ring[info_idx];
-
-		peek = vif->pending_ring[pending_index(++head)];
-
-	} while (!pending_tx_is_head(vif, peek));
-
-	put_page(vif->mmap_pages[pending_idx]);
-	vif->mmap_pages[pending_idx] = NULL;
+	spin_lock_irqsave(&vif->response_lock, flags);
+	make_tx_response(vif, &pending_tx_info->req, status);
+	index = pending_index(vif->pending_prod);
+	vif->pending_ring[index] = pending_idx;
+	/* TX shouldn't use the index before we give it back here */
+	mb();
+	vif->pending_prod++;
+	spin_unlock_irqrestore(&vif->response_lock, flags);
 }
 
 
@@ -1466,23 +1696,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 	return resp;
 }
 
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+	int ret;
+	struct gnttab_unmap_grant_ref tx_unmap_op;
+
+	gnttab_set_unmap_op(&tx_unmap_op,
+			    idx_to_kaddr(vif, pending_idx),
+			    GNTMAP_host_map,
+			    vif->grant_tx_handle[pending_idx]);
+	xenvif_grant_handle_reset(vif, pending_idx);
+
+	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+				&vif->mmap_pages[pending_idx], 1);
+	if (ret) {
+		netdev_err(vif->dev,
+			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+			   ret,
+			   pending_idx,
+			   tx_unmap_op.host_addr,
+			   tx_unmap_op.handle,
+			   tx_unmap_op.status);
+		BUG();
+	}
+
+	xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue) &&
-	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+	return (!skb_queue_empty(&vif->rx_queue) &&
+	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+	       vif->rx_queue_purge;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
 {
 
-	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-	     < MAX_PENDING_REQS))
+	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
 		return 1;
 
 	return 0;
 }
 
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+	return vif->dealloc_cons != vif->dealloc_prod;
+}
+
 void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
 	if (vif->tx.sring)
@@ -1540,7 +1801,7 @@ static void xenvif_start_queue(struct xenvif *vif)
 	netif_wake_queue(vif->dev);
 }
 
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
 {
 	struct xenvif *vif = data;
 	struct sk_buff *skb;
@@ -1548,16 +1809,34 @@ int xenvif_kthread(void *data)
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(vif->wq,
 					 rx_work_todo(vif) ||
+					 vif->disabled ||
 					 kthread_should_stop());
+
+		/* This frontend is found to be rogue, disable it in
+		 * kthread context. Currently this is only set when
+		 * netback finds out frontend sends malformed packet,
+		 * but we cannot disable the interface in softirq
+		 * context so we defer it here.
+		 */
+		if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
+			xenvif_carrier_off(vif);
+
 		if (kthread_should_stop())
 			break;
 
+		if (vif->rx_queue_purge) {
+			skb_queue_purge(&vif->rx_queue);
+			vif->rx_queue_purge = false;
+		}
+
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
 		if (skb_queue_empty(&vif->rx_queue) &&
-		    netif_queue_stopped(vif->dev))
+		    netif_queue_stopped(vif->dev)) {
+			del_timer_sync(&vif->wake_queue);
 			xenvif_start_queue(vif);
+		}
 
 		cond_resched();
 	}
@@ -1569,6 +1848,28 @@ int xenvif_kthread(void *data)
 	return 0;
 }
 
+int xenvif_dealloc_kthread(void *data)
+{
+	struct xenvif *vif = data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(vif->dealloc_wq,
+					 tx_dealloc_work_todo(vif) ||
+					 kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		xenvif_tx_dealloc_action(vif);
+		cond_resched();
+	}
+
+	/* Unmap anything remaining*/
+	if (tx_dealloc_work_todo(vif))
+		xenvif_tx_dealloc_action(vif);
+
+	return 0;
+}
+
 static int __init netback_init(void)
 {
 	int rc = 0;
@@ -1586,6 +1887,8 @@ static int __init netback_init(void)
 	if (rc)
 		goto failed_init;
 
+	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
 	return 0;
 
 failed_init: