author    Zoltan Kiss <zoltan.kiss@citrix.com>    2014-03-06 16:48:26 -0500
committer David S. Miller <davem@davemloft.net>   2014-03-07 15:56:35 -0500
commit    f53c3fe8dad725b014e9c7682720d8e3e2a8a5b3
tree      11cb77466fbb32cd1ca6f84a5ea0daca233a49c0 /drivers/net/xen-netback/interface.c
parent    3e2234b3149f66bc4be2343a3a0f637d922e4a36
xen-netback: Introduce TX grant mapping
This patch introduces grant mapping on the netback TX path. It replaces grant copy operations, ditching grant copy coalescing along the way. Another solution for copy coalescing is introduced in "xen-netback: Handle guests with too many frags"; older guests and Windows could break until that patch is applied.

There is a callback (xenvif_zerocopy_callback) from the core stack to release the slots back to the guest when kfree_skb() or skb_orphan_frags() is called. It feeds a separate dealloc thread, because scheduling a NAPI instance from the callback is inefficient, so deallocation cannot be done from the NAPI instance itself.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
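For context: the TX zerocopy path works by giving each pending slot a struct ubuf_info whose callback the core stack invokes once the last reference to the mapped frags is dropped. xenvif_zerocopy_callback itself is defined in drivers/net/xen-netback/netback.c and is not part of this file's diff; what follows is only a minimal sketch of that pattern, using the 3.14-era ubuf_info callback signature, where ubuf_to_vif() and queue_slot_for_dealloc() are hypothetical helper names.

	#include <linux/skbuff.h>
	#include <linux/wait.h>

	/* Sketch only -- not the actual implementation. The callback fires on
	 * kfree_skb() or skb_orphan_frags(); it must not unmap the grant
	 * itself, only hand the slot to the dealloc thread.
	 */
	void example_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
	{
		struct xenvif *vif = ubuf_to_vif(ubuf);   /* hypothetical container_of wrapper */
		u16 pending_idx = ubuf->desc;             /* slot index stored by xenvif_alloc */

		queue_slot_for_dealloc(vif, pending_idx); /* hypothetical; defers the unmap */
		wake_up(&vif->dealloc_wq);                /* kick the dealloc kthread */
	}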
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	65
1 file changed, 61 insertions, 4 deletions
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index bc32627a22cb..1fe9fe523cc8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -38,6 +38,7 @@
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
 
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT 64
@@ -87,7 +88,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
+		if (!(more_to_do &&
+		      xenvif_tx_pending_slots_available(vif)))
 			__napi_complete(napi);
 
 		local_irq_restore(flags);
@@ -121,7 +123,9 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL || !xenvif_schedulable(vif))
+	if (vif->task == NULL ||
+	    vif->dealloc_task == NULL ||
+	    !xenvif_schedulable(vif))
 		goto drop;
 
 	/* At best we'll need one slot for the header and one for each
@@ -343,8 +347,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->pending_prod = MAX_PENDING_REQS;
 	for (i = 0; i < MAX_PENDING_REQS; i++)
 		vif->pending_ring[i] = i;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		vif->mmap_pages[i] = NULL;
+	spin_lock_init(&vif->callback_lock);
+	spin_lock_init(&vif->response_lock);
+	/* If ballooning is disabled, this will consume real memory, so you
+	 * better enable it. The long term solution would be to use just a
+	 * bunch of valid page descriptors, without dependency on ballooning
+	 */
+	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+				       vif->mmap_pages,
+				       false);
+	if (err) {
+		netdev_err(dev, "Could not reserve mmap_pages\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	for (i = 0; i < MAX_PENDING_REQS; i++) {
+		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
+			{ .callback = xenvif_zerocopy_callback,
+			  .ctx = NULL,
+			  .desc = i };
+		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+	}
 
 	/*
 	 * Initialise a dummy MAC address. We choose the numerically
@@ -382,12 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
 	BUG_ON(vif->tx_irq);
 	BUG_ON(vif->task);
+	BUG_ON(vif->dealloc_task);
 
 	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
 	if (err < 0)
 		goto err;
 
 	init_waitqueue_head(&vif->wq);
+	init_waitqueue_head(&vif->dealloc_wq);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
@@ -431,6 +455,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
 	vif->task = task;
 
+	task = kthread_create(xenvif_dealloc_kthread,
+			      (void *)vif, "%s-dealloc", vif->dev->name);
+	if (IS_ERR(task)) {
+		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+		err = PTR_ERR(task);
+		goto err_rx_unbind;
+	}
+
+	vif->dealloc_task = task;
+
 	rtnl_lock();
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -441,6 +475,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	rtnl_unlock();
 
 	wake_up_process(vif->task);
+	wake_up_process(vif->dealloc_task);
 
 	return 0;
 
@@ -478,6 +513,11 @@ void xenvif_disconnect(struct xenvif *vif)
 		vif->task = NULL;
 	}
 
+	if (vif->dealloc_task) {
+		kthread_stop(vif->dealloc_task);
+		vif->dealloc_task = NULL;
+	}
+
 	if (vif->tx_irq) {
 		if (vif->tx_irq == vif->rx_irq)
 			unbind_from_irqhandler(vif->tx_irq, vif);
@@ -493,6 +533,23 @@ void xenvif_disconnect(struct xenvif *vif)
 
 void xenvif_free(struct xenvif *vif)
 {
+	int i, unmap_timeout = 0;
+
+	for (i = 0; i < MAX_PENDING_REQS; ++i) {
+		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+			unmap_timeout++;
+			schedule_timeout(msecs_to_jiffies(1000));
+			if (unmap_timeout > 9 &&
+			    net_ratelimit())
+				netdev_err(vif->dev,
+					   "Page still granted! Index: %x\n",
+					   i);
+			i = -1;
+		}
+	}
+
+	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+
 	netif_napi_del(&vif->napi);
 
 	unregister_netdev(vif->dev);
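
The dealloc thread created in xenvif_connect above runs xenvif_dealloc_kthread, which lives in drivers/net/xen-netback/netback.c and therefore does not appear in this diff. A rough sketch of the shape of such a loop, assuming helper names tx_dealloc_work_todo() ("are any slots queued for unmap?") and xenvif_tx_dealloc_action() ("batch-unmap them and return the slots to the pending ring"):

	#include <linux/kthread.h>
	#include <linux/wait.h>

	/* Sketch only -- the real loop is in netback.c. */
	int example_dealloc_kthread(void *data)
	{
		struct xenvif *vif = data;

		while (!kthread_should_stop()) {
			/* Sleep until the zerocopy callback queues work or
			 * xenvif_disconnect() calls kthread_stop().
			 */
			wait_event_interruptible(vif->dealloc_wq,
						 tx_dealloc_work_todo(vif) ||
						 kthread_should_stop());
			if (kthread_should_stop())
				break;

			xenvif_tx_dealloc_action(vif); /* unmap granted pages in bulk */
		}

		return 0;
	}

Keeping the unmap here, rather than in the callback, keeps the hypercall-heavy grant unmapping off the hot path and lets one wakeup drain many slots at once.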