about summary refs log tree commit diff stats
path: root/drivers/net/xen-netback/interface.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--  drivers/net/xen-netback/interface.c  56
1 files changed, 23 insertions, 33 deletions
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index bfd10cb9c8de..e29e15dca86e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+	atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -524,9 +541,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 
 	init_timer(&queue->rx_stalled);
 
-	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-		       XENVIF_NAPI_WEIGHT);
-
 	return 0;
 }
 
@@ -560,6 +574,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
 	init_waitqueue_head(&queue->wq);
 	init_waitqueue_head(&queue->dealloc_wq);
+	atomic_set(&queue->inflight_packets, 0);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
@@ -614,6 +629,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	wake_up_process(queue->task);
 	wake_up_process(queue->dealloc_task);
 
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
 	return 0;
 
 err_rx_unbind:
@@ -642,25 +660,6 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
-				      unsigned int worst_case_skb_lifetime)
-{
-	int i, unmap_timeout = 0;
-
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			i = -1;
-		}
-	}
-}
-
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
@@ -672,6 +671,8 @@ void xenvif_disconnect(struct xenvif *vif)
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
+		netif_napi_del(&queue->napi);
+
 		if (queue->task) {
 			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
@@ -704,7 +705,6 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
 	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
-	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
@@ -712,21 +712,11 @@ void xenvif_free(struct xenvif *vif)
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
-	/* Here we want to avoid timeout messages if an skb can be legitimately
-	 * stuck somewhere else. Realistically this could be an another vif's
-	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
-	 * Although if that other guest wakes up just before its timeout happens
-	 * and takes only one skb from QDisc, it can hold onto other skbs for a
-	 * longer period.
-	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
 		xenvif_deinit_queue(queue);
 	}
 