Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	62
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 48a55cda979b..e29e15dca86e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+	atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
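The two helpers added above bracket the lifetime of every zerocopy skb: prepare() is called before an skb backed by grant-mapped pages is handed to the stack, and complete() runs from the zerocopy callback once the stack has released it. What the counter buys is easiest to see from the consumer side; a minimal sketch, assuming the dealloc kthread in netback.c (which this diff does not show) gates its exit on the counter:

	/* Sketch (netback.c side, outside this diff): the dealloc kthread
	 * may only exit once every xenvif_skb_zerocopy_prepare() has been
	 * balanced by a xenvif_skb_zerocopy_complete(), i.e. no zerocopy
	 * skb is still sitting in the stack.
	 */
	static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
	{
		return kthread_should_stop() &&
		       !atomic_read(&queue->inflight_packets);
	}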
@@ -78,12 +95,8 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
-	 * Also, if a guest doesn't post enough slots to receive data on one of
-	 * its queues, the carrier goes down and NAPI is descheduled here so
-	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled ||
-		     !netif_carrier_ok(queue->vif->dev))) {
+	if (unlikely(queue->vif->disabled)) {
 		napi_complete(napi);
 		return 0;
 	}
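This hunk narrows the early-out in xenvif_poll() to the rogue-vif case; the dropped carrier test, and the comment paragraph describing it, presumably become unnecessary once NAPI registration tracks connect/disconnect (see the later hunks). For reference, the NAPI contract the remaining branch relies on is that completing with zero work deschedules the instance; an illustrative skeleton (example_poll is not a real driver function):

	/* Illustrative only: napi_complete() plus a return value below
	 * budget takes the instance off the poll list until the next
	 * interrupt calls napi_schedule() again.
	 */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		napi_complete(napi);
		return 0;
	}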
@@ -528,9 +541,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 
 	init_timer(&queue->rx_stalled);
 
-	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-		       XENVIF_NAPI_WEIGHT);
-
 	return 0;
 }
 
@@ -564,6 +574,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
 	init_waitqueue_head(&queue->wq);
 	init_waitqueue_head(&queue->dealloc_wq);
+	atomic_set(&queue->inflight_packets, 0);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
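Zeroing inflight_packets here means the counter is initialised in xenvif_connect() before the dealloc kthread, created and woken further down in the same function, can ever read it. A condensed sketch of that ordering, error handling elided (the kthread_create() call mirrors the one already in this function, which the diff does not show):

	/* Condensed ordering inside xenvif_connect() after this change:
	 * the counter is valid before the task that reads it can run.
	 */
	atomic_set(&queue->inflight_packets, 0);
	queue->dealloc_task = kthread_create(xenvif_dealloc_kthread,
					     (void *)queue, "%s-dealloc",
					     queue->name);
	wake_up_process(queue->dealloc_task);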
@@ -618,6 +629,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	wake_up_process(queue->task);
 	wake_up_process(queue->dealloc_task);
 
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
 	return 0;
 
 err_rx_unbind:
@@ -646,25 +660,6 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
-				      unsigned int worst_case_skb_lifetime)
-{
-	int i, unmap_timeout = 0;
-
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			i = -1;
-		}
-	}
-}
-
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
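The deleted helper polled once per second for pages still granted to the frontend and rate-limited an error past a computed worst-case skb lifetime. It can go because teardown becomes event-driven rather than timed: kthread_stop(queue->dealloc_task) in xenvif_disconnect() does not return until the dealloc kthread exits, and that kthread (per the stop-condition sketch after the first hunk) only exits once inflight_packets has drained and it has unmapped everything outstanding. A sketch of how the kthread's main loop can look; the helper names tx_dealloc_work_todo() and xenvif_tx_dealloc_action() assume the netback.c side, which this diff does not show:

	/* Sketch (netback.c side): event-driven drain replacing the 1 s poll. */
	int xenvif_dealloc_kthread(void *data)
	{
		struct xenvif_queue *queue = data;

		for (;;) {
			wait_event_interruptible(queue->dealloc_wq,
						 tx_dealloc_work_todo(queue) ||
						 xenvif_dealloc_kthread_should_stop(queue));
			if (xenvif_dealloc_kthread_should_stop(queue))
				break;

			xenvif_tx_dealloc_action(queue);
			cond_resched();
		}

		/* Unmap anything still outstanding before exiting. */
		if (tx_dealloc_work_todo(queue))
			xenvif_tx_dealloc_action(queue);

		return 0;
	}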
@@ -676,6 +671,8 @@ void xenvif_disconnect(struct xenvif *vif)
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
+		netif_napi_del(&queue->napi);
+
 		if (queue->task) {
 			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
@@ -708,7 +705,6 @@
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
 	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
-	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
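With this and the previous hunk, netif_napi_del() moves from xenvif_deinit_queue() to xenvif_disconnect(), mirroring the netif_napi_add() move on the connect side, so the connect/disconnect pair now owns the queue's active resources and deinit is reduced to releasing the ballooned pages. A condensed sketch of the resulting teardown order (the caller shown is illustrative; the real callers live in xenbus.c):

	/* Illustrative teardown order after this series. */
	static void example_backend_teardown(struct xenvif *vif)
	{
		xenvif_disconnect(vif);	/* netif_napi_del(), stop kthreads,
					 * unbind event channels */
		xenvif_free(vif);	/* unregister_netdev(), then
					 * xenvif_deinit_queue() per queue */
	}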
@@ -716,21 +712,11 @@ void xenvif_free(struct xenvif *vif)
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
-	/* Here we want to avoid timeout messages if an skb can be legitimately
-	 * stuck somewhere else. Realistically this could be an another vif's
-	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
-	 * Although if that other guest wakes up just before its timeout happens
-	 * and takes only one skb from QDisc, it can hold onto other skbs for a
-	 * longer period.
-	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
 		xenvif_deinit_queue(queue);
 	}
 