Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 -rw-r--r--  drivers/net/xen-netback/interface.c | 114
 1 file changed, 55 insertions(+), 59 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9e97c7ca0ddd..f379689dde30 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+                                 struct sk_buff *skb)
+{
+        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+        atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+        atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
         struct net_device *dev = queue->vif->dev;
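For context on the new helpers: the core network stack invokes a zerocopy completion callback once every reference to an SKBTX_DEV_ZEROCOPY skb's frags has been dropped, and the hunk's comment says that path ends in xenvif_skb_zerocopy_complete(). A minimal sketch of the intended pairing, assuming the ubuf_info-based callback netback uses elsewhere; the body and the ubuf_to_queue() helper are illustrative, not the driver's actual code:

static void example_zerocopy_callback(struct ubuf_info *ubuf,
                                      bool zerocopy_success)
{
        struct xenvif_queue *queue = ubuf_to_queue(ubuf); /* hypothetical */

        /* ... return the grant-mapped frag pages to the guest ... */

        /* Balances the atomic_inc() in xenvif_skb_zerocopy_prepare();
         * teardown can then wait for inflight_packets to reach zero.
         */
        xenvif_skb_zerocopy_complete(queue);
}

Every skb that goes through xenvif_skb_zerocopy_prepare() must eventually reach such a callback, otherwise the inflight count never drains and disconnect would block.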
@@ -55,7 +72,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+        return netif_running(vif->dev) &&
+                test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -96,13 +114,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
         struct xenvif_queue *queue = dev_id;
+        struct netdev_queue *net_queue =
+                netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+        /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+         * the carrier went down and this queue was previously blocked
+         */
+        if (unlikely(netif_tx_queue_stopped(net_queue) ||
+                     (!netif_carrier_ok(queue->vif->dev) &&
+                      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+                set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
         xenvif_kick_thread(queue);
 
         return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
         xenvif_tx_interrupt(irq, dev_id);
         xenvif_rx_interrupt(irq, dev_id);
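The QUEUE_STATUS_RX_PURGE_EVENT bit set above is consumed on the queue's kernel thread, which this interrupt kicks; the real consumer lives in netback.c. A hedged sketch of the consuming side (the surrounding loop and the carrier handling are simplified, not the driver's exact code):

if (test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status)) {
        /* The guest stopped draining its RX ring: ditch the backlog
         * so this queue cannot pin host memory indefinitely.
         */
        skb_queue_purge(&queue->rx_queue);
}

Note that the interrupt handler itself only sets a bit and wakes the thread; all purging happens in process context.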
@@ -124,16 +151,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
         netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
         struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
         if (xenvif_queue_stopped(queue)) {
-                netdev_err(queue->vif->dev, "draining TX queue\n");
-                queue->rx_queue_purge = true;
+                set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
                 xenvif_kick_thread(queue);
-                xenvif_wake_queue(queue);
         }
 }
 
@@ -182,11 +207,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
          * drain.
          */
         if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-                queue->wake_queue.function = xenvif_wake_queue_callback;
-                queue->wake_queue.data = (unsigned long)queue;
+                queue->rx_stalled.function = xenvif_rx_stalled;
+                queue->rx_stalled.data = (unsigned long)queue;
                 xenvif_stop_queue(queue);
-                mod_timer(&queue->wake_queue,
+                mod_timer(&queue->rx_stalled,
                           jiffies + rx_drain_timeout_jiffies);
         }
 
         skb_queue_tail(&queue->rx_queue, skb);
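The two hunks above form one mechanism: when the shared RX ring lacks slots, the TX queue is stopped and the rx_stalled timer armed; if the timer fires while the queue is still stopped, xenvif_rx_stalled() raises the purge event instead of draining in place. The .function/.data fields are reassigned on every stall; a sketch of the more conventional one-time wiring, using the timer API of this era (illustrative, not what the patch does):

/* Sketch: wire the callback once, e.g. in xenvif_init_queue()... */
setup_timer(&queue->rx_stalled, xenvif_rx_stalled,
            (unsigned long)queue);

/* ...so the transmit path only needs to stop the queue and (re)arm: */
if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
        xenvif_stop_queue(queue);
        mod_timer(&queue->rx_stalled,
                  jiffies + rx_drain_timeout_jiffies);
}

Reassigning on every stall still works, since the same values are written each time; setup_timer() would merely express the intent once.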
@@ -267,7 +292,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
         struct xenvif *vif = netdev_priv(dev);
-        if (netif_carrier_ok(dev))
+        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                 xenvif_up(vif);
         netif_tx_start_all_queues(dev);
         return 0;
@@ -276,7 +301,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
         struct xenvif *vif = netdev_priv(dev);
-        if (netif_carrier_ok(dev))
+        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                 xenvif_down(vif);
         netif_tx_stop_all_queues(dev);
         return 0;
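xenvif_open() and xenvif_close() now test VIF_STATUS_CONNECTED rather than netif_carrier_ok(). The reason becomes visible later in this diff: once the carrier can be turned off for a stalled guest while the backend stays connected, carrier state no longer means "connected to the frontend". A small hedged helper to illustrate the distinction this creates (the example_ name is not in the driver):

static bool example_vif_stalled(struct xenvif *vif)
{
        /* Only expressible after this series: connected, yet the
         * carrier is off because the guest stopped receiving.
         */
        return test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
               !netif_carrier_ok(vif->dev);
}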
@@ -418,8 +443,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
          * When the guest selects the desired number, it will be updated
          * via netif_set_real_num_*_queues().
          */
-        dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-                              xenvif_max_queues);
+        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+                              ether_setup, xenvif_max_queues);
         if (dev == NULL) {
                 pr_warn("Could not allocate netdev for %s\n", name);
                 return ERR_PTR(-ENOMEM);
@@ -514,10 +539,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
                 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
         }
 
-        init_timer(&queue->wake_queue);
-
-        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-                        XENVIF_NAPI_WEIGHT);
+        init_timer(&queue->rx_stalled);
 
         return 0;
 }
@@ -528,6 +550,7 @@ void xenvif_carrier_on(struct xenvif *vif)
         if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                 dev_set_mtu(vif->dev, ETH_DATA_LEN);
         netdev_update_features(vif->dev);
+        set_bit(VIF_STATUS_CONNECTED, &vif->status);
         netif_carrier_on(vif->dev);
         if (netif_running(vif->dev))
                 xenvif_up(vif);
@@ -551,6 +574,10 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
         init_waitqueue_head(&queue->wq);
         init_waitqueue_head(&queue->dealloc_wq);
+        atomic_set(&queue->inflight_packets, 0);
+
+        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+                        XENVIF_NAPI_WEIGHT);
 
         if (tx_evtchn == rx_evtchn) {
                 /* feature-split-event-channels == 0 */
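Two lifetime changes land in xenvif_connect(): the per-queue inflight counter is zeroed, and netif_napi_add() moves here from xenvif_init_queue(), pairing with the netif_napi_del() that moves into xenvif_disconnect() below, so the NAPI instance exists exactly while the rings are connected. The counter is what later allows the polling loop in xenvif_free() to be deleted; a hedged sketch of the stop condition it enables (the real check sits on the dealloc thread in netback.c, the name below is illustrative):

static bool example_dealloc_may_stop(struct xenvif_queue *queue)
{
        /* Safe to stop only when every zerocopy prepare has been
         * balanced by a complete, i.e. no skb can still call back
         * into this queue.
         */
        return kthread_should_stop() &&
               atomic_read(&queue->inflight_packets) == 0;
}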
@@ -625,29 +652,12 @@ void xenvif_carrier_off(struct xenvif *vif)
         struct net_device *dev = vif->dev;
 
         rtnl_lock();
-        netif_carrier_off(dev); /* discard queued packets */
-        if (netif_running(dev))
-                xenvif_down(vif);
-        rtnl_unlock();
-}
-
-static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
-                                      unsigned int worst_case_skb_lifetime)
-{
-        int i, unmap_timeout = 0;
-
-        for (i = 0; i < MAX_PENDING_REQS; ++i) {
-                if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-                        unmap_timeout++;
-                        schedule_timeout(msecs_to_jiffies(1000));
-                        if (unmap_timeout > worst_case_skb_lifetime &&
-                            net_ratelimit())
-                                netdev_err(queue->vif->dev,
-                                           "Page still granted! Index: %x\n",
-                                           i);
-                        i = -1;
-                }
-        }
+        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+                netif_carrier_off(dev); /* discard queued packets */
+                if (netif_running(dev))
+                        xenvif_down(vif);
+        }
+        rtnl_unlock();
 }
 
 void xenvif_disconnect(struct xenvif *vif)
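Guarding the teardown with test_and_clear_bit() makes xenvif_carrier_off() idempotent: the atomic returns the old bit value while clearing it, so exactly one caller per connect cycle runs the body, and rtnl_unlock() stays outside the conditional so the lock always balances. That is what lets the next hunk call it unconditionally from xenvif_disconnect(). Illustrative only:

xenvif_carrier_off(vif);        /* tears down, clears the bit */
xenvif_carrier_off(vif);        /* harmless no-op: bit already clear */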
@@ -656,14 +666,15 @@ void xenvif_disconnect(struct xenvif *vif)
         unsigned int num_queues = vif->num_queues;
         unsigned int queue_index;
 
-        if (netif_carrier_ok(vif->dev))
-                xenvif_carrier_off(vif);
+        xenvif_carrier_off(vif);
 
         for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                 queue = &vif->queues[queue_index];
 
+                netif_napi_del(&queue->napi);
+
                 if (queue->task) {
-                        del_timer_sync(&queue->wake_queue);
+                        del_timer_sync(&queue->rx_stalled);
                         kthread_stop(queue->task);
                         queue->task = NULL;
                 }
@@ -694,7 +705,6 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
         free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
-        netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
@@ -702,25 +712,11 @@ void xenvif_free(struct xenvif *vif)
         struct xenvif_queue *queue = NULL;
         unsigned int num_queues = vif->num_queues;
         unsigned int queue_index;
-        /* Here we want to avoid timeout messages if an skb can be legitimately
-         * stuck somewhere else. Realistically this could be an another vif's
-         * internal or QDisc queue. That another vif also has this
-         * rx_drain_timeout_msecs timeout, but the timer only ditches the
-         * internal queue. After that, the QDisc queue can put in worst case
-         * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-         * internal queue, so we need several rounds of such timeouts until we
-         * can be sure that no another vif should have skb's from us. We are
-         * not sending more skb's, so newly stuck packets are not interesting
-         * for us here.
-         */
-        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
 
         unregister_netdev(vif->dev);
 
         for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                 queue = &vif->queues[queue_index];
-                xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
                 xenvif_deinit_queue(queue);
         }
 
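The deleted xenvif_wait_unmap_timeout() polling, and the worst_case_skb_lifetime arithmetic feeding it, are superseded by the inflight_packets accounting introduced at the top of this diff: instead of rescanning grant handles once per second and guessing how long an skb can legitimately sit in another vif's queues, teardown can block until the counter says no zerocopy skb is outstanding. A hedged sketch of that replacement in wait_event() form (illustrative; the actual gating happens in the dealloc thread's stop condition):

/* Sleep until every xenvif_skb_zerocopy_prepare() has been balanced;
 * at that point no granted page can still be referenced by an skb.
 */
wait_event(queue->dealloc_wq,
           atomic_read(&queue->inflight_packets) == 0);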