Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	68
1 file changed, 37 insertions(+), 31 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9e97c7ca0ddd..bfd10cb9c8de 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
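
The diff relies on two new sets of status bits that are not defined in this file. A minimal sketch of what the companion common.h change presumably adds, with the names taken from the uses in this diff:

/* Sketch only, assuming the companion common.h hunk; not part of this file. */
enum state_bit_shift {
	VIF_STATUS_CONNECTED,		/* the vif is connected to the frontend */
};

enum queue_status_bit_shift {
	QUEUE_STATUS_RX_PURGE_EVENT,	/* the RX thread should purge its queue */
	QUEUE_STATUS_RX_STALLED,	/* guest stopped consuming; carrier is off */
};

With this, xenvif_schedulable() keys on the backend's own connection state, leaving netif_carrier_ok() free to mean "the guest is actually receiving".
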
@@ -96,13 +97,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
 	xenvif_tx_interrupt(irq, dev_id);
 	xenvif_rx_interrupt(irq, dev_id);
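
The interrupt handler only sets the purge bit and kicks the per-queue kthread; the bit is consumed on the thread side in netback.c, outside this diff. A rough sketch of the expected pattern there, with a hypothetical helper name:

/* Hypothetical illustration of the consuming side; the real code lives in
 * the guest-RX kthread in netback.c, which this diff does not touch.
 */
static void example_handle_purge_event(struct xenvif_queue *queue)
{
	if (test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status))
		skb_queue_purge(&queue->rx_queue);	/* drop what the guest never took */
}
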
@@ -124,16 +134,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
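
The rename from wake_queue to rx_stalled implies a matching field rename in struct xenvif_queue (common.h, not shown here). Roughly:

/* Assumed shape of the companion common.h change; sketch only. */
struct xenvif_queue {
	/* ... */
	struct timer_list rx_stalled;	/* was: struct timer_list wake_queue */
	unsigned long status;		/* QUEUE_STATUS_* bits */
	/* ... */
};
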
@@ -182,11 +190,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
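
rx_drain_timeout_jiffies comes from netback.c. For context, it is presumably derived from the rx_drain_timeout_msecs module parameter along these lines (the 10 s default is an assumption):

/* Sketch of the timeout plumbing in netback.c, outside this diff. */
unsigned int rx_drain_timeout_msecs = 10000;	/* assumed default */
unsigned long rx_drain_timeout_jiffies;
/* at init: rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); */
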
@@ -267,7 +275,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -276,7 +284,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -418,8 +426,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	 * When the guest selects the desired number, it will be updated
 	 * via netif_set_real_num_*_queues().
 	 */
-	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-			      xenvif_max_queues);
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+			      ether_setup, xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
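
The extra argument reflects the alloc_netdev_mq() signature of this kernel generation, which takes a name_assign_type between the name and the setup callback:

/* Approximate shape of the allocator in include/linux/netdevice.h of this
 * era:
 *
 *   alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count)
 *
 * NET_NAME_UNKNOWN states that the origin of the device name is not exposed
 * to userspace via sysfs.
 */
dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
		      ether_setup, xenvif_max_queues);
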
@@ -514,7 +522,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 		       XENVIF_NAPI_WEIGHT);
@@ -528,6 +536,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -625,9 +634,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
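
Guarding carrier-off with test_and_clear_bit() makes it idempotent, which is what lets the next hunk call it unconditionally from xenvif_disconnect(). Illustration with a hypothetical caller:

/* Hypothetical illustration: a second call is now a harmless no-op. */
static void example_double_carrier_off(struct xenvif *vif)
{
	xenvif_carrier_off(vif);	/* clears VIF_STATUS_CONNECTED, carrier down */
	xenvif_carrier_off(vif);	/* bit already clear: nothing happens */
}
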
@@ -656,14 +667,13 @@ void xenvif_disconnect(struct xenvif *vif)
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -705,16 +715,12 @@ void xenvif_free(struct xenvif *vif)
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
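
Worked arithmetic for the simplified lifetime, assuming values that were typical for this code (rx_drain_timeout_msecs = 10000, XENVIF_QUEUE_LENGTH = 32, XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS = 17):

/* old: (10000 / 1000) * DIV_ROUND_UP(32, 256 / 17)
 *    = 10 * DIV_ROUND_UP(32, 15)
 *    = 10 * 3 = 30 seconds
 * new: 10000 / 1000 = 10 seconds
 *
 * i.e. the wait before complaining about stuck skbs drops from three drain
 * rounds to one, matching the new single-timeout purge behaviour.
 */
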