Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--    drivers/net/xen-netback/interface.c    49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fbdadb3d8220..48a55cda979b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -78,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
+	 * Also, if a guest doesn't post enough slots to receive data on one of
+	 * its queues, the carrier goes down and NAPI is descheduled here so
+	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled)) {
+	if (unlikely(queue->vif->disabled ||
+		     !netif_carrier_ok(queue->vif->dev))) {
 		napi_complete(napi);
 		return 0;
 	}
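The poll-handler change above relies on the standard NAPI contract: reporting zero work and calling napi_complete() deschedules the instance until the next interrupt reschedules it. Below is a minimal sketch of that pattern, not the driver's code; should_skip() is a hypothetical stand-in for the vif->disabled and carrier checks in xenvif_poll().

/* Sketch only: NAPI poll handler with an early-out when the device
 * should be ignored. should_skip() is hypothetical.
 */
#include <linux/netdevice.h>

static bool should_skip(struct napi_struct *napi)
{
	/* e.g. device flagged rogue, or carrier administratively down */
	return !netif_carrier_ok(napi->dev);
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	if (unlikely(should_skip(napi))) {
		/* Report no work; NAPI deschedules this instance until
		 * the next interrupt schedules it again.
		 */
		napi_complete(napi);
		return 0;
	}

	/* ... process up to 'budget' packets, counting them in work_done ... */

	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}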
@@ -97,7 +101,16 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);

+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);

 	return IRQ_HANDLED;
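The interrupt hunk follows the usual split between hard-IRQ and thread context: the IRQ only records an event in an atomic status bit and kicks the queue's kthread, which acts on the bit later in process context (the consumer side lives in the driver's RX kthread and is outside this diff). A rough sketch of the pattern follows, using hypothetical example_* names rather than the driver's QUEUE_STATUS_* flags.

/* Sketch only: atomic event flag set in IRQ context, consumed by a
 * kthread. EXAMPLE_PURGE_EVENT and the example_* helpers are
 * hypothetical names.
 */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

#define EXAMPLE_PURGE_EVENT	0

struct example_queue {
	unsigned long status;
	wait_queue_head_t wq;
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_queue *q = dev_id;

	/* Record the event atomically; no sleeping work in hard IRQ. */
	set_bit(EXAMPLE_PURGE_EVENT, &q->status);
	wake_up(&q->wq);	/* analogous to xenvif_kick_thread() */

	return IRQ_HANDLED;
}

static void example_thread_iteration(struct example_queue *q)
{
	/* In process context the thread claims the event exactly once. */
	if (test_and_clear_bit(EXAMPLE_PURGE_EVENT, &q->status)) {
		/* ... drop the stale rx queue, update the carrier, etc. ... */
	}
}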
@@ -125,16 +138,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }

-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;

 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }

@@ -183,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}

 	skb_queue_tail(&queue->rx_queue, skb);
@@ -515,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}

-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);

 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 		       XENVIF_NAPI_WEIGHT);
@@ -666,7 +677,7 @@ void xenvif_disconnect(struct xenvif *vif)
 		queue = &vif->queues[queue_index];

 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
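The renamed rx_stalled timer in the hunks above uses the classic timer_list API of this kernel generation: init_timer() at queue setup, assignment of .function/.data plus mod_timer() when the TX queue is stopped in xenvif_start_xmit(), and del_timer_sync() before the kthread is stopped in xenvif_disconnect(). (Later kernels, from around v4.15, replace this pattern with timer_setup() and from_timer().) A condensed sketch of that lifecycle follows, with hypothetical example_* names standing in for the xenvif ones.

/* Sketch only: old-style timer_list lifecycle as used by this driver.
 * All example_* names are hypothetical stand-ins.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_queue {
	struct timer_list rx_stalled;
	/* ... */
};

static void example_rx_stalled(unsigned long data)
{
	struct example_queue *q = (struct example_queue *)data;

	/* Runs in softirq context once the timeout expires:
	 * set the purge event bit and kick the thread.
	 */
	(void)q;
}

static void example_setup(struct example_queue *q)
{
	init_timer(&q->rx_stalled);		/* as in xenvif_init_queue() */
}

static void example_arm(struct example_queue *q, unsigned long timeout_jiffies)
{
	q->rx_stalled.function = example_rx_stalled;
	q->rx_stalled.data = (unsigned long)q;
	mod_timer(&q->rx_stalled, jiffies + timeout_jiffies);
}

static void example_teardown(struct example_queue *q)
{
	del_timer_sync(&q->rx_stalled);		/* as in xenvif_disconnect() */
}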
@@ -708,16 +719,12 @@ void xenvif_free(struct xenvif *vif)
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);

 	unregister_netdev(vif->dev);

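The final hunk reduces the worst-case skb lifetime estimate in xenvif_free() from several drain rounds (one per DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS) refills) to a single rx_drain_timeout_msecs period. A back-of-the-envelope comparison follows, using assumed values (queue length 32, ring size 256, MAX_SKB_FRAGS 17, 10 s drain timeout); the real numbers come from the kernel headers and the rx_drain_timeout_msecs module parameter, so check those before relying on the figures.

/* Sketch only: compares the old and new worst_case_skb_lifetime
 * formulas with assumed values for the constants involved.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed values, not read from the kernel. */
	unsigned int rx_drain_timeout_msecs = 10000;
	unsigned int xenvif_queue_length = 32;
	unsigned int rx_ring_size = 256;
	unsigned int max_skb_frags = 17;

	unsigned int old_lifetime = (rx_drain_timeout_msecs / 1000) *
		DIV_ROUND_UP(xenvif_queue_length, rx_ring_size / max_skb_frags);
	unsigned int new_lifetime = rx_drain_timeout_msecs / 1000;

	/* With these numbers: 256/17 = 15 slots per worst-case skb,
	 * DIV_ROUND_UP(32, 15) = 3, so 30 s before versus 10 s after.
	 */
	printf("old: %us, new: %us\n", old_lifetime, new_lifetime);
	return 0;
}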
