Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 -rw-r--r--  drivers/net/xen-netback/interface.c | 117
 1 file changed, 48 insertions(+), 69 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..bfd10cb9c8de 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
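The schedulable test now keys off an internal "connected" flag instead of the carrier, because later hunks in this patch turn the carrier off to flag a stalled guest while the frontend is still connected. A minimal sketch of the bit-flag pattern, assuming the status field and VIF_STATUS_CONNECTED enum this series adds to common.h (the trimmed struct and helper name here are illustrative, not part of the patch):

	/* Module-context sketch: the status word is an atomic bitmap, so the
	 * flag can be read and written without additional locking.
	 */
	#include <linux/bitops.h>

	enum state_bit_shift {
		VIF_STATUS_CONNECTED,	/* frontend connected; carrier may differ */
	};

	struct vif_flags {		/* illustrative stand-in for struct xenvif */
		unsigned long status;
	};

	static bool vif_connected(struct vif_flags *vif)
	{
		return test_bit(VIF_STATUS_CONNECTED, &vif->status);
	}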
@@ -96,13 +97,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
 	xenvif_tx_interrupt(irq, dev_id);
 	xenvif_rx_interrupt(irq, dev_id);
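The new check runs in hard-interrupt context, so it only records a flag and kicks the per-queue kthread; the actual purging happens in the thread. Restated as a standalone predicate for readability (the helper is illustrative and would only compile inside this driver, where the queue type and status bits are defined):

	/* A purge is requested when the QDisc has already stopped this tx
	 * queue, or when the carrier is down and this particular queue was
	 * the one previously marked as stalled.
	 */
	static bool xenvif_rx_purge_needed(struct xenvif_queue *queue,
					   struct netdev_queue *net_queue)
	{
		return netif_tx_queue_stopped(net_queue) ||
		       (!netif_carrier_ok(queue->vif->dev) &&
			test_bit(QUEUE_STATUS_RX_STALLED, &queue->status));
	}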
@@ -124,45 +134,22 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
-{
-	unsigned int num_queues = dev->real_num_tx_queues;
-	u32 hash;
-	u16 queue_index;
-
-	/* First, check if there is only one queue to optimise the
-	 * single-queue or old frontend scenario.
-	 */
-	if (num_queues == 1) {
-		queue_index = 0;
-	} else {
-		/* Use skb_get_hash to obtain an L4 hash if available */
-		hash = skb_get_hash(skb);
-		queue_index = hash % num_queues;
-	}
-
-	return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	u16 index;
 	int min_slots_needed;
 
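Dropping .ndo_select_queue is safe here because the core's default transmit-queue selection already does what the removed helper did: pick queue 0 for single-queue devices and otherwise fold the skb hash into the queue range. A simplified sketch of that fallback behaviour (the function name is illustrative; the real __netdev_pick_tx() is more involved and also honours socket queue caching and XPS):

	static u16 default_pick_tx(struct net_device *dev, struct sk_buff *skb)
	{
		if (dev->real_num_tx_queues == 1)
			return 0;
		/* skb hash folded into [0, real_num_tx_queues) */
		return skb_tx_hash(dev, skb);
	}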
@@ -203,11 +190,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
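The timer uses the pre-timer_setup() API of this era, where the callback and its opaque unsigned long argument are assigned by hand before arming. A minimal sketch of that idiom, assuming a timer already prepared with init_timer() (helper names are illustrative):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void stall_cb(unsigned long data)
	{
		/* cast data back to the queue pointer, as xenvif_rx_stalled() does */
	}

	static void arm_stall_timer(struct timer_list *t, unsigned long data,
				    unsigned long timeout_jiffies)
	{
		t->function = stall_cb;		/* old API: explicit function field */
		t->data = data;			/* old API: opaque callback argument */
		mod_timer(t, jiffies + timeout_jiffies);	/* (re)arm the timer */
	}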
@@ -225,7 +212,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
@@ -256,7 +243,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +259,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -288,7 +275,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -297,7 +284,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -379,7 +366,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
 	struct xenvif_stats *vif_stats;
@@ -424,7 +411,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr   = eth_validate_addr,
-	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,10 +424,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	/* Allocate a netdev with the max. supported number of queues.
 	 * When the guest selects the desired number, it will be updated
-	 * via netif_set_real_num_tx_queues().
+	 * via netif_set_real_num_*_queues().
 	 */
-	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-			      xenvif_max_queues);
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+			      ether_setup, xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
@@ -458,11 +444,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->dev = dev;
 	vif->disabled = false;
 
-	/* Start out with no queues. The call below does not require
-	 * rtnl_lock() as it happens before register_netdev().
-	 */
+	/* Start out with no queues. */
 	vif->queues = NULL;
-	netif_set_real_num_tx_queues(dev, 0);
+	vif->num_queues = 0;
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -538,7 +522,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 			XENVIF_NAPI_WEIGHT);
@@ -552,6 +536,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -649,9 +634,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
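Since test_and_clear_bit() is an atomic read-modify-write, the teardown body now runs at most once per connect cycle, which is what lets xenvif_disconnect() below call xenvif_carrier_off() unconditionally. A minimal sketch of the run-once idiom (helper name illustrative):

	#include <linux/bitops.h>

	static void teardown_once(unsigned long *status)
	{
		/* Only the caller that actually clears the bit does the work;
		 * any later (or concurrent) caller sees it already clear.
		 */
		if (test_and_clear_bit(VIF_STATUS_CONNECTED, status)) {
			/* carrier-off and xenvif_down() would go here */
		}
	}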
@@ -677,17 +664,16 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -724,21 +710,17 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
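For scale: assuming the defaults of this era (rx_drain_timeout_msecs = 10000, XENVIF_QUEUE_LENGTH = 32, XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS = 17), the old bound worked out to 10 s * DIV_ROUND_UP(32, 256 / 17) = 10 * DIV_ROUND_UP(32, 15) = 10 * 3 = 30 seconds, while the new bound is simply the 10-second drain timeout, since a queue that stays stalled past that point now ends in carrier-off rather than further drain rounds.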
@@ -748,12 +730,9 @@ void xenvif_free(struct xenvif *vif)
 		xenvif_deinit_queue(queue);
 	}
 
-	/* Free the array of queues. The call below does not require
-	 * rtnl_lock() because it happens after unregister_netdev().
-	 */
-	netif_set_real_num_tx_queues(vif->dev, 0);
 	vfree(vif->queues);
 	vif->queues = NULL;
+	vif->num_queues = 0;
 
 	free_netdev(vif->dev);
 