path: root/drivers/net/xen-netback/interface.c
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
-rw-r--r--	drivers/net/xen-netback/interface.c	123
1 file changed, 53 insertions(+), 70 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..48a55cda979b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -77,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
+	 * Also, if a guest doesn't post enough slots to receive data on one of
+	 * its queues, the carrier goes down and NAPI is descheduled here so
+	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled)) {
+	if (unlikely(queue->vif->disabled ||
+		     !netif_carrier_ok(queue->vif->dev))) {
 		napi_complete(napi);
 		return 0;
 	}
@@ -96,13 +101,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
 	xenvif_tx_interrupt(irq, dev_id);
 	xenvif_rx_interrupt(irq, dev_id);
@@ -124,45 +138,22 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
-{
-	unsigned int num_queues = dev->real_num_tx_queues;
-	u32 hash;
-	u16 queue_index;
-
-	/* First, check if there is only one queue to optimise the
-	 * single-queue or old frontend scenario.
-	 */
-	if (num_queues == 1) {
-		queue_index = 0;
-	} else {
-		/* Use skb_get_hash to obtain an L4 hash if available */
-		hash = skb_get_hash(skb);
-		queue_index = hash % num_queues;
-	}
-
-	return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	u16 index;
 	int min_slots_needed;
 
@@ -203,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
+		mod_timer(&queue->rx_stalled,
 			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
@@ -225,7 +216,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
@@ -256,7 +247,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +263,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -288,7 +279,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -297,7 +288,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -379,7 +370,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
 	struct xenvif_stats *vif_stats;
@@ -424,7 +415,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features	= xenvif_fix_features,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_select_queue	= xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,10 +428,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	/* Allocate a netdev with the max. supported number of queues.
 	 * When the guest selects the desired number, it will be updated
-	 * via netif_set_real_num_tx_queues().
+	 * via netif_set_real_num_*_queues().
 	 */
-	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-			      xenvif_max_queues);
+	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+			      ether_setup, xenvif_max_queues);
 	if (dev == NULL) {
 		pr_warn("Could not allocate netdev for %s\n", name);
 		return ERR_PTR(-ENOMEM);
@@ -458,11 +448,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->dev = dev;
 	vif->disabled = false;
 
-	/* Start out with no queues. The call below does not require
-	 * rtnl_lock() as it happens before register_netdev().
-	 */
+	/* Start out with no queues. */
 	vif->queues = NULL;
-	netif_set_real_num_tx_queues(dev, 0);
+	vif->num_queues = 0;
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -538,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 			XENVIF_NAPI_WEIGHT);
@@ -552,6 +540,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -649,9 +638,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
@@ -677,17 +668,16 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -724,21 +714,17 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
@@ -748,12 +734,9 @@ void xenvif_free(struct xenvif *vif)
 		xenvif_deinit_queue(queue);
 	}
 
-	/* Free the array of queues. The call below does not require
-	 * rtnl_lock() because it happens after unregister_netdev().
-	 */
-	netif_set_real_num_tx_queues(vif->dev, 0);
 	vfree(vif->queues);
 	vif->queues = NULL;
+	vif->num_queues = 0;
 
 	free_netdev(vif->dev);
 