author	Wei Liu <wei.liu2@citrix.com>	2014-06-23 05:50:17 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-25 18:59:47 -0400
commit	f7b50c4e7ced702d80d3b873d81a2cdafb580f13 (patch)
tree	9f641ef3e6b47cf727b8003603d008e4b5507bc2
parent	66c965f5e1b702da2b5871a909b47034c62195d8 (diff)
xen-netback: bookkeep number of active queues in our own module
The original code uses netdev->real_num_tx_queues to bookkeep the number of
queues and invokes netif_set_real_num_tx_queues to set it. However,
netif_set_real_num_tx_queues doesn't allow real_num_tx_queues to be smaller
than 1, which means setting the number to 0 will not work and
real_num_tx_queues is left untouched.

This is bogus when xenvif_free is invoked before any queues have been
allocated. That function needs to iterate through all queues to free
resources. Using the wrong number of queues results in a NULL pointer
dereference.

So we bookkeep the number of queues in xen-netback to solve this problem.
This fixes a regression introduced by the multiqueue patchset in 3.16-rc1.

There's another bug in the original code: the real number of RX queues is
never set. In the current Xen multiqueue design the numbers of TX and RX
queues are in fact the same, so we need to set both to the same value.

Also remove xenvif_select_queue and leave queue selection to the core
driver, as suggested by David Miller.

Reported-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
CC: Ian Campbell <ian.campbell@citrix.com>
CC: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
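To make the failure mode concrete, here is a minimal, self-contained userspace
sketch. It is illustrative only: the struct, field and function names are made
up and merely stand in for netdev->real_num_tx_queues,
netif_set_real_num_tx_queues() and xenvif_free(); it models the "count below 1
is rejected" guard in the core helper and the teardown loop that trusts the
stale count, not the real netback code.

/* Illustrative model of the bug, not kernel code. Build with any C compiler. */
#include <stdio.h>

struct queue { int id; };

struct vif {
	struct queue *queues;            /* NULL before the frontend connects */
	unsigned int real_num_tx_queues; /* stands in for netdev->real_num_tx_queues */
};

/* Mirrors the core helper's behaviour: a count below 1 is rejected,
 * so the stored value is left untouched.
 */
static int set_real_num_tx_queues(struct vif *vif, unsigned int n)
{
	if (n < 1)
		return -1;               /* -EINVAL in the kernel */
	vif->real_num_tx_queues = n;
	return 0;
}

/* Teardown trusts the bookkept count, as xenvif_free() did. */
static void vif_free(struct vif *vif)
{
	unsigned int i;

	for (i = 0; i < vif->real_num_tx_queues; ++i) {
		if (!vif->queues) {
			printf("queue %u: would dereference a NULL queue array here\n", i);
			continue;
		}
		printf("freeing queue %d\n", vif->queues[i].id);
	}
}

int main(void)
{
	/* The netdev is allocated with the maximum supported queue count. */
	struct vif vif = { .queues = NULL, .real_num_tx_queues = 8 };

	/* "Start out with no queues" fails silently: the count stays at 8. */
	if (set_real_num_tx_queues(&vif, 0) < 0)
		printf("reset to 0 rejected; count is still %u\n",
		       vif.real_num_tx_queues);

	vif_free(&vif);          /* walks 8 queues that were never allocated */
	return 0;
}

Keeping the active-queue count in the driver's own struct (vif->num_queues, as
the patch below does) avoids the problem because that field can legitimately
be zero.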
-rw-r--r--	drivers/net/xen-netback/common.h	1
-rw-r--r--	drivers/net/xen-netback/interface.c	49
-rw-r--r--	drivers/net/xen-netback/xenbus.c	28
3 files changed, 26 insertions, 52 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4dd7c4a1923b..2532ce85d718 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -222,6 +222,7 @@ struct xenvif {
 
 	/* Queues */
 	struct xenvif_queue *queues;
+	unsigned int num_queues; /* active queues, resource allocated */
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
 	}
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
-{
-	unsigned int num_queues = dev->real_num_tx_queues;
-	u32 hash;
-	u16 queue_index;
-
-	/* First, check if there is only one queue to optimise the
-	 * single-queue or old frontend scenario.
-	 */
-	if (num_queues == 1) {
-		queue_index = 0;
-	} else {
-		/* Use skb_get_hash to obtain an L4 hash if available */
-		hash = skb_get_hash(skb);
-		queue_index = hash % num_queues;
-	}
-
-	return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	u16 index;
 	int min_slots_needed;
 
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
 	struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	/* Allocate a netdev with the max. supported number of queues.
 	 * When the guest selects the desired number, it will be updated
-	 * via netif_set_real_num_tx_queues().
+	 * via netif_set_real_num_*_queues().
 	 */
 	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
 			      xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->dev = dev;
 	vif->disabled = false;
 
-	/* Start out with no queues. The call below does not require
-	 * rtnl_lock() as it happens before register_netdev().
-	 */
+	/* Start out with no queues. */
 	vif->queues = NULL;
-	netif_set_real_num_tx_queues(dev, 0);
+	vif->num_queues = 0;
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
 		xenvif_deinit_queue(queue);
 	}
 
-	/* Free the array of queues. The call below does not require
-	 * rtnl_lock() because it happens after unregister_netdev().
-	 */
-	netif_set_real_num_tx_queues(vif->dev, 0);
 	vfree(vif->queues);
 	vif->queues = NULL;
+	vif->num_queues = 0;
 
 	free_netdev(vif->dev);
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 96c63dc2509e..3d85acd84bad 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
-	rtnl_unlock();
+	be->vif->num_queues = requested_num_queues;
 
 	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
 		queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
 			 * earlier queues can be destroyed using the regular
 			 * disconnect logic.
 			 */
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
 			goto err;
 		}
 
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
 			 * and also clean up any previously initialised queues.
 			 */
 			xenvif_deinit_queue(queue);
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
 			goto err;
 		}
 	}
 
+	/* Initialisation completed, tell core driver the number of
+	 * active queues.
+	 */
+	rtnl_lock();
+	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+	rtnl_unlock();
+
 	xenvif_carrier_on(be->vif);
 
 	unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
 	return;
 
 err:
-	if (be->vif->dev->real_num_tx_queues > 0)
+	if (be->vif->num_queues > 0)
 		xenvif_disconnect(be->vif); /* Clean up existing queues */
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, 0);
-	rtnl_unlock();
+	be->vif->num_queues = 0;
 	return;
 }
 
@@ -596,7 +596,7 @@ err:
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 {
 	struct xenbus_device *dev = be->dev;
-	unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
+	unsigned int num_queues = queue->vif->num_queues;
 	unsigned long tx_ring_ref, rx_ring_ref;
 	unsigned int tx_evtchn, rx_evtchn;
 	int err;