Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h    |  1
-rw-r--r--  drivers/net/xen-netback/interface.c | 49
-rw-r--r--  drivers/net/xen-netback/netback.c   | 86
-rw-r--r--  drivers/net/xen-netback/xenbus.c    | 28
4 files changed, 89 insertions(+), 75 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4dd7c4a1923b..2532ce85d718 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -222,6 +222,7 @@ struct xenvif {
 
 	/* Queues */
 	struct xenvif_queue *queues;
+	unsigned int num_queues; /* active queues, resource allocated */
 
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
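The added num_queues field lets the backend bookkeep its active queues itself rather than borrowing dev->real_num_tx_queues, which the core only allows to change under rtnl_lock. A minimal userspace sketch of the resulting alloc/teardown pattern (struct contents and helper names here are illustrative stand-ins, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct xenvif_queue { unsigned int id; };	/* placeholder contents */

struct xenvif {
	struct xenvif_queue *queues;
	unsigned int num_queues;	/* active queues, resource allocated */
};

/* models connect(): allocate the queue array, then record the count */
static int vif_alloc_queues(struct xenvif *vif, unsigned int n)
{
	vif->queues = calloc(n, sizeof(*vif->queues));
	if (!vif->queues)
		return -1;
	vif->num_queues = n;
	return 0;
}

/* models xenvif_free(): drop the array and zero the private count */
static void vif_free_queues(struct xenvif *vif)
{
	free(vif->queues);
	vif->queues = NULL;
	vif->num_queues = 0;
}

int main(void)
{
	struct xenvif vif = { NULL, 0 };

	if (vif_alloc_queues(&vif, 4) == 0)
		printf("active queues: %u\n", vif.num_queues);
	vif_free_queues(&vif);
	return 0;
}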
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
 	}
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
-{
-	unsigned int num_queues = dev->real_num_tx_queues;
-	u32 hash;
-	u16 queue_index;
-
-	/* First, check if there is only one queue to optimise the
-	 * single-queue or old frontend scenario.
-	 */
-	if (num_queues == 1) {
-		queue_index = 0;
-	} else {
-		/* Use skb_get_hash to obtain an L4 hash if available */
-		hash = skb_get_hash(skb);
-		queue_index = hash % num_queues;
-	}
-
-	return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	u16 index;
 	int min_slots_needed;
 
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
 	struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
 	.ndo_fix_features = xenvif_fix_features,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
 	/* Allocate a netdev with the max. supported number of queues.
 	 * When the guest selects the desired number, it will be updated
-	 * via netif_set_real_num_tx_queues().
+	 * via netif_set_real_num_*_queues().
 	 */
 	dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
 			      xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->dev = dev;
 	vif->disabled = false;
 
-	/* Start out with no queues. The call below does not require
-	 * rtnl_lock() as it happens before register_netdev().
-	 */
+	/* Start out with no queues. */
 	vif->queues = NULL;
-	netif_set_real_num_tx_queues(dev, 0);
+	vif->num_queues = 0;
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
 	if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->dev->real_num_tx_queues;
+	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
 		xenvif_deinit_queue(queue);
 	}
 
-	/* Free the array of queues. The call below does not require
-	 * rtnl_lock() because it happens after unregister_netdev().
-	 */
-	netif_set_real_num_tx_queues(vif->dev, 0);
 	vfree(vif->queues);
 	vif->queues = NULL;
+	vif->num_queues = 0;
 
 	free_netdev(vif->dev);
 
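With .ndo_select_queue removed, the core's default transmit-queue picker is used instead; it already derives an index from the skb hash, so the deleted xenvif_select_queue() duplicated it. A userspace sketch of the selection both versions effectively compute (flow_hash stands in for skb_get_hash() and is not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Pick a TX queue from a flow hash, as the deleted callback did and
 * as the core fallback still does: queue 0 for the single-queue (or
 * old frontend) case, otherwise hash modulo the queue count.
 */
static uint16_t select_queue(uint32_t flow_hash, unsigned int num_queues)
{
	if (num_queues == 1)
		return 0;
	return (uint16_t)(flow_hash % num_queues);
}

int main(void)
{
	printf("queue = %u\n", select_queue(0x9e3779b9u, 4));
	return 0;
}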
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	/* This always points to the shinfo of the skb being checked, which
+	 * could be either the first or the one on the frag_list
+	 */
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	/* If this is non-NULL, we are currently checking the frag_list skb, and
+	 * this points to the shinfo of the first one
+	 */
+	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
+	const bool sharedslot = nr_frags &&
+				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
 	int i, err;
-	struct sk_buff *first_skb = NULL;
 
 	/* Check status of header. */
 	err = (*gopp_copy)->status;
-	(*gopp_copy)++;
 	if (unlikely(err)) {
 		if (net_ratelimit())
 			netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 				   (*gopp_copy)->status,
 				   pending_idx,
 				   (*gopp_copy)->source.u.ref);
-		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+		/* The first frag might still have this slot mapped */
+		if (!sharedslot)
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_ERROR);
 	}
+	(*gopp_copy)++;
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
 				   pending_idx,
 				   gop_map->handle);
 			/* Had a previous error? Invalidate this fragment. */
-			if (unlikely(err))
+			if (unlikely(err)) {
 				xenvif_idx_unmap(queue, pending_idx);
+				/* If the mapping of the first frag was OK, but
+				 * the header's copy failed, and they are
+				 * sharing a slot, send an error
+				 */
+				if (i == 0 && sharedslot)
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_ERROR);
+				else
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_OKAY);
+			}
 			continue;
 		}
 
@@ -1075,42 +1097,53 @@ check_frags:
 				   gop_map->status,
 				   pending_idx,
 				   gop_map->ref);
+
 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-		/* First error: invalidate preceding fragments. */
+
+		/* First error: if the header haven't shared a slot with the
+		 * first frag, release it as well.
+		 */
+		if (!sharedslot)
+			xenvif_idx_release(queue,
+					   XENVIF_TX_CB(skb)->pending_idx,
+					   XEN_NETIF_RSP_OKAY);
+
+		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 			xenvif_idx_unmap(queue, pending_idx);
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_OKAY);
+		}
+
+		/* And if we found the error while checking the frag_list, unmap
+		 * the first skb's frags
+		 */
+		if (first_shinfo) {
+			for (j = 0; j < first_shinfo->nr_frags; j++) {
+				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+				xenvif_idx_unmap(queue, pending_idx);
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+			}
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
 		err = newerr;
 	}
 
-	if (skb_has_frag_list(skb)) {
-		first_skb = skb;
-		skb = shinfo->frag_list;
-		shinfo = skb_shinfo(skb);
+	if (skb_has_frag_list(skb) && !first_shinfo) {
+		first_shinfo = skb_shinfo(skb);
+		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
 		nr_frags = shinfo->nr_frags;
 
 		goto check_frags;
 	}
 
-	/* There was a mapping error in the frag_list skb. We have to unmap
-	 * the first skb's frags
-	 */
-	if (first_skb && err) {
-		int j;
-		shinfo = skb_shinfo(first_skb);
-		for (j = 0; j < shinfo->nr_frags; j++) {
-			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xenvif_idx_unmap(queue, pending_idx);
-		}
-	}
-
 	*gopp_map = gop_map;
 	return err;
 }
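The sharedslot flag above captures the case where the packet header is grant-copied out of the same ring slot that backs the first frag's grant mapping, so exactly one of the two paths may release that slot. A userspace model of that ownership decision (the stubs only print what the real code would release; this is a sketch of the hunk's logic, not the driver):

#include <stdbool.h>
#include <stdio.h>

static void release_slot(const char *path, const char *status)
{
	printf("%s: release slot -> %s\n", path, status);
}

/* Models who answers for the header's slot in xenvif_tx_check_gop()
 * when the header copy fails.
 */
static void header_copy_failed(bool sharedslot, bool frag0_map_ok)
{
	if (!sharedslot) {
		/* header owns its slot outright */
		release_slot("header path", "ERROR");
	} else if (frag0_map_ok) {
		/* frag 0 still holds the mapping: unmap it first, then
		 * report the shared slot as an error from the frag path
		 */
		release_slot("frag path", "ERROR");
	}
	/* if frag 0's map also failed, the frag error handling already
	 * releases the slot, so nothing more to do here
	 */
}

int main(void)
{
	header_copy_failed(false, true);	/* separate slots */
	header_copy_failed(true, true);		/* shared slot, map was fine */
	return 0;
}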
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 
 		/* Check the remap error code. */
 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+			/* If there was an error, xenvif_tx_check_gop is
+			 * expected to release all the frags which were mapped,
+			 * so kfree_skb shouldn't do it again
+			 */
 			skb_shinfo(skb)->nr_frags = 0;
+			if (skb_has_frag_list(skb)) {
+				struct sk_buff *nskb =
+						skb_shinfo(skb)->frag_list;
+				skb_shinfo(nskb)->nr_frags = 0;
+			}
 			kfree_skb(skb);
 			continue;
 		}
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 			   tx_unmap_op.status);
 		BUG();
 	}
-
-	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
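After the last hunk, xenvif_idx_unmap() only tears down the grant mapping and no longer sends a response; every caller now pairs it with an explicit xenvif_idx_release(), which is what lets the error paths above answer RSP_ERROR instead of an unconditional RSP_OKAY. A stub sketch of the new calling convention (illustrative helpers, not the driver functions):

#include <stdio.h>

enum xen_netif_rsp { RSP_OKAY, RSP_ERROR };

/* stands in for xenvif_idx_unmap(): mapping teardown only */
static void idx_unmap(unsigned int pending_idx)
{
	printf("unmap %u\n", pending_idx);
}

/* stands in for xenvif_idx_release(): the response code is now the
 * caller's choice rather than being hard-wired to RSP_OKAY
 */
static void idx_release(unsigned int pending_idx, enum xen_netif_rsp rsp)
{
	printf("release %u -> %s\n", pending_idx,
	       rsp == RSP_OKAY ? "OKAY" : "ERROR");
}

int main(void)
{
	idx_unmap(7);
	idx_release(7, RSP_ERROR);	/* e.g. a frag sharing a failed slot */
	return 0;
}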
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 96c63dc2509e..3d85acd84bad 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
-	rtnl_unlock();
+	be->vif->num_queues = requested_num_queues;
 
 	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
 		queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
 			 * earlier queues can be destroyed using the regular
 			 * disconnect logic.
 			 */
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
 			goto err;
 		}
 
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
 			 * and also clean up any previously initialised queues.
 			 */
 			xenvif_deinit_queue(queue);
-			rtnl_lock();
-			netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-			rtnl_unlock();
+			be->vif->num_queues = queue_index;
 			goto err;
 		}
 	}
 
+	/* Initialisation completed, tell core driver the number of
+	 * active queues.
+	 */
+	rtnl_lock();
+	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+	rtnl_unlock();
+
 	xenvif_carrier_on(be->vif);
 
 	unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
 	return;
 
 err:
-	if (be->vif->dev->real_num_tx_queues > 0)
+	if (be->vif->num_queues > 0)
 		xenvif_disconnect(be->vif); /* Clean up existing queues */
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
-	rtnl_lock();
-	netif_set_real_num_tx_queues(be->vif->dev, 0);
-	rtnl_unlock();
+	be->vif->num_queues = 0;
 	return;
 }
 
@@ -596,7 +596,7 @@ err:
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 {
 	struct xenbus_device *dev = be->dev;
-	unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
+	unsigned int num_queues = queue->vif->num_queues;
 	unsigned long tx_ring_ref, rx_ring_ref;
 	unsigned int tx_evtchn, rx_evtchn;
 	int err;
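The connect() change defers netif_set_real_num_*_queues() to a single spot after every queue is initialised, since those calls must run under rtnl_lock while the driver's private num_queues needs no such lock on the error paths. A userspace model of the pattern, with a pthread mutex standing in for the RTNL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;	/* models rtnl_lock */
static unsigned int real_num_tx_queues, real_num_rx_queues;

/* models the tail of connect(): publish the final queue count to the
 * core once, under the lock, after all queues are set up
 */
static void publish_queue_counts(unsigned int requested_num_queues)
{
	pthread_mutex_lock(&rtnl);
	real_num_tx_queues = requested_num_queues;
	real_num_rx_queues = requested_num_queues;
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	publish_queue_counts(4);
	printf("tx=%u rx=%u\n", real_num_tx_queues, real_num_rx_queues);
	return 0;
}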