author    Paul Durrant <Paul.Durrant@citrix.com>  2016-03-10 07:30:27 -0500
committer David S. Miller <davem@davemloft.net>   2016-03-13 22:08:01 -0400
commit    562abd39a1902745bdcab266c7824cd6c5bc34d3 (patch)
tree      e4c6dd06060b89a11b76deb2cabd965a6d8582b0
parent    6b8abef5f833b03be1b5af491193477ad609ad35 (diff)
xen-netback: support multiple extra info fragments passed from frontend
The code does not currently support a frontend passing multiple extra info
fragments to the backend in a tx request. The xenvif_get_extras() function
handles multiple extra_info fragments, but make_tx_response() assumes there
is only ever a single extra info fragment.

This patch modifies xenvif_get_extras() to pass back a count of extra info
fragments, which is then passed to make_tx_response() (after possibly being
stashed in pending_tx_info for deferred responses).

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
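For readers following the change: each extra info segment occupies one slot
on the shared tx ring, and a segment announces a successor via
XEN_NETIF_EXTRA_FLAG_MORE. Below is a minimal sketch of that slot accounting;
the stand-in type and count_extra_slots() are illustrative only and not part
of the patch (the real definitions live in include/xen/interface/io/netif.h).

#include <stdint.h>

/* Illustrative stand-in for struct xen_netif_extra_info; only the
 * fields relevant to slot accounting are modelled here.
 */
struct extra_info_sketch {
	uint8_t type;	/* XEN_NETIF_EXTRA_TYPE_* */
	uint8_t flags;	/* chain continues while FLAG_MORE is set */
};

#define FLAG_MORE_SKETCH (1 << 0)

/* Walk a chain of extra info segments and return how many ring slots
 * it consumed.  This is the quantity xenvif_get_extras() now reports
 * through its new extra_count out-parameter.
 */
static unsigned int count_extra_slots(const struct extra_info_sketch *extra)
{
	unsigned int count = 1;

	while (extra->flags & FLAG_MORE_SKETCH) {
		extra++;
		count++;
	}
	return count;
}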
 drivers/net/xen-netback/common.h  |  1
 drivers/net/xen-netback/netback.c | 65
 2 files changed, 43 insertions(+), 23 deletions(-)
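Why the count matters: for every slot a request consumed, the backend must
produce exactly one response so the producer and consumer indices stay in
step. The old make_tx_response() tested XEN_NETTXF_extra_info and wrote at
most one XEN_NETIF_RSP_NULL, which undercounts when a frontend chains two or
more extras. The following is a runnable model of the corrected loop — names
are illustrative; the real code operates on the shared ring via
RING_GET_RESPONSE:

#include <stdio.h>

#define RSP_OKAY 0
#define RSP_NULL 1	/* stands in for XEN_NETIF_RSP_NULL */

/* Model of make_tx_response()'s slot accounting after the patch:
 * one status slot for the request itself, then one NULL slot per
 * extra info fragment, returning the advanced producer index.
 */
static unsigned int model_make_tx_response(int *ring, unsigned int i,
					   unsigned int extra_count, int st)
{
	ring[i] = st;
	while (extra_count-- != 0)
		ring[++i] = RSP_NULL;
	return ++i;	/* new rsp_prod_pvt */
}

int main(void)
{
	int ring[8] = { 0 };

	/* A request that chained two extras consumed three slots and
	 * must yield three responses; the old flag test yielded two.
	 */
	unsigned int prod = model_make_tx_response(ring, 0, 2, RSP_OKAY);

	printf("rsp_prod_pvt advanced to %u\n", prod);	/* prints 3 */
	return 0;
}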
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 112825200d41..f44b38846420 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
 
 struct pending_tx_info {
 	struct xen_netif_tx_request req; /* tx request */
+	unsigned int extra_count;
 	/* Callback data for released SKBs. The callback is always
 	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
 	 * also an index in pending_tx_info array. It is initialized in
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 61b97c34bb3b..b42f26029225 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
 
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
 }
 
 static void xenvif_tx_err(struct xenvif_queue *queue,
-			  struct xen_netif_tx_request *txp, RING_IDX end)
+			  struct xen_netif_tx_request *txp,
+			  unsigned int extra_count, RING_IDX end)
 {
 	RING_IDX cons = queue->tx.req_cons;
 	unsigned long flags;
 
 	do {
 		spin_lock_irqsave(&queue->response_lock, flags);
-		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 
 static int xenvif_count_requests(struct xenvif_queue *queue,
 				 struct xen_netif_tx_request *first,
+				 unsigned int extra_count,
 				 struct xen_netif_tx_request *txp,
 				 int work_to_do)
 {
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 	} while (more_data);
 
 	if (drop_err) {
-		xenvif_tx_err(queue, first, cons + slots);
+		xenvif_tx_err(queue, first, extra_count, cons + slots);
 		return drop_err;
 	}
 
@@ -827,9 +830,10 @@ struct xenvif_tx_cb {
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
 					   struct xen_netif_tx_request *txp,
+					   unsigned int extra_count,
 					   struct gnttab_map_grant_ref *mop)
 {
 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 
 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 	       sizeof(*txp));
+	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 }
 
 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 	}
 
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
-			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+						gop);
 			frag_set_pending_idx(&frags[shinfo->nr_frags],
 					     pending_idx);
 		}
@@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,
 			     struct xen_netif_extra_info *extras,
+			     unsigned int *extra_count,
 			     int work_to_do)
 {
 	struct xen_netif_extra_info extra;
 	RING_IDX cons = queue->tx.req_cons;
@@ -1109,9 +1116,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+		queue->tx.req_cons = ++cons;
+		(*extra_count)++;
+
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-			queue->tx.req_cons = ++cons;
 			netdev_err(queue->vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1119,7 +1129,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-		queue->tx.req_cons = ++cons;
 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	return work_to_do;
@@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+		unsigned int extra_count;
 		u16 pending_idx;
 		RING_IDX idx;
 		int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx.req_cons = ++idx;
 
 		memset(extras, 0, sizeof(extras));
+		extra_count = 0;
 		if (txreq.flags & XEN_NETTXF_extra_info) {
 			work_to_do = xenvif_get_extras(queue, extras,
+						       &extra_count,
 						       work_to_do);
 			idx = queue->tx.req_cons;
 			if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq,
+			make_tx_response(queue, &txreq, extra_count,
 					 (ret == 0) ?
 					 XEN_NETIF_RSP_OKAY :
 					 XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			make_tx_response(queue, &txreq, extra_count,
+					 XEN_NETIF_RSP_OKAY);
 			push_tx_responses(queue);
 			continue;
 		}
 
-		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+		ret = xenvif_count_requests(queue, &txreq, extra_count,
+					    txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
 
@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(txreq.size < ETH_HLEN)) {
 			netdev_dbg(queue->vif->dev,
 				   "Bad packet size: %d\n", txreq.size);
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(queue->vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
 				kfree_skb(skb);
-				xenvif_tx_err(queue, &txreq, idx);
+				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
 						   "Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (data_len < txreq.size) {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+						extra_count, gop);
 			gop++;
 		} else {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
-			       sizeof(txreq));
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       &txreq, sizeof(txreq));
+			queue->pending_tx_info[pending_idx].extra_count =
+				extra_count;
 		}
 
 		queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
-	make_tx_response(queue, &pending_tx_info->req, status);
+	make_tx_response(queue, &pending_tx_info->req,
+			 pending_tx_info->extra_count, status);
 
 	/* Release the pending index before pusing the Tx response so
 	 * its available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8 st)
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
 	resp->id = txp->id;
 	resp->status = st;
 
-	if (txp->flags & XEN_NETTXF_extra_info)
+	while (extra_count-- != 0)
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
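
One subtlety worth noting: responses for mapped data are deferred until
xenvif_idx_release() runs, long after the original tx request has left the
ring, which is why extra_count is stashed in pending_tx_info alongside the
request. A compressed sketch of that stash-and-replay shape follows; the
types and helper names are abbreviated stand-ins, not the driver's own (see
the hunks above for the real code):

/* Abbreviated shapes of the structures involved. */
struct req_sketch { unsigned int id; };

struct pending_tx_info_sketch {
	struct req_sketch req;		/* copy of the tx request */
	unsigned int extra_count;	/* stashed for the deferred response */
};

/* At map time: stash both the request and the extra count. */
static void stash(struct pending_tx_info_sketch *info,
		  const struct req_sketch *req, unsigned int extra_count)
{
	info->req = *req;
	info->extra_count = extra_count;
}

/* At release time: replay the stashed count so a deferred response
 * nulls out exactly as many extra slots as an immediate one would.
 */
static unsigned int response_slots(const struct pending_tx_info_sketch *info)
{
	return 1 + info->extra_count;
}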