author		Zoltan Kiss <zoltan.kiss@citrix.com>	2014-03-06 16:48:23 -0500
committer	David S. Miller <davem@davemloft.net>	2014-03-07 15:56:34 -0500
commit		8f13dd9612286cc0d38d32ff9543763b7c74f6a5 (patch)
tree		90f0d6b5accf80a8a3cc0390c04a142f0c7b0bf1 /drivers/net/xen-netback/netback.c
parent		31c70d5956fc3d1abf83e9ab5e1d8237dea59498 (diff)
xen-netback: Use skb->cb for pending_idx
Storing the pending_idx in the first byte of the linear buffer never looked good; skb->cb is a more appropriate place for it. Keeping it there also prevents the header from being grant copied directly into the linear buffer, and once the header has been copied there the pending_idx is gone, so it's time to change it. This patch also introduces helpers for the RX side.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
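For background, skb->cb is a 48-byte scratch area in struct sk_buff that the layer currently owning the skb may use freely; overlaying a small struct on it, as this patch does, is the conventional kernel pattern. Below is a minimal sketch of that pattern: struct xenvif_tx_cb and XENVIF_TX_CB() are taken from the patch itself, while example_store()/example_load() are hypothetical helpers for illustration only, not part of the commit.

#include <linux/skbuff.h>
#include <linux/bug.h>

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static void example_store(struct sk_buff *skb, u16 pending_idx)
{
	/* The overlay must fit in the fixed-size (48-byte) control buffer. */
	BUILD_BUG_ON(sizeof(struct xenvif_tx_cb) > sizeof(skb->cb));

	/* Stash the slot index in skb->cb instead of the linear buffer. */
	XENVIF_TX_CB(skb)->pending_idx = pending_idx;
}

static u16 example_load(struct sk_buff *skb)
{
	/* Retrieve it later without touching skb->data. */
	return XENVIF_TX_CB(skb)->pending_idx;
}

Keeping the index out of the linear buffer means the header can later be grant copied straight into skb->data without clobbering it, which is the situation the commit message alludes to.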
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--	drivers/net/xen-netback/netback.c	42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e5284bca2d90..43ae4bad50c4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -455,10 +455,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
 	}
 }
 
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
 	int meta_slots_used;
 };
 
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
 void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
@@ -474,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	LIST_HEAD(notify);
 	int ret;
 	unsigned long offset;
-	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
 
 	struct netrx_pending_operations npo = {
@@ -513,9 +514,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 		} else
 			vif->rx_last_skb_slots = 0;
 
-		sco = (struct skb_cb_overlay *)skb->cb;
-		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(sco->meta_slots_used > max_slots_needed);
+		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
+		BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}
@@ -529,7 +529,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-		sco = (struct skb_cb_overlay *)skb->cb;
 
 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
 		    vif->gso_prefix_mask) {
@@ -540,19 +539,21 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 			resp->offset = vif->meta[npo.meta_cons].gso_size;
 			resp->id = vif->meta[npo.meta_cons].id;
-			resp->status = sco->meta_slots_used;
+			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
 
 			npo.meta_cons++;
-			sco->meta_slots_used--;
+			XENVIF_RX_CB(skb)->meta_slots_used--;
 		}
 
 
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
 
-		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+		status = xenvif_check_gop(vif,
+					  XENVIF_RX_CB(skb)->meta_slots_used,
+					  &npo);
 
-		if (sco->meta_slots_used == 1)
+		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
 			flags = 0;
 		else
 			flags = XEN_NETRXF_more_data;
@@ -589,13 +590,13 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 		xenvif_add_frag_responses(vif, status,
 					  vif->meta + npo.meta_cons + 1,
-					  sco->meta_slots_used);
+					  XENVIF_RX_CB(skb)->meta_slots_used);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
 		need_to_notify |= !!ret;
 
-		npo.meta_cons += sco->meta_slots_used;
+		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
@@ -772,6 +773,13 @@ static struct page *xenvif_alloc_page(struct xenvif *vif,
 	return page;
 }
 
+
+struct xenvif_tx_cb {
+	u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
 static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
 					       struct sk_buff *skb,
 					       struct xen_netif_tx_request *txp,
@@ -779,7 +787,7 @@ static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	u16 head_idx = 0;
 	int slot, start;
 	struct page *page;
@@ -897,7 +905,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 			       struct gnttab_copy **gopp)
 {
 	struct gnttab_copy *gop = *gopp;
-	u16 pending_idx = *((u16 *)skb->data);
+	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	struct pending_tx_info *tx_info;
 	int nr_frags = shinfo->nr_frags;
@@ -944,7 +952,7 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
 			continue;
 
 		/* First error: invalidate header and preceding fragments. */
-		pending_idx = *((u16 *)skb->data);
+		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -1236,7 +1244,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 		memcpy(&vif->pending_tx_info[pending_idx].req,
 		       &txreq, sizeof(txreq));
 		vif->pending_tx_info[pending_idx].head = index;
-		*((u16 *)skb->data) = pending_idx;
+		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
 		__skb_put(skb, data_len);
 
@@ -1283,7 +1291,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
 		u16 pending_idx;
 		unsigned data_len;
 
-		pending_idx = *((u16 *)skb->data);
+		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 		txp = &vif->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */