Diffstat (limited to 'drivers/net/xen-netback/netback.c')
 drivers/net/xen-netback/netback.c | 46 +++++++++++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8       st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
@@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 {
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
-	int i;
+	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
 
@@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}
-	/* swap out with old one */
-	memcpy(skb_shinfo(skb)->frags,
-	       frags,
-	       i * sizeof(skb_frag_t));
-	skb_shinfo(skb)->nr_frags = i;
-	skb->truesize += i * PAGE_SIZE;
 
-	/* remove traces of mapped pages and frag_list */
+	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
+	xenvif_skb_zerocopy_prepare(queue, nskb);
+	kfree_skb(nskb);
+
+	/* Release all the original (foreign) frags. */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;
 
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
+	/* Fill the skb with the new (local) frags. */
+	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+	skb_shinfo(skb)->nr_frags = i;
+	skb->truesize += i * PAGE_SIZE;
 
	return 0;
 }
@@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
	unsigned long flags;
 
	pending_tx_info = &queue->pending_tx_info[pending_idx];
+
	spin_lock_irqsave(&queue->response_lock, flags);
+
	make_tx_response(queue, &pending_tx_info->req, status);
-	index = pending_index(queue->pending_prod);
+
+	/* Release the pending index before pushing the Tx response so
+	 * it's available before a new Tx request is pushed by the
+	 * frontend.
+	 */
+	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;
-	/* TX shouldn't use the index before we give it back here */
-	mb();
-	queue->pending_prod++;
+
+	push_tx_responses(queue);
+
	spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
@@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
-	int notify;
 
	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
@@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue,
	RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
	queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
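Two ordering details in this diff are worth spelling out. First, the xenvif_handle_frag_list() hunks: the old code memcpy'd the new local frags over skb_shinfo(skb)->frags before dropping the references on the original foreign (guest-granted) frags, so those references leaked. A minimal user-space sketch of that refcounting hazard; 'struct page' and put_page() here are hypothetical two-line stand-ins, not the kernel's:

/* refleak.c -- hypothetical model of the frag-list ordering fix. */
#include <assert.h>
#include <stdio.h>

struct page { int refcount; };           /* stand-in for struct page    */

static void put_page(struct page *p)     /* stand-in for skb_frag_unref */
{
	p->refcount--;
}

int main(void)
{
	struct page foreign = { .refcount = 1 };  /* guest-granted frag  */
	struct page local   = { .refcount = 1 };  /* freshly copied frag */
	struct page *frags[1] = { &foreign };

	/* Old order: memcpy()ing the new frags over frags[] at this
	 * point would orphan the last reference to 'foreign' -- nothing
	 * could ever unref it, so the grant would never be released.
	 *
	 * New order (the patch): drop the foreign reference first, then
	 * install the local replacement.
	 */
	put_page(frags[0]);
	frags[0] = &local;

	assert(foreign.refcount == 0);   /* safe to return to the guest */
	printf("foreign=%d local=%d\n", foreign.refcount, local.refcount);
	return 0;
}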
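Second, xenvif_idx_release(): the pending index is now recycled before push_tx_responses() publishes the response, because a frontend that observes the response may immediately push a new Tx request that needs a free pending slot. A C11 sketch of that publication order, using hypothetical names rather than the Xen ring macros, with a release store playing the role of the write barrier inside RING_PUSH_RESPONSES_AND_CHECK_NOTIFY():

/* ordering.c -- model of "recycle the pending slot, then publish". */
#include <stdatomic.h>
#include <stdio.h>

#define PENDING_SIZE 256

static unsigned int pending_ring[PENDING_SIZE]; /* backend-private free list  */
static unsigned int pending_prod;               /* only written under the lock */
static atomic_uint  rsp_prod;                   /* observed by the frontend   */

/* Caller holds the response lock, as in xenvif_idx_release(). */
static void complete_tx(unsigned int pending_idx)
{
	/* 1. Recycle the pending slot first. */
	pending_ring[pending_prod++ % PENDING_SIZE] = pending_idx;

	/* 2. Publish the response with release semantics: the store in
	 * step 1 is guaranteed visible before any observer can see the
	 * new rsp_prod and react by pushing a request that needs a free
	 * pending slot.
	 */
	atomic_fetch_add_explicit(&rsp_prod, 1, memory_order_release);
}

int main(void)
{
	complete_tx(7);
	printf("rsp_prod=%u slot=%u\n",
	       atomic_load(&rsp_prod), pending_ring[0]);
	return 0;
}

With both steps done in that order under response_lock, and the push itself carrying the barrier, the diff can drop the old standalone mb() and the deferred pending_prod++ that the removed comment was guarding.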