Diffstat (limited to 'drivers/net/xen-netback/netback.c')

 drivers/net/xen-netback/netback.c | 51 +++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..cab9f5257f57 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
+		int notify;
+
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
+		if (notify)
+			notify_remote_via_irq(queue->tx_irq);
+
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
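With this change the Tx error path pushes its error responses and raises the event-channel interrupt itself, rather than relying on make_tx_response() to do so (the last two hunks remove that behaviour). For reference, RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() from include/xen/interface/io/ring.h expands to approximately the following; the exact barrier primitives have varied across kernel versions, so treat this as a sketch:

    #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {      \
            RING_IDX __old = (_r)->sring->rsp_prod;                     \
            RING_IDX __new = (_r)->rsp_prod_pvt;                        \
            wmb(); /* frontend sees responses before new producer index */ \
            (_r)->sring->rsp_prod = __new;                              \
            mb(); /* frontend sees new index before we read rsp_event */ \
            (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <   \
                         (RING_IDX)(__new - __old));                    \
    } while (0)

The _notify result is true only when the frontend's rsp_event falls within the newly pushed batch, so the notify_remote_via_irq() call outside the lock is skipped whenever the frontend is already processing responses.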
@@ -1343,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 {
 	unsigned int offset = skb_headlen(skb);
 	skb_frag_t frags[MAX_SKB_FRAGS];
-	int i;
+	int i, f;
 	struct ubuf_info *uarg;
 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
 
@@ -1383,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		frags[i].page_offset = 0;
 		skb_frag_size_set(&frags[i], len);
 	}
-	/* swap out with old one */
-	memcpy(skb_shinfo(skb)->frags,
-	       frags,
-	       i * sizeof(skb_frag_t));
-	skb_shinfo(skb)->nr_frags = i;
-	skb->truesize += i * PAGE_SIZE;
 
-	/* remove traces of mapped pages and frag_list */
+	/* Copied all the bits from the frag list -- free it. */
 	skb_frag_list_init(skb);
+	xenvif_skb_zerocopy_prepare(queue, nskb);
+	kfree_skb(nskb);
+
+	/* Release all the original (foreign) frags. */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		skb_frag_unref(skb, f);
 	uarg = skb_shinfo(skb)->destructor_arg;
 	/* increase inflight counter to offset decrement in callback */
 	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
+	/* Fill the skb with the new (local) frags. */
+	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+	skb_shinfo(skb)->nr_frags = i;
+	skb->truesize += i * PAGE_SIZE;
 
 	return 0;
 }
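The reordering in this hunk also plugs a reference leak: the old code overwrote skb_shinfo(skb)->frags with the freshly allocated local copies without ever dropping the references on the original foreign (grant-mapped) pages. A condensed sketch of the new sequence, simplified from the hunk above rather than verbatim:

    /* 1. Everything has been copied out of the frag list -- free it. */
    skb_frag_list_init(skb);
    xenvif_skb_zerocopy_prepare(queue, nskb);
    kfree_skb(nskb);

    /* 2. Drop the foreign pages while frags[] still points at them;
     *    overwriting frags[] first (the old order) leaked these refs.
     */
    for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
            skb_frag_unref(skb, f);

    /* 3. Fire the zerocopy callback to tear down the grant mappings. */
    uarg = skb_shinfo(skb)->destructor_arg;
    atomic_inc(&queue->inflight_packets);
    uarg->callback(uarg, true);
    skb_shinfo(skb)->destructor_arg = NULL;

    /* 4. Only now is it safe to install the local copies. */
    memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
    skb_shinfo(skb)->nr_frags = i;
    skb->truesize += i * PAGE_SIZE;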
@@ -1649,17 +1657,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
+	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
+
 	spin_lock_irqsave(&queue->response_lock, flags);
+
 	make_tx_response(queue, &pending_tx_info->req, status);
-	index = pending_index(queue->pending_prod);
+
+	/* Release the pending index before pushing the Tx response so
+	 * it's available before a new Tx request is pushed by the
+	 * frontend.
+	 */
+	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
-	/* TX shouldn't use the index before we give it back here */
-	mb();
-	queue->pending_prod++;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
 	spin_unlock_irqrestore(&queue->response_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
 }
 
 
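The rewrite of xenvif_idx_release() fixes an ordering race and drops the explicit barrier. Previously make_tx_response() pushed the response (and raised the interrupt) before the pending index was returned to the ring, so a fast frontend could submit a new Tx request while no free pending slot was visible yet. A sketch of the two orderings, as a hypothetical timeline for illustration:

    /* Old order (racy):
     *   backend                              frontend
     *   make_tx_response()
     *     -> pushes response, raises irq --> sees response,
     *                                        pushes new Tx request
     *   queue->pending_ring[index] = pending_idx;
     *   mb();
     *   queue->pending_prod++;   <-- slot released only here
     *
     * New order: release the slot, then push the response. The write
     * barrier inside RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() orders the
     * pending-ring update before the response becomes visible to the
     * frontend, which is why the explicit mb() can go away.
     */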
@@ -1669,7 +1688,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
 	struct xen_netif_tx_response *resp;
-	int notify;
 
 	resp = RING_GET_RESPONSE(&queue->tx, i);
 	resp->id = txp->id;
@@ -1679,9 +1697,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 	RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
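After these last two hunks, make_tx_response() only queues a response and advances rsp_prod_pvt; it no longer pushes responses or notifies the frontend. Every caller is now responsible for that, following the pattern the earlier hunks introduce. A minimal sketch, with txp and status standing in for the caller's values:

    int notify;
    unsigned long flags;

    spin_lock_irqsave(&queue->response_lock, flags);

    make_tx_response(queue, txp, status);
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);

    spin_unlock_irqrestore(&queue->response_lock, flags);

    if (notify)
            notify_remote_via_irq(queue->tx_irq);

This batches all responses produced under response_lock into a single push, with at most one interrupt per batch, delivered after the lock is released.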