diff options
Diffstat (limited to 'drivers/net/xen-netback')
 drivers/net/xen-netback/interface.c |  3 +--
 drivers/net/xen-netback/netback.c   | 45 ++++++++++++++++++-----------------
 2 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227afe099..3aa8648080c8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
-	struct xenvif_stats *vif_stats;
 
 	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
 		unsigned long accum = 0;
 		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
-			vif_stats = &vif->queues[queue_index].stats;
+			void *vif_stats = &vif->queues[queue_index].stats;
 			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
 		}
 		data[i] = accum;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c4d68d768408..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
-		int notify;
-
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
-		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
-		if (notify)
-			notify_remote_via_irq(queue->tx_irq);
-
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1349,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 {
 	unsigned int offset = skb_headlen(skb);
 	skb_frag_t frags[MAX_SKB_FRAGS];
-	int i;
+	int i, f;
 	struct ubuf_info *uarg;
 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
 
@@ -1389,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		frags[i].page_offset = 0;
 		skb_frag_size_set(&frags[i], len);
 	}
-	/* swap out with old one */
-	memcpy(skb_shinfo(skb)->frags,
-	       frags,
-	       i * sizeof(skb_frag_t));
-	skb_shinfo(skb)->nr_frags = i;
-	skb->truesize += i * PAGE_SIZE;
 
-	/* remove traces of mapped pages and frag_list */
+	/* Copied all the bits from the frag list -- free it. */
 	skb_frag_list_init(skb);
+	xenvif_skb_zerocopy_prepare(queue, nskb);
+	kfree_skb(nskb);
+
+	/* Release all the original (foreign) frags. */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		skb_frag_unref(skb, f);
 	uarg = skb_shinfo(skb)->destructor_arg;
 	/* increase inflight counter to offset decrement in callback */
 	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
+	/* Fill the skb with the new (local) frags. */
+	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+	skb_shinfo(skb)->nr_frags = i;
+	skb->truesize += i * PAGE_SIZE;
 
 	return 0;
 }
@@ -1655,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
-	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
@@ -1671,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
 
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	push_tx_responses(queue);
 
 	spin_unlock_irqrestore(&queue->response_lock, flags);
-
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1697,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
 	queue->tx.rsp_prod_pvt = ++i;
 }
 
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
+}
+
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
 						      u16 id,
 						      s8 st,
