Diffstat (limited to 'drivers/net/xen-netback/netback.c')
 drivers/net/xen-netback/netback.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d2594395ffb..7d50711476fe 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -44,15 +44,15 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
  * enabled by default.
  */
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
 module_param(separate_tx_rx_irq, bool, 0644);
 
 /* The time that packets can stay on the guest Rx internal queue
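Note: the hunk above initializes the declared bool with true rather than 1 and switches to the generic <xen/page.h> header. As a minimal stand-alone sketch of the same boolean module-parameter pattern (the demo_* names are hypothetical, not from the driver):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_split_irq = true;	/* 'true', not 1, for a C99 bool */
module_param(demo_split_irq, bool, 0644);
MODULE_PARM_DESC(demo_split_irq, "Example on/off knob, writable via sysfs");

With 0644 permissions the parameter is exposed under /sys/module/<module>/parameters/ and can also be set at load time, e.g. modprobe xen-netback separate_tx_rx_irq=0.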
@@ -515,14 +515,9 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		RING_IDX old_req_cons;
-		RING_IDX ring_slots_used;
-
 		queue->last_rx_time = jiffies;
 
-		old_req_cons = queue->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
-		ring_slots_used = queue->rx.req_cons - old_req_cons;
 
 		__skb_queue_tail(&rxq, skb);
 	}
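Note: old_req_cons and ring_slots_used were assigned but never read, so the hunk simply drops the dead stores. A reduced, hypothetical illustration of the pattern that gcc's -Wunused-but-set-variable flags:

static unsigned int demo_consume(unsigned int *req_cons)
{
	unsigned int old_req_cons = *req_cons;
	unsigned int ring_slots_used;

	*req_cons += 2;					/* stand-in for xenvif_gop_skb() */
	ring_slots_used = *req_cons - old_req_cons;	/* computed... */

	return *req_cons;				/* ...but never used */
}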
@@ -753,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 		slots++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 				   txp->offset, txp->size);
 			xenvif_fatal_tx_err(queue->vif);
 			return -EINVAL;
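Note: this check, like the one in xenvif_tx_build_gops() below, rejects any request whose payload would cross a page: offset + size must not exceed PAGE_SIZE. Printing the offset with %u also keeps it in the same decimal base as the adjacent size. A minimal model, assuming 4 KiB pages (the DEMO_* names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

static bool demo_fits_in_page(uint16_t offset, uint16_t size)
{
	/* Widen before adding so e.g. 0xF00 + 0x200 cannot wrap in 16 bits. */
	return (uint32_t)offset + size <= DEMO_PAGE_SIZE;
}

/* demo_fits_in_page(0xF00, 0x200) == false: end 0x1100 crosses 0x1000. */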
@@ -879,7 +874,7 @@ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
 	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 		     NETBACK_INVALID_HANDLE)) {
 		netdev_err(queue->vif->dev,
-			   "Trying to overwrite active handle! pending_idx: %x\n",
+			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 			   pending_idx);
 		BUG();
 	}
@@ -892,7 +887,7 @@ static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
 	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 		     NETBACK_INVALID_HANDLE)) {
 		netdev_err(queue->vif->dev,
-			   "Trying to unmap invalid handle! pending_idx: %x\n",
+			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 			   pending_idx);
 		BUG();
 	}
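Note: the two BUG() paths above enforce a simple ownership invariant on grant_tx_handle[]: a slot must be invalid before it is set (no double map) and valid before it is reset (no double unmap). A hypothetical reduction of that pairing, assuming a 16-entry table:

#include <linux/bug.h>

#define DEMO_INVALID_HANDLE 0xffffU

static unsigned int demo_handles[16] = {
	[0 ... 15] = DEMO_INVALID_HANDLE,
};

static void demo_handle_set(unsigned int idx, unsigned int handle)
{
	BUG_ON(demo_handles[idx] != DEMO_INVALID_HANDLE);	/* double map */
	demo_handles[idx] = handle;
}

static void demo_handle_reset(unsigned int idx)
{
	BUG_ON(demo_handles[idx] == DEMO_INVALID_HANDLE);	/* double unmap */
	demo_handles[idx] = DEMO_INVALID_HANDLE;
}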
@@ -1248,7 +1243,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
 			netdev_err(queue->vif->dev,
-				   "txreq.offset: %x, size: %u, end: %lu\n",
+				   "txreq.offset: %u, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1571,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 	smp_rmb();
 
 	while (dc != dp) {
-		BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+		BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
 		pending_idx =
 			queue->dealloc_ring[pending_index(dc++)];
 
-		pending_idx_release[gop-queue->tx_unmap_ops] =
+		pending_idx_release[gop - queue->tx_unmap_ops] =
 			pending_idx;
-		queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+		queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
 			queue->mmap_pages[pending_idx];
 		gnttab_set_unmap_op(gop,
 				    idx_to_kaddr(queue, pending_idx),
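Note: gop starts at queue->tx_unmap_ops and advances once per queued unmap, so gop - queue->tx_unmap_ops is the number of entries already written and the valid indices run from 0 to MAX_PENDING_REQS - 1. The old '>' test fired only after an out-of-bounds write at index MAX_PENDING_REQS; '>=' trips before it. A small stand-alone model of the bounds check (names hypothetical):

#include <assert.h>
#include <stddef.h>

#define DEMO_MAX_OPS 4

static int demo_ops[DEMO_MAX_OPS];

/* 'gop' points at the next free slot, as in the kernel loop. */
static int *demo_push(int *gop, int val)
{
	ptrdiff_t used = gop - demo_ops;

	/* Equivalent to BUG_ON(used >= DEMO_MAX_OPS); a '>' check
	 * would have allowed one write past the end of demo_ops[].
	 */
	assert(used < DEMO_MAX_OPS);
	gop[0] = val;
	return gop + 1;
}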
@@ -1598,12 +1593,12 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 					queue->pages_to_unmap,
 					gop - queue->tx_unmap_ops);
 		if (ret) {
-			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
 				   gop - queue->tx_unmap_ops, ret);
 			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
 				if (gop[i].status != GNTST_okay)
 					netdev_err(queue->vif->dev,
-						   " host_addr: %llx handle: %x status: %d\n",
+						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
 						   gop[i].host_addr,
 						   gop[i].handle,
 						   gop[i].status);
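Note: gop - queue->tx_unmap_ops has type ptrdiff_t, which printf-style formats take via the C99 't' length modifier (also documented for the kernel's printk); the hunk only changes the conversion from hex (%tx) to unsigned decimal (%tu), which reads more naturally for a count. A user-space demonstration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	int ring[32];
	int *gop = &ring[10];
	ptrdiff_t nr_ops = gop - ring;

	printf("nr_ops %tu\n", nr_ops);	/* decimal: "nr_ops 10" */
	printf("nr_ops %tx\n", nr_ops);	/* hex:     "nr_ops a"  */
	return 0;
}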
@@ -1736,7 +1731,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 				&queue->mmap_pages[pending_idx], 1);
 	if (ret) {
 		netdev_err(queue->vif->dev,
-			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
 			   ret,
 			   pending_idx,
 			   tx_unmap_op.host_addr,