Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 drivers/net/xen-netback/interface.c | 46 +++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 34ca4e58a43d..301cc037fda8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -46,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -104,8 +100,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -121,24 +116,34 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At best we'll need one slot for the header and one for each
+	 * frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_is_gso(skb))
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
@@ -148,12 +153,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -387,6 +386,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -419,7 +420,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
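
Note (not part of the patch): the flow-control check this patch adds to xenvif_start_xmit() reduces to a small worst-case slot estimate. The standalone C sketch below mirrors that arithmetic only; the struct and helpers are hypothetical stand-ins for skb_shinfo()->nr_frags, skb_is_gso() and xenvif_rx_ring_slots_available(), not the real netback API.

/* Illustrative only: plain-C model of the slot estimate introduced in
 * xenvif_start_xmit(). Names below are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	int nr_frags;	/* stands in for skb_shinfo(skb)->nr_frags */
	bool gso;	/* stands in for skb_is_gso(skb) */
};

/* Worst case: one slot for the header, one per frag, plus one extra
 * slot for GSO metadata.
 */
static int min_slots_needed(const struct fake_skb *skb)
{
	int slots = 1 + skb->nr_frags;

	if (skb->gso)
		slots++;
	return slots;
}

int main(void)
{
	struct fake_skb skb = { .nr_frags = 3, .gso = true };
	int ring_slots_free = 4;	/* what xenvif_rx_ring_slots_available()
					 * would effectively compare against */
	int needed = min_slots_needed(&skb);

	/* Mirrors the patch: if the skb can't possibly fit in the remaining
	 * slots, the queue is stopped so the ring can drain.
	 */
	printf("need %d slots, %d free -> %s\n", needed, ring_slots_free,
	       needed > ring_slots_free ? "stop queue" : "queue skb");
	return 0;
}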