Diffstat (limited to 'drivers/net/xen-netback/interface.c')
 drivers/net/xen-netback/interface.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..301cc037fda8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -105,8 +100,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -122,24 +116,34 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At best we'll need one slot for the header and one for each
+	 * frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_is_gso(skb))
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
@@ -149,12 +153,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +386,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +420,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
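For reference, the worst-case slot estimate that the new xenvif_start_xmit() uses for flow control can be illustrated in isolation. The following is a minimal standalone sketch, not netback code: struct fake_skb and min_slots_needed() are hypothetical stand-ins for the kernel's sk_buff, skb_shinfo(skb)->nr_frags, and skb_is_gso(skb), used only to mirror the arithmetic in the diff (one slot for the linear header, one per frag, plus one extra slot for GSO metadata).

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical, simplified stand-in for the parts of sk_buff
	 * that the estimate reads; not the kernel structure.
	 */
	struct fake_skb {
		int nr_frags;	/* number of paged fragments */
		bool is_gso;	/* packet carries GSO metadata */
	};

	/* Mirrors the accounting in the new xenvif_start_xmit():
	 * one slot for the header, one per frag, and one extra
	 * slot for GSO metadata when applicable.
	 */
	static int min_slots_needed(const struct fake_skb *skb)
	{
		int slots = 1 + skb->nr_frags;

		if (skb->is_gso)
			slots++;
		return slots;
	}

	int main(void)
	{
		struct fake_skb skb = { .nr_frags = 3, .is_gso = true };

		/* 1 header + 3 frags + 1 GSO slot = 5 */
		printf("min slots: %d\n", min_slots_needed(&skb));
		return 0;
	}

Per the diff, when xenvif_rx_ring_slots_available() reports fewer free slots than this estimate, the queue is stopped via xenvif_stop_queue() rather than the packet being dropped; the skb is queued on vif->rx_queue and the kthread (kicked via xenvif_kick_thread()) drains the ring and restarts the queue.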