author     Rusty Russell <rusty@rustcorp.com.au>  2008-02-04 23:50:07 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>  2008-02-04 07:50:08 -0500
commit     2cb9c6bafc58cf4066cb15f0ac6989a1015a02cc (patch)
tree       5a81ecc2d80724054d2b8f5679a58e1d42e9322a /drivers
parent     a48bd8f67003c342e147309a331f656a5e75a5e4 (diff)
virtio: free transmit skbs when notified, not on next xmit.
This fixes a potential dangling xmit problem.  We also suppress refill
interrupts until we need them.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
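For context, the patch leans on the callback-control half of the old vq_ops indirection. The sketch below reconstructs that interface from how this diff uses it; the member layout and comments are assumptions, not a verbatim copy of the 2.6.25-era include/linux/virtio.h. The key point is enable_cb()'s return value: false means buffers were consumed while callbacks were being re-armed, which is what makes the retry in start_xmit() safe.

/* Reconstructed sketch of the pre-virtqueue-API indirection table, reduced to
 * the operations this patch exercises.  Layout and wording are assumptions;
 * only the calls and return conventions visible in the diff are relied on. */
#include <linux/types.h>

struct virtqueue;
struct scatterlist;

struct virtqueue_ops {
	/* Expose a buffer to the host: 0 on success, non-zero when it cannot
	 * be queued (the case start_xmit() now handles by stopping the queue). */
	int (*add_buf)(struct virtqueue *vq, struct scatterlist sg[],
		       unsigned int out_num, unsigned int in_num, void *data);

	/* Notify the host that new buffers are pending. */
	void (*kick)(struct virtqueue *vq);

	/* Reclaim a buffer the host has finished with; NULL when none left. */
	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);

	/* Suppress completion callbacks, as skb_xmit_done() now does first. */
	void (*disable_cb)(struct virtqueue *vq);

	/* Re-arm callbacks; returns false if buffers were used in the
	 * meantime, which is why start_xmit() loops back to "again". */
	bool (*enable_cb)(struct virtqueue *vq);
};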
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/virtio_net.c  25
1 file changed, 19 insertions, 6 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bd4d26a36ead..a61c176607f4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,11 +56,13 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
 	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
 }
 
-static void skb_xmit_done(struct virtqueue *rvq)
+static void skb_xmit_done(struct virtqueue *svq)
 {
-	struct virtnet_info *vi = rvq->vdev->priv;
+	struct virtnet_info *vi = svq->vdev->priv;
 
-	/* In case we were waiting for output buffers. */
+	/* Suppress further interrupts. */
+	svq->vq_ops->disable_cb(svq);
+	/* We were waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
 }
 
@@ -232,8 +234,6 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	pr_debug("%s: xmit %p %s\n", dev->name, skb, print_mac(mac, dest));
 
-	free_old_xmit_skbs(vi);
-
 	/* Encode metadata header at front. */
 	hdr = skb_vnet_hdr(skb);
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -266,11 +266,24 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vnet_hdr_to_sg(sg, skb);
 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
 	__skb_queue_head(&vi->send, skb);
+
+again:
+	/* Free up any pending old buffers before queueing new ones. */
+	free_old_xmit_skbs(vi);
 	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
 	if (err) {
 		pr_debug("%s: virtio not prepared to send\n", dev->name);
-		skb_unlink(skb, &vi->send);
 		netif_stop_queue(dev);
+
+		/* Activate callback for using skbs: if this fails it
+		 * means some were used in the meantime. */
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			printk("Unlikely: restart svq failed\n");
+			netif_start_queue(dev);
+			goto again;
+		}
+		__skb_unlink(skb, &vi->send);
+
 		return NETDEV_TX_BUSY;
 	}
 	vi->svq->vq_ops->kick(vi->svq);
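The hunk above moves the only call to free_old_xmit_skbs() behind the new "again:" label, but the helper itself is outside this diff. Below is a minimal sketch of what such a reclaim helper looks like in this driver, reconstructed from the vq_ops usage shown above; the real function's exact body (its debug message and the stats updates in particular) may differ.

/* Sketch only (not part of this patch): reap every transmitted skb the host
 * has finished with, unlink it from the driver's send list, and free it.
 * Runs from the transmit path, including the "again:" retry loop above. */
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		/* start_xmit() queued the skb on vi->send; drop it from that
		 * list before freeing it. */
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}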