Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  96
1 file changed, 68 insertions(+), 28 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 555b70c8b863..f926b5ab3d09 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -41,6 +41,9 @@ struct virtnet_info
 	struct net_device *dev;
 	struct napi_struct napi;
 
+	/* The skb we couldn't send because buffers were full. */
+	struct sk_buff *last_xmit_skb;
+
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
@@ -142,10 +145,10 @@ drop:
 static void try_fill_recv(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	struct scatterlist sg[1+MAX_SKB_FRAGS];
+	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err;
 
-	sg_init_table(sg, 1+MAX_SKB_FRAGS);
+	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
 		if (unlikely(!skb))
@@ -221,23 +224,22 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
-		vi->dev->stats.tx_bytes += len;
+		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		kfree_skb(skb);
 	}
 }
 
-static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	struct virtnet_info *vi = netdev_priv(dev);
-	int num, err;
-	struct scatterlist sg[1+MAX_SKB_FRAGS];
+	int num;
+	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	struct virtio_net_hdr *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
-	sg_init_table(sg, 1+MAX_SKB_FRAGS);
+	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 
-	pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
+	pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
 		 dest[0], dest[1], dest[2],
 		 dest[3], dest[4], dest[5]);
 
@@ -272,30 +274,51 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	vnet_hdr_to_sg(sg, skb);
 	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-	__skb_queue_head(&vi->send, skb);
+
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
+}
+
+static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err) {
-		pr_debug("%s: virtio not prepared to send\n", dev->name);
-		netif_stop_queue(dev);
-
-		/* Activate callback for using skbs: if this returns false it
-		 * means some were used in the meantime. */
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			vi->svq->vq_ops->disable_cb(vi->svq);
-			netif_start_queue(dev);
-			goto again;
+
+	/* If we has a buffer left over from last time, send it now. */
+	if (vi->last_xmit_skb) {
+		if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
+			/* Drop this skb: we only queue one. */
+			vi->dev->stats.tx_dropped++;
+			kfree_skb(skb);
+			goto stop_queue;
 		}
-		__skb_unlink(skb, &vi->send);
+		vi->last_xmit_skb = NULL;
+	}
 
-		return NETDEV_TX_BUSY;
+	/* Put new one in send queue and do transmit */
+	__skb_queue_head(&vi->send, skb);
+	if (xmit_skb(vi, skb) != 0) {
+		vi->last_xmit_skb = skb;
+		goto stop_queue;
 	}
+done:
 	vi->svq->vq_ops->kick(vi->svq);
-
-	return 0;
+	return NETDEV_TX_OK;
+
+stop_queue:
+	pr_debug("%s: virtio not prepared to send\n", dev->name);
+	netif_stop_queue(dev);
+
+	/* Activate callback for using skbs: if this returns false it
+	 * means some were used in the meantime. */
+	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		vi->svq->vq_ops->disable_cb(vi->svq);
+		netif_start_queue(dev);
+		goto again;
+	}
+	goto done;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -355,17 +378,26 @@ static int virtnet_probe(struct virtio_device *vdev)
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
 	/* Do we support "hardware" checksums? */
-	if (csum && vdev->config->feature(vdev, VIRTIO_NET_F_CSUM)) {
+	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
 		/* This opens up the world of extra features. */
 		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
-		if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_GSO)) {
+		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
 			dev->features |= NETIF_F_TSO | NETIF_F_UFO
 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
 		}
+		/* Individual feature bits: what can host handle? */
+		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+			dev->features |= NETIF_F_TSO;
+		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+			dev->features |= NETIF_F_TSO6;
+		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+			dev->features |= NETIF_F_TSO_ECN;
+		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+			dev->features |= NETIF_F_UFO;
 	}
 
 	/* Configuration may specify what MAC to use. Otherwise random. */
-	if (vdev->config->feature(vdev, VIRTIO_NET_F_MAC)) {
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
 		vdev->config->get(vdev,
 				  offsetof(struct virtio_net_config, mac),
 				  dev->dev_addr, dev->addr_len);
@@ -454,7 +486,15 @@ static struct virtio_device_id id_table[] = {
 	{ 0 },
 };
 
+static unsigned int features[] = {
+	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+	VIRTIO_NET_F_HOST_ECN,
+};
+
 static struct virtio_driver virtio_net = {
+	.feature_table = features,
+	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,