path: root/drivers/net/virtio_net.c
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  153
1 file changed, 99 insertions(+), 54 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7bab4de658a9..d208f8604981 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_small(void *buf, unsigned int len)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct sk_buff *skb = buf;
+
+	len -= sizeof(struct virtio_net_hdr);
+	skb_trim(skb, len);
+
+	return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+				   struct receive_queue *rq,
+				   void *buf,
+				   unsigned int len)
+{
+	struct page *page = buf;
+	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+	if (unlikely(!skb))
+		goto err;
+
+	return skb;
+
+err:
+	dev->stats.rx_dropped++;
+	give_pages(rq, page);
+	return NULL;
+}
+
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+					 struct receive_queue *rq,
+					 void *buf,
+					 unsigned int len)
+{
+	struct skb_vnet_hdr *hdr = buf;
+	int num_buf = hdr->mhdr.num_buffers;
+	struct page *page = virt_to_head_page(buf);
+	int offset = buf - page_address(page);
+	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+					       MERGE_BUFFER_LEN);
 	struct sk_buff *curr_skb = head_skb;
-	char *buf;
-	struct page *page;
-	int num_buf, len, offset;
 
-	num_buf = hdr->mhdr.num_buffers;
+	if (unlikely(!curr_skb))
+		goto err_skb;
+
 	while (--num_buf) {
-		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		int num_skb_frags;
+
 		buf = virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers missing\n",
-				 head_skb->dev->name, hdr->mhdr.num_buffers);
-			head_skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 dev->name, num_buf, hdr->mhdr.num_buffers);
+			dev->stats.rx_length_errors++;
+			goto err_buf;
 		}
 		if (unlikely(len > MERGE_BUFFER_LEN)) {
 			pr_debug("%s: rx error: merge buffer too long\n",
-				 head_skb->dev->name);
+				 dev->name);
 			len = MERGE_BUFFER_LEN;
 		}
+
+		page = virt_to_head_page(buf);
+		--rq->num;
+
+		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-			if (unlikely(!nskb)) {
-				head_skb->dev->stats.rx_dropped++;
-				return -ENOMEM;
-			}
+
+			if (unlikely(!nskb))
+				goto err_skb;
 			if (curr_skb == head_skb)
 				skb_shinfo(curr_skb)->frag_list = nskb;
 			else
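For context on the loop being rewritten in this hunk: with mergeable receive buffers, the first buffer of a packet carries a header whose num_buffers field says how many descriptors the whole packet spans, and the loop pulls the remaining fragments off the virtqueue one by one. A rough userspace model of that happy path follows; the queue, sizes, and helper names here are invented for illustration, not the kernel's API.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FRAG_SIZE 64		/* stand-in for MERGE_BUFFER_LEN */

	static char frag1[FRAG_SIZE], frag2[FRAG_SIZE], frag3[FRAG_SIZE];
	static char *queue[] = { frag1, frag2, frag3 };
	static int head;

	/* Toy virtqueue_get_buf(): hand out the posted buffers in order. */
	static char *get_buf(void) { return head < 3 ? queue[head++] : NULL; }

	int main(void)
	{
		char pkt[3 * FRAG_SIZE];
		size_t off;
		uint16_t num_buf;

		/* The device writes the descriptor count into the first
		 * buffer, like virtio_net_hdr_mrg_rxbuf.num_buffers. */
		uint16_t nb = 3;
		memcpy(frag1, &nb, sizeof(nb));

		char *buf = get_buf();
		memcpy(&num_buf, buf, sizeof(num_buf));
		memcpy(pkt, buf + sizeof(nb), FRAG_SIZE - sizeof(nb));
		off = FRAG_SIZE - sizeof(nb);

		while (--num_buf) {	/* remaining fragments: payload only */
			buf = get_buf();
			if (!buf)
				return 1;	/* kernel: goto err_buf */
			memcpy(pkt + off, buf, FRAG_SIZE);
			off += FRAG_SIZE;
		}
		printf("assembled %zu payload bytes from 3 buffers\n", off);
		return 0;
	}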
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			head_skb->len += len;
 			head_skb->truesize += MERGE_BUFFER_LEN;
 		}
-		page = virt_to_head_page(buf);
-		offset = buf - (char *)page_address(page);
+		offset = buf - page_address(page);
 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
 			put_page(page);
 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
 					offset, len, MERGE_BUFFER_LEN);
 		}
+	}
+
+	return head_skb;
+
+err_skb:
+	put_page(page);
+	while (--num_buf) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 dev->name, num_buf);
+			dev->stats.rx_length_errors++;
+			break;
+		}
+		page = virt_to_head_page(buf);
+		put_page(page);
 		--rq->num;
 	}
-	return 0;
+err_buf:
+	dev->stats.rx_dropped++;
+	dev_kfree_skb(head_skb);
+	return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
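The new err_skb/err_buf labels exist because a partially assembled mergeable packet still owns descriptors sitting in the virtqueue; bailing out without draining them would leak pages and desynchronize rq->num. A minimal sketch of the drain loop, assuming a toy queue API (free() plays the role of put_page()):

	#include <stdio.h>
	#include <stdlib.h>

	static void *bufs[3];
	static int idx;

	/* Toy virtqueue_get_buf(): returns NULL when the device posted
	 * fewer buffers than the header promised, the error the kernel
	 * logs above. */
	static void *get_buf(void) { return idx < 3 ? bufs[idx++] : NULL; }

	int main(void)
	{
		int num_buf = 3;

		bufs[0] = malloc(16);	/* only one expected buffer arrived */

		while (--num_buf) {
			void *buf = get_buf();
			if (!buf) {
				fprintf(stderr, "rx error: %d buffers missing\n",
					num_buf);
				break;	/* kernel also bumps rx_length_errors */
			}
			free(buf);	/* put_page() in the kernel */
		}
		/* Falls through to the err_buf equivalent: count the drop
		 * and free the partial packet (dev_kfree_skb(head_skb)). */
		return 0;
	}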
@@ -362,48 +421,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct page *page;
 	struct skb_vnet_hdr *hdr;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->big_packets)
-			give_pages(rq, buf);
-		else if (vi->mergeable_rx_bufs)
+		if (vi->mergeable_rx_bufs)
 			put_page(virt_to_head_page(buf));
+		else if (vi->big_packets)
+			give_pages(rq, buf);
 		else
 			dev_kfree_skb(buf);
 		return;
 	}
 
-	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-		skb = buf;
-		len -= sizeof(struct virtio_net_hdr);
-		skb_trim(skb, len);
-	} else if (vi->mergeable_rx_bufs) {
-		struct page *page = virt_to_head_page(buf);
-		skb = page_to_skb(rq, page,
-				  (char *)buf - (char *)page_address(page),
-				  len, MERGE_BUFFER_LEN);
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			put_page(page);
-			return;
-		}
-		if (receive_mergeable(rq, skb)) {
-			dev_kfree_skb(skb);
-			return;
-		}
-	} else {
-		page = buf;
-		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			give_pages(rq, page);
-			return;
-		}
-	}
+	if (vi->mergeable_rx_bufs)
+		skb = receive_mergeable(dev, rq, buf, len);
+	else if (vi->big_packets)
+		skb = receive_big(dev, rq, buf, len);
+	else
+		skb = receive_small(buf, len);
+
+	if (unlikely(!skb))
+		return;
 
 	hdr = skb_vnet_hdr(skb);
 
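The payoff of the refactor shows in this hunk: receive_buf() reduces to a three-way dispatch plus a single NULL check, which only works because each helper now releases its own buffers on failure. The shape, reduced to a compilable sketch (the flags and helpers are stand-ins, not the kernel API):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct pkt;	/* opaque; NULL means "error already cleaned up" */

	static struct pkt *rx_mergeable(void *buf) { (void)buf; return NULL; }
	static struct pkt *rx_big(void *buf)       { (void)buf; return NULL; }
	static struct pkt *rx_small(void *buf)     { (void)buf; return NULL; }

	static void receive(bool mergeable, bool big, void *buf)
	{
		struct pkt *p;

		if (mergeable)
			p = rx_mergeable(buf);
		else if (big)
			p = rx_big(buf);
		else
			p = rx_small(buf);

		if (!p)		/* one exit covers all three error paths */
			return;

		/* ...hand p to the stack... */
	}

	int main(void)
	{
		receive(true, false, NULL);
		puts("dispatch ran; every error path converged on one check");
		return 0;
	}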
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
 				  sg, NULL))
-		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");
+		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
 	kfree(buf);
 }
@@ -1327,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 
 static void virtnet_free_queues(struct virtnet_info *vi)
 {
+	int i;
+
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		netif_napi_del(&vi->rq[i].napi);
+
 	kfree(vi->rq);
 	kfree(vi->sq);
 }
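The netif_napi_del() additions close a use-after-free window: netif_napi_add() links each queue's napi struct into a list owned by the net device, so freeing vi->rq while still linked leaves that list pointing into freed memory. The same pattern in miniature, as a hedged userspace analogy rather than kernel code:

	#include <stdio.h>
	#include <stdlib.h>

	/* A registry holding pointers into caller-owned memory, like the
	 * device's NAPI list holds pointers into vi->rq[]. */
	struct node { struct node *next; };
	static struct node *registry;

	static void reg_add(struct node *n) { n->next = registry; registry = n; }

	static void reg_del(struct node *n)
	{
		struct node **pp;

		for (pp = &registry; *pp; pp = &(*pp)->next)
			if (*pp == n) { *pp = n->next; return; }
	}

	int main(void)
	{
		struct node *rq = calloc(4, sizeof(*rq));
		int i;

		for (i = 0; i < 4; i++)
			reg_add(&rq[i]);

		/* Unlink every entry before the kfree()-style release,
		 * exactly what the added netif_napi_del() loop does. */
		for (i = 0; i < 4; i++)
			reg_del(&rq[i]);
		free(rq);

		printf("registry empty: %s\n", registry ? "no" : "yes");
		return 0;
	}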
@@ -1356,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->big_packets)
-				give_pages(&vi->rq[i], buf);
-			else if (vi->mergeable_rx_bufs)
+			if (vi->mergeable_rx_bufs)
 				put_page(virt_to_head_page(buf));
+			else if (vi->big_packets)
+				give_pages(&vi->rq[i], buf);
 			else
 				dev_kfree_skb(buf);
 			--vi->rq[i].num;
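This hunk and the short-packet branch of receive_buf() now test the flags in the same order, which is what makes the free side mirror the allocation side when a device negotiates both mergeable and big-packet support: the fill path uses mergeable buffers whenever that flag is set, so the free path must check it first. If a third call site ever appears, a shared helper could pin that order down in one place; a hypothetical sketch in the driver's style (virtnet_free_rx_buf is not a function in this patch):

	/* Hypothetical consolidation, not part of this patch: one place
	 * that knows how each rx buffer type must be released, mirroring
	 * how try_fill_recv() chose the allocation strategy. */
	static void virtnet_free_rx_buf(struct virtnet_info *vi,
					struct receive_queue *rq, void *buf)
	{
		if (vi->mergeable_rx_bufs)
			put_page(virt_to_head_page(buf));
		else if (vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
	}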