author	Michael S. Tsirkin <mst@redhat.com>	2013-11-28 06:30:55 -0500
committer	David S. Miller <davem@davemloft.net>	2013-12-01 20:27:16 -0500
commit	8fc3b9e9a229778e5af3aa453c44f1a3857ba769 (patch)
tree	49d4b6bc829ca1d1cb587b2a6f9acbf82a4ea2bf /drivers/net/virtio_net.c
parent	99e872ae1eacb560152c0123cf1cef571569e681 (diff)
virtio_net: fix error handling for mergeable buffers
Eric Dumazet noticed that if we encounter an error when processing a mergeable buffer, we don't dequeue all of the buffers from this packet; the result is almost certain loss of networking. Jason Wang noticed that we also leak a page and don't decrement the rq buf count, so we won't repost buffers (a resource leak).

Fix both issues.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michael Dalton <mwdalton@google.com>
Reported-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--	drivers/net/virtio_net.c	82
1 file changed, 51 insertions(+), 31 deletions(-)
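The heart of the fix, visible in the diff below as the new err_skb/err_buf labels, is a drain loop: on any mid-packet error, pull every remaining buffer of that packet off the virtqueue, release its page, and decrement the posted-buffer count so buffers can be reposted later. Below is a minimal, compilable userspace sketch of that pattern; rxq, get_next_buf() and release_page() are illustrative stand-ins for the kernel's receive_queue, virtqueue_get_buf() and put_page(), not real API.

/* sketch.c -- a compilable model of the patch's error-drain loop.
 * All names here are hypothetical stand-ins, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct rxq {
	int num;        /* buffers still posted, like rq->num */
	int pending;    /* stub: buffers the "device" can still return */
};

/* Stand-in for virtqueue_get_buf(): NULL once the queue is empty. */
static void *get_next_buf(struct rxq *q)
{
	if (!q->pending)
		return NULL;
	q->pending--;
	return malloc(16);      /* models a posted receive buffer */
}

/* Stand-in for put_page(): drop the reference on the buffer's page. */
static void release_page(void *buf)
{
	free(buf);
}

/*
 * The recovery pattern from the err_skb/err_buf path below: a packet
 * was advertised as num_buf buffers and one of them failed, so drain
 * the rest instead of abandoning them (the old code returned early,
 * leaking pages and leaving rq->num too high to repost buffers).
 */
static void drain_remaining(struct rxq *q, int num_buf)
{
	while (--num_buf) {
		void *buf = get_next_buf(q);

		if (!buf) {     /* fewer buffers than the header promised */
			fprintf(stderr, "rx error: %d buffers missing\n",
				num_buf);
			break;
		}
		release_page(buf);      /* fixes the page leak */
		--q->num;               /* fixes the stuck buffer count */
	}
}

int main(void)
{
	struct rxq q = { .num = 4, .pending = 3 };

	/* Buffer 1 of a 4-buffer packet failed: drain buffers 2..4. */
	drain_remaining(&q, 4);
	printf("buffers left posted: %d\n", q.num);     /* prints 1 */
	return 0;
}

In the kernel code itself the drain loop additionally bumps rx_length_errors when the device returns fewer buffers than the header promised, and the caller sees the failure as a NULL return from receive_mergeable().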
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index acda66169973..71a2eac7b039 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,35 +299,47 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
+static struct sk_buff *receive_mergeable(struct net_device *dev,
+					 struct receive_queue *rq,
+					 void *buf,
+					 unsigned int len)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct skb_vnet_hdr *hdr = buf;
+	int num_buf = hdr->mhdr.num_buffers;
+	struct page *page = virt_to_head_page(buf);
+	int offset = buf - page_address(page);
+	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
+					       MERGE_BUFFER_LEN);
 	struct sk_buff *curr_skb = head_skb;
-	char *buf;
-	struct page *page;
-	int num_buf, len, offset;
 
-	num_buf = hdr->mhdr.num_buffers;
+	if (unlikely(!curr_skb))
+		goto err_skb;
+
 	while (--num_buf) {
-		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		int num_skb_frags;
+
 		buf = virtqueue_get_buf(rq->vq, &len);
 		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers missing\n",
-				 head_skb->dev->name, hdr->mhdr.num_buffers);
-			head_skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 dev->name, num_buf, hdr->mhdr.num_buffers);
+			dev->stats.rx_length_errors++;
+			goto err_buf;
 		}
 		if (unlikely(len > MERGE_BUFFER_LEN)) {
 			pr_debug("%s: rx error: merge buffer too long\n",
-				 head_skb->dev->name);
+				 dev->name);
 			len = MERGE_BUFFER_LEN;
 		}
+
+		page = virt_to_head_page(buf);
+		--rq->num;
+
+		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-			if (unlikely(!nskb)) {
-				head_skb->dev->stats.rx_dropped++;
-				return -ENOMEM;
-			}
+
+			if (unlikely(!nskb))
+				goto err_skb;
 			if (curr_skb == head_skb)
 				skb_shinfo(curr_skb)->frag_list = nskb;
 			else
@@ -341,8 +353,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			head_skb->len += len;
 			head_skb->truesize += MERGE_BUFFER_LEN;
 		}
-		page = virt_to_head_page(buf);
-		offset = buf - (char *)page_address(page);
+		offset = buf - page_address(page);
 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
 			put_page(page);
 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
@@ -351,9 +362,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
 					offset, len, MERGE_BUFFER_LEN);
 		}
-		--rq->num;
 	}
-	return 0;
+
+	return head_skb;
+
+err_skb:
+	put_page(page);
+	while (--num_buf) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 dev->name, num_buf);
+			dev->stats.rx_length_errors++;
+			break;
+		}
+		page = virt_to_head_page(buf);
+		put_page(page);
+		--rq->num;
+	}
+err_buf:
+	dev->stats.rx_dropped++;
+	dev_kfree_skb(head_skb);
+	return NULL;
 }
 
 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
@@ -382,19 +412,9 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		len -= sizeof(struct virtio_net_hdr);
 		skb_trim(skb, len);
 	} else if (vi->mergeable_rx_bufs) {
-		struct page *page = virt_to_head_page(buf);
-		skb = page_to_skb(rq, page,
-				  (char *)buf - (char *)page_address(page),
-				  len, MERGE_BUFFER_LEN);
-		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
-			put_page(page);
-			return;
-		}
-		if (receive_mergeable(rq, skb)) {
-			dev_kfree_skb(skb);
-			return;
-		}
+		skb = receive_mergeable(dev, rq, buf, len);
+		if (unlikely(!skb))
+			return;
 	} else {
 		page = buf;
 		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);