Diffstat (limited to 'drivers/net/virtio_net.c')
 drivers/net/virtio_net.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 15 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a6e81d5b579..bbedf03a2124 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
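
The new refill member follows the usual kernel idiom of embedding a struct delayed_work directly in the driver's private state, so the work handler can recover its virtnet_info with container_of(). A minimal sketch of that idiom, with illustrative names that are not from this patch:

    #include <linux/workqueue.h>

    struct my_dev {
            int rx_level;
            struct delayed_work refill;     /* embedded, not a pointer */
    };

    static void my_refill(struct work_struct *work)
    {
            /* delayed_work wraps a work_struct in its .work member;
             * step from that member back to the containing struct. */
            struct my_dev *d = container_of(work, struct my_dev, refill.work);

            d->rx_level++;  /* ... do the real refilling here ... */
    }

    /* setup: INIT_DELAYED_WORK(&d->refill, my_refill);
     * arm:   schedule_delayed_work(&d->refill, HZ / 2);   -- ~500 ms later */
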
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
 				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, GFP_ATOMIC);
+				f->page = get_a_page(vi, gfp);
 				if (!f->page)
 					break;
 
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
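
Both fill paths now take their allocation context as a gfp_t argument and use the bool return to distinguish "ring is full" (success) from "allocator failed" (OOM). Stripped of the virtio details, the pattern is roughly the following sketch; ring, buf, alloc_buf, ring_add, and free_buf are hypothetical stand-ins, not driver code:

    /* Fill until the ring is full or allocation fails; return true
     * only if the allocator never stopped us (i.e. not OOM). */
    static bool fill_ring(struct ring *r, gfp_t gfp)
    {
            bool oom = false;

            for (;;) {
                    struct buf *b = alloc_buf(gfp);

                    if (!b) {
                            oom = true;     /* the allocator gave up */
                            break;
                    }
                    if (ring_add(r, b) < 0) {
                            free_buf(b);    /* ring full: the normal exit */
                            break;
                    }
            }
            return !oom;
    }
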
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
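
This hunk implements the FIXME it removes: virtnet_poll() runs in softirq context and must not sleep, so it can only attempt GFP_ATOMIC allocations; when even that fails, it punts to refill_work(), which runs from a workqueue in process context and can use the more patient GFP_KERNEL. Condensed from the hunks above, the full lifecycle of the work item (the delay argument is in jiffies, so HZ/2 is half a second):

    INIT_DELAYED_WORK(&vi->refill, refill_work);            /* probe */

    if (!try_fill_recv(vi, GFP_ATOMIC))                     /* NAPI poll (softirq) */
            schedule_delayed_work(&vi->refill, 0);          /* run as soon as possible */

    if (still_empty)                                        /* inside refill_work() */
            schedule_delayed_work(&vi->refill, HZ/2);       /* retry in half a second */

    cancel_delayed_work_sync(&vi->refill);                  /* probe error path / remove */
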
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
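
In both teardown paths the _sync variant matters: cancel_delayed_work_sync() not only cancels a pending timer but also waits for an already-running refill_work() to finish, and it handles work that requeues itself, so the HZ/2 retry cannot re-arm after cancellation. A plain cancel_delayed_work() could return while the handler is still mid-flight touching the receive ring. Annotated:

    /* After this returns, refill_work() is neither pending nor running
     * and cannot requeue itself, so vi can be torn down safely. */
    cancel_delayed_work_sync(&vi->refill);
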