author    Linus Torvalds <torvalds@linux-foundation.org>  2009-08-26 23:54:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-08-26 23:54:48 -0400
commit    1e23502cc57cef33455ac7cb9111e3c6d991a894 (patch)
tree      38b21cfd31bfd7d9d987cb4d7c1e51d673a9a8e7 /drivers
parent    e99b1f22f91cc5e2d06699b3d8958a0ff6cb24d9 (diff)
parent    3161e453e496eb5643faad30fff5a5ab183da0fe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  virtio: net refill on out-of-memory
  smc91x: fix compilation on SMP
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/smc91x.c      2
-rw-r--r--  drivers/net/virtio_net.c  61
2 files changed, 47 insertions, 16 deletions
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 9da1fa12a67..7567f510eff 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -531,7 +531,7 @@ static inline void smc_rcv(struct net_device *dev)
 	local_irq_restore(flags);					\
 	__ret;								\
 })
-#define smc_special_lock(lock, flags)	spin_lock_irq(lock, flags)
+#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
 #else
 #define smc_special_trylock(lock, flags)	(1)
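
[Note] Why this only broke SMP builds: the macros above sit in the CONFIG_SMP
branch of the #if, while the UP branch below stubs them out, and
spin_lock_irq() takes a single argument, so the old two-argument expansion
could never compile once it reached a real spinlock call. A minimal sketch of
the mismatch (the spinlock signatures are the real kernel API; the surrounding
function is illustrative only):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(lock);

	static void example(void)
	{
		unsigned long flags;

		spin_lock_irq(&lock);			/* one arg: disables IRQs, no flags */
		spin_unlock_irq(&lock);

		spin_lock_irqsave(&lock, flags);	/* two args: saves IRQ state in flags */
		spin_unlock_irqrestore(&lock, flags);
	}

The old smc_special_lock() expanded to spin_lock_irq(lock, flags), i.e. one
argument too many, while its partner already used the irqrestore form;
switching the lock side to spin_lock_irqsave() restores the matched pair.
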
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a6e81d5b57..bbedf03a212 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (vi->big_packets) {
 		for (i = 0; i < MAX_SKB_FRAGS; i++) {
 			skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-			f->page = get_a_page(vi, GFP_ATOMIC);
+			f->page = get_a_page(vi, gfp);
 			if (!f->page)
 				break;
 
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
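
[Note] The virtio_net change is an instance of a common driver pattern: in
NAPI poll (softirq context) allocations must use GFP_ATOMIC and can fail under
memory pressure, so a failed fill is punted to a delayed work item that runs
in process context, where GFP_KERNEL may sleep and wait for reclaim, and
reschedules itself until the ring is populated. A condensed sketch of the
pattern under assumed names (the foo_* identifiers are invented for
illustration; the workqueue and gfp calls are the real kernel APIs):

	#include <linux/workqueue.h>
	#include <linux/gfp.h>

	struct foo_dev {
		struct delayed_work refill;	/* set up once with
						 * INIT_DELAYED_WORK(&f->refill,
						 *                   foo_refill_work) */
		/* ... rx ring state ... */
	};

	/* Try to fill the rx ring; returns false if an allocation failed. */
	static bool foo_fill_rx(struct foo_dev *f, gfp_t gfp)
	{
		/* allocate buffers with kmalloc(size, gfp), post them to the ring */
		return true;
	}

	/* Process context: GFP_KERNEL may sleep and wait for reclaim. */
	static void foo_refill_work(struct work_struct *work)
	{
		struct foo_dev *f = container_of(work, struct foo_dev, refill.work);

		if (!foo_fill_rx(f, GFP_KERNEL))
			schedule_delayed_work(&f->refill, HZ / 2);	/* retry later */
	}

	/* Softirq context (e.g. NAPI poll): must not sleep. */
	static void foo_poll_refill(struct foo_dev *f)
	{
		if (!foo_fill_rx(f, GFP_ATOMIC))
			schedule_delayed_work(&f->refill, 0);
	}

The patch additionally brackets the GFP_KERNEL refill with napi_disable()/
napi_enable() so the work item never races the poll loop over the same
virtqueue, and cancels the work with cancel_delayed_work_sync() on every
teardown path so it cannot run after the device is gone.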