Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a30d750647e7..829d013745ab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -360,18 +360,29 @@ refill:
 				goto end;
 		}
 		nc->frag.size = PAGE_SIZE << order;
-recycle:
-		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+			   &nc->frag.page->_count);
 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
 		nc->frag.offset = 0;
 	}
 
 	if (nc->frag.offset + fragsz > nc->frag.size) {
-		/* avoid unnecessary locked operations if possible */
-		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-			goto recycle;
-		goto refill;
+		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+			if (!atomic_sub_and_test(nc->pagecnt_bias,
+						 &nc->frag.page->_count))
+				goto refill;
+			/* OK, page count is 0, we can safely set it */
+			atomic_set(&nc->frag.page->_count,
+				   NETDEV_PAGECNT_MAX_BIAS);
+		} else {
+			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+				   &nc->frag.page->_count);
+		}
+		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+		nc->frag.offset = 0;
 	}
 
 	data = page_address(nc->frag.page) + nc->frag.offset;
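For readers of the hunk: the new comment refers to speculative page references, where get_page_unless_zero() may bump page->_count from another CPU at any moment. Overwriting the counter with atomic_set() would silently discard such a reference, while atomic_add()/atomic_sub_and_test() keep the arithmetic consistent. The following stand-alone C11 sketch is an illustration only, not kernel code: "page_count" stands in for page->_count, speculative_get() mimics what get_page_unless_zero() does, and the bias value is made up.

/*
 * Illustration of why recharging a reference count with a store loses
 * concurrent references, while an add preserves them.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGECNT_BIAS 100	/* illustrative, not the kernel's value */

static atomic_int page_count = 1;	/* allocator's own reference */

/* Like get_page_unless_zero(): take a reference only while count != 0. */
static int speculative_get(void)
{
	int old = atomic_load(&page_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&page_count, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* page already gone */
}

int main(void)
{
	/* Another CPU takes a speculative reference: 1 -> 2. */
	speculative_get();

	/* Old scheme: a plain store overwrites the counter, erasing that
	 * reference.  bias + 1 references are now outstanding against a
	 * count of only bias, so the final put drives the count negative
	 * (the page would be freed while still in use). */
	atomic_store(&page_count, PAGECNT_BIAS);
	printf("set: count=%d (speculative ref lost)\n",
	       atomic_load(&page_count));

	/* New scheme: start again from 1 owner + 1 speculative reference
	 * and add the bias instead; the extra reference is preserved. */
	atomic_store(&page_count, 2);
	atomic_fetch_add(&page_count, PAGECNT_BIAS - 1);
	printf("add: count=%d (speculative ref preserved)\n",
	       atomic_load(&page_count));
	return 0;
}

Built with any C11 compiler (e.g. cc -std=c11), the first printf shows a count one short of the references actually outstanding, which is exactly the class of bug the patch closes; the second shows the count matching them.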