| author | Eric Dumazet <edumazet@google.com> | 2014-10-10 07:48:18 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-10-10 15:37:29 -0400 |
| commit | 4c450583d9d0a8241f0f62b80038ac47b43ff843 | |
| tree | 85ca97083049de5acf67f1a1b467b1db209e7cdc | /net/core |
| parent | 98226208c8a1fe5834e92d827a2a1e8051a17943 | |
net: fix races in page->_count manipulation
It is illegal to use atomic_set(&page->_count, ...) even if we 'own'
the page. Other entities in the kernel need to use get_page_unless_zero()
to get a reference to the page before testing page properties, so we could
lose a refcount increment.

The only case where this is valid is when page->_count is 0.
Fixes: 540eb7bf0bbed ("net: Update alloc frag to reduce get/put page usage and recycle pages")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
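The race is easiest to see in a deterministic userspace model: one actor plays the recycler, the other plays a CPU taking a speculative reference the way get_page_unless_zero() does (increment only if the count is already non-zero). The sketch below is hypothetical, not kernel code: C11 atomics stand in for atomic_t, and the names (`page_count`, `PAGECNT_MAX_BIAS`, `get_page_unless_zero_model`) are invented. It replays a single losing interleaving sequentially, first with the plain store the patch removes, then with the additive update it switches to:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_count = 1;   /* stands in for page->_count */
#define PAGECNT_MAX_BIAS 128        /* stands in for NETDEV_PAGECNT_MAX_BIAS */

/* Model of get_page_unless_zero(): take a reference only if the
 * count is currently non-zero, retrying on concurrent updates.
 */
static int get_page_unless_zero_model(void)
{
	int c = atomic_load(&page_count);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&page_count, &c, c + 1))
			return 1;   /* got a reference */
	}
	return 0;
}

int main(void)
{
	/* Broken interleaving: the recycler has already decided to
	 * recycle (it saw _count == 1), another CPU then grabs a
	 * speculative reference, and the recycler's plain store of
	 * the bias overwrites that increment.
	 */
	get_page_unless_zero_model();                 /* other CPU: 1 -> 2   */
	atomic_store(&page_count, PAGECNT_MAX_BIAS);  /* recycler:  2 -> 128 */
	printf("atomic_set recycle: count=%d (extra reference lost)\n",
	       atomic_load(&page_count));

	/* Fixed interleaving: adding the missing bias instead of
	 * storing it never discards the concurrent increment.
	 */
	atomic_store(&page_count, 1);                 /* reset the model     */
	get_page_unless_zero_model();                 /* other CPU: 1 -> 2   */
	atomic_fetch_add(&page_count, PAGECNT_MAX_BIAS - 1);  /* 2 -> 129    */
	printf("atomic_add recycle: count=%d (extra reference kept)\n",
	       atomic_load(&page_count));
	return 0;
}
```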
Diffstat (limited to 'net/core')
| -rw-r--r-- | net/core/skbuff.c | 25 |
1 file changed, 18 insertions(+), 7 deletions(-)
```diff
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a30d750647e7..829d013745ab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -360,18 +360,29 @@ refill:
 			goto end;
 		}
 		nc->frag.size = PAGE_SIZE << order;
-recycle:
-		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+			   &nc->frag.page->_count);
 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
 		nc->frag.offset = 0;
 	}
 
 	if (nc->frag.offset + fragsz > nc->frag.size) {
-		/* avoid unnecessary locked operations if possible */
-		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-			goto recycle;
-		goto refill;
+		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+			if (!atomic_sub_and_test(nc->pagecnt_bias,
+						 &nc->frag.page->_count))
+				goto refill;
+			/* OK, page count is 0, we can safely set it */
+			atomic_set(&nc->frag.page->_count,
+				   NETDEV_PAGECNT_MAX_BIAS);
+		} else {
+			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+				   &nc->frag.page->_count);
+		}
+		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+		nc->frag.offset = 0;
 	}
 
 	data = page_address(nc->frag.page) + nc->frag.offset;
```
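Note that the fixed path still contains one atomic_set(): once atomic_sub_and_test() has brought _count to zero, get_page_unless_zero() can no longer succeed against this page, so nothing can race with the store. A condensed, hypothetical restatement of that decision, again with C11 atomics and invented names rather than the kernel helpers:

```c
#include <stdatomic.h>
#include <stdbool.h>

#define PAGECNT_MAX_BIAS 128   /* stands in for NETDEV_PAGECNT_MAX_BIAS */

/* Model of the fixed recycle path. Returns false when the page is
 * still referenced elsewhere and the caller must allocate a fresh
 * one (the "goto refill" case in the patch).
 */
static bool try_recycle(atomic_int *count, int *pagecnt_bias)
{
	if (atomic_load(count) != *pagecnt_bias) {
		/* Other users took references; drop our bias and check
		 * whether the count reaches zero, as atomic_sub_and_test()
		 * does (atomic_fetch_sub returns the previous value).
		 */
		if (atomic_fetch_sub(count, *pagecnt_bias) != *pagecnt_bias)
			return false;   /* count stayed non-zero: refill */
		/* Count is now 0: get_page_unless_zero() cannot succeed,
		 * so a plain store is safe here and only here.
		 */
		atomic_store(count, PAGECNT_MAX_BIAS);
	} else {
		/* All outstanding references are ours: top the count
		 * back up without overwriting a concurrent increment.
		 */
		atomic_fetch_add(count, PAGECNT_MAX_BIAS - *pagecnt_bias);
	}
	*pagecnt_bias = PAGECNT_MAX_BIAS;
	return true;
}
```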
