author | Christoph Lameter <clameter@sgi.com> | 2007-07-30 16:06:46 -0400
---|---|---
committer | Christoph Lameter <clameter@sgi.com> | 2007-08-10 00:57:15 -0400
commit | fcda3d89bf1366f6801447eab2d8a75ac5b9c4ce | (patch)
tree | 47382ad57b48a1a0e46a6190991f88a9e6973e45 |
parent | 6adb31c90c47262c8a25bf5097de9b3426caf3ae | (diff)
SLUB: Remove checks for MAX_PARTIAL from kmem_cache_shrink
The MAX_PARTIAL checks were supposed to be an optimization. However, slab
shrinking is a manually triggered process, either by running slabinfo or by
the kernel calling kmem_cache_shrink().

If one really wants to shrink a slab then all operations should be performed
regardless of the size of the partial list. This also fixes an issue that
could surface if the number of partial slabs was initially above MAX_PARTIAL
in kmem_cache_shrink and later dropped below MAX_PARTIAL through the
elimination of empty slabs on the partial list (rare). In that case a few
slabs may be left off the partial list (and only be put back when they
are empty).
Signed-off-by: Christoph Lameter <clameter@sgi.com>
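
For context, a condensed sketch of what one node's pass in kmem_cache_shrink() looks like after this change. This is simplified from the surrounding mm/slub.c code of this era (allocation of the bucket array, node iteration, and error handling are omitted), not a verbatim excerpt:

```c
/*
 * Simplified sketch: with the MAX_PARTIAL checks gone there is no
 * cut-off any more; every partial slab is either discarded (if empty)
 * or sorted into a bucket keyed by how many objects it holds.
 */
spin_lock_irqsave(&n->list_lock, flags);

list_for_each_entry_safe(page, t, &n->partial, lru) {
	if (!page->inuse && slab_trylock(page)) {
		/* Empty slab: unlink it from the partial list and free it. */
		list_del(&page->lru);
		n->nr_partial--;
		slab_unlock(page);
		discard_slab(s, page);
	} else {
		/* Bucket by object count, unconditionally. */
		list_move(&page->lru, slabs_by_inuse + page->inuse);
	}
}

/* Rebuild the partial list: fullest slabs first, least used at the end. */
for (i = s->objects - 1; i >= 0; i--)
	list_splice(slabs_by_inuse + i, n->partial.prev);

spin_unlock_irqrestore(&n->list_lock, flags);
```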
-rw-r--r-- | mm/slub.c | 9
1 file changed, 2 insertions(+), 7 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2500,15 +2500,11 @@ int kmem_cache_shrink(struct kmem_cache *s)
 				slab_unlock(page);
 				discard_slab(s, page);
 			} else {
-				if (n->nr_partial > MAX_PARTIAL)
-					list_move(&page->lru,
-					slabs_by_inuse + page->inuse);
+				list_move(&page->lru,
+				slabs_by_inuse + page->inuse);
 			}
 		}
 
-		if (n->nr_partial <= MAX_PARTIAL)
-			goto out;
-
 		/*
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
@@ -2516,7 +2512,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		for (i = s->objects - 1; i >= 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
-out:
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
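
The shrink path is only entered on explicit request. In-kernel users call the exported kmem_cache_shrink() directly; a minimal, hypothetical caller (the function name and cache pointer below are made up for illustration) might look like:

```c
#include <linux/slab.h>

/*
 * Hypothetical caller: explicitly compact a cache's partial lists.
 * With this patch, kmem_cache_shrink() frees empty partial slabs and
 * re-sorts the remaining ones no matter how long the partial list is.
 * It returns 0 on success, or -ENOMEM if its temporary bucket array
 * cannot be allocated.
 */
static void compact_cache(struct kmem_cache *cache)
{
	if (kmem_cache_shrink(cache))
		printk(KERN_WARNING "failed to shrink cache\n");
}
```

From userspace, the same operation is typically reached through the slabinfo tool's shrink option or, with SLUB's sysfs interface, by writing to a cache's shrink attribute under /sys/kernel/slab/ (exact tooling and paths depend on the kernel version).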