diff options
author | Christoph Lameter <clameter@engr.sgi.com> | 2006-03-22 03:09:06 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-22 10:54:05 -0500 |
commit | aab2207cf8d9c343b6b5f0e4d27e1732f8618d14 (patch) | |
tree | deb851a556ac7d2339a5bc83985a33fe126321ee /mm/slab.c | |
parent | 35386e3b0f876bf194982f48f027af0c216499ce (diff) |
[PATCH] slab: make drain_array more universal by adding more parameters
Add a parameter to drain_array to control the freeing of all objects and
then use drain_array() to replace instances of drain_array_locked with
drain_array. Doing so will avoid taking locks in those locations if the
arrays are empty.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 20 |
1 file changed, 11 insertions, 9 deletions
@@ -2126,6 +2126,10 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |||
2126 | static void drain_array_locked(struct kmem_cache *cachep, | 2126 | static void drain_array_locked(struct kmem_cache *cachep, |
2127 | struct array_cache *ac, int force, int node); | 2127 | struct array_cache *ac, int force, int node); |
2128 | 2128 | ||
2129 | static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | ||
2130 | struct array_cache *ac, | ||
2131 | int force, int node); | ||
2132 | |||
2129 | static void do_drain(void *arg) | 2133 | static void do_drain(void *arg) |
2130 | { | 2134 | { |
2131 | struct kmem_cache *cachep = arg; | 2135 | struct kmem_cache *cachep = arg; |
@@ -2150,9 +2154,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
2150 | for_each_online_node(node) { | 2154 | for_each_online_node(node) { |
2151 | l3 = cachep->nodelists[node]; | 2155 | l3 = cachep->nodelists[node]; |
2152 | if (l3) { | 2156 | if (l3) { |
2153 | spin_lock_irq(&l3->list_lock); | 2157 | drain_array(cachep, l3, l3->shared, 1, node); |
2154 | drain_array_locked(cachep, l3->shared, 1, node); | ||
2155 | spin_unlock_irq(&l3->list_lock); | ||
2156 | if (l3->alien) | 2158 | if (l3->alien) |
2157 | drain_alien_cache(cachep, l3->alien); | 2159 | drain_alien_cache(cachep, l3->alien); |
2158 | } | 2160 | } |
@@ -3545,12 +3547,11 @@ static void drain_array_locked(struct kmem_cache *cachep, | |||
3545 | * necessary. | 3547 | * necessary. |
3546 | */ | 3548 | */ |
3547 | static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3, | 3549 | static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3, |
3548 | struct array_cache *ac) | 3550 | struct array_cache *ac, int force, int node) |
3549 | { | 3551 | { |
3550 | if (ac && ac->avail) { | 3552 | if (ac && ac->avail) { |
3551 | spin_lock_irq(&l3->list_lock); | 3553 | spin_lock_irq(&l3->list_lock); |
3552 | drain_array_locked(searchp, ac, 0, | 3554 | drain_array_locked(searchp, ac, force, node); |
3553 | numa_node_id()); | ||
3554 | spin_unlock_irq(&l3->list_lock); | 3555 | spin_unlock_irq(&l3->list_lock); |
3555 | } | 3556 | } |
3556 | } | 3557 | } |
@@ -3571,6 +3572,7 @@ static void cache_reap(void *unused) | |||
3571 | { | 3572 | { |
3572 | struct list_head *walk; | 3573 | struct list_head *walk; |
3573 | struct kmem_list3 *l3; | 3574 | struct kmem_list3 *l3; |
3575 | int node = numa_node_id(); | ||
3574 | 3576 | ||
3575 | if (!mutex_trylock(&cache_chain_mutex)) { | 3577 | if (!mutex_trylock(&cache_chain_mutex)) { |
3576 | /* Give up. Setup the next iteration. */ | 3578 | /* Give up. Setup the next iteration. */ |
@@ -3593,11 +3595,11 @@ static void cache_reap(void *unused) | |||
3593 | * have established with reasonable certainty that | 3595 | * have established with reasonable certainty that |
3594 | * we can do some work if the lock was obtained. | 3596 | * we can do some work if the lock was obtained. |
3595 | */ | 3597 | */ |
3596 | l3 = searchp->nodelists[numa_node_id()]; | 3598 | l3 = searchp->nodelists[node]; |
3597 | 3599 | ||
3598 | reap_alien(searchp, l3); | 3600 | reap_alien(searchp, l3); |
3599 | 3601 | ||
3600 | drain_array(searchp, l3, cpu_cache_get(searchp)); | 3602 | drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); |
3601 | 3603 | ||
3602 | /* | 3604 | /* |
3603 | * These are racy checks but it does not matter | 3605 | * These are racy checks but it does not matter |
@@ -3608,7 +3610,7 @@ static void cache_reap(void *unused) | |||
3608 | 3610 | ||
3609 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; | 3611 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; |
3610 | 3612 | ||
3611 | drain_array(searchp, l3, l3->shared); | 3613 | drain_array(searchp, l3, l3->shared, 0, node); |
3612 | 3614 | ||
3613 | if (l3->free_touched) { | 3615 | if (l3->free_touched) { |
3614 | l3->free_touched = 0; | 3616 | l3->free_touched = 0; |