path: root/mm/slab.c
author    David Howells <dhowells@redhat.com>  2006-11-22 09:55:48 -0500
committer David Howells <dhowells@redhat.com>  2006-11-22 09:55:48 -0500
commit    65f27f38446e1976cc98fd3004b110fedcddd189 (patch)
tree      68f8be93feae31dfa018c22db392a05546b63ee1 /mm/slab.c
parent    365970a1ea76d81cb1ad2f652acb605f06dae256 (diff)
WorkStruct: Pass the work_struct pointer instead of context data
Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data.

For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit. To make this work, an extra flag is introduced into the management side of the work_struct. This governs auto-release of the structure upon execution.

Ordinarily, the work queue executor would release the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver itself guarantees that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated. This is a problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default. Special initialisers exist for the non-auto-release case (ending in _NAR).

Signed-off-by: David Howells <dhowells@redhat.com>
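A minimal sketch of the calling convention this patch introduces (illustration only, not part of the patch: the my_dev structure and the my_dev_work/my_dev_setup names are hypothetical, while the work-function signature, container_of() and the two-argument INIT_WORK() follow the description above):

#include <linux/workqueue.h>

struct my_dev {
	int			value;
	struct work_struct	work;
};

/* The work function now receives the work_struct pointer itself... */
static void my_dev_work(struct work_struct *work)
{
	/* ...and recovers its container with container_of(). */
	struct my_dev *dev = container_of(work, struct my_dev, work);

	printk(KERN_INFO "my_dev value = %d\n", dev->value);
}

static void my_dev_setup(struct my_dev *dev)
{
	/* Default auto-release initialiser: no separate data argument. */
	INIT_WORK(&dev->work, my_dev_work);
	schedule_work(&dev->work);
}

A non-auto-release user would instead pick the corresponding _NAR initialiser and call work_release() from inside the work function once it is done touching the containing structure.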
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a65bc5e992c3..5de81473df34 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -925,7 +925,7 @@ static void __devinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;