diff options
author | David Howells <dhowells@redhat.com> | 2006-11-22 09:54:01 -0500 |
---|---|---|
committer | David Howells <dhowells@redhat.com> | 2006-11-22 09:54:01 -0500 |
commit | 52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c (patch) | |
tree | 5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953 /mm/slab.c | |
parent | 0f9005a6f7a82f4aacbd72f7b92322a8ca1c3f97 (diff) |
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them
into a separate structure (delayed_work), which incorporates a work_struct and
the timer_list removed from work_struct.
The work_struct struct is huge, and this limits its usefulness. On a 64-bit
architecture it's nearly 100 bytes in size. This reduces that by half for the
non-delayable type of event.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 8 |
1 files changed, 4 insertions, 4 deletions
@@ -753,7 +753,7 @@ int slab_is_available(void) | |||
753 | return g_cpucache_up == FULL; | 753 | return g_cpucache_up == FULL; |
754 | } | 754 | } |
755 | 755 | ||
756 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 756 | static DEFINE_PER_CPU(struct delayed_work, reap_work); |
757 | 757 | ||
758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | 758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) |
759 | { | 759 | { |
@@ -916,16 +916,16 @@ static void next_reap_node(void) | |||
916 | */ | 916 | */ |
917 | static void __devinit start_cpu_timer(int cpu) | 917 | static void __devinit start_cpu_timer(int cpu) |
918 | { | 918 | { |
919 | struct work_struct *reap_work = &per_cpu(reap_work, cpu); | 919 | struct delayed_work *reap_work = &per_cpu(reap_work, cpu); |
920 | 920 | ||
921 | /* | 921 | /* |
922 | * When this gets called from do_initcalls via cpucache_init(), | 922 | * When this gets called from do_initcalls via cpucache_init(), |
923 | * init_workqueues() has already run, so keventd will be setup | 923 | * init_workqueues() has already run, so keventd will be setup |
924 | * at that time. | 924 | * at that time. |
925 | */ | 925 | */ |
926 | if (keventd_up() && reap_work->func == NULL) { | 926 | if (keventd_up() && reap_work->work.func == NULL) { |
927 | init_reap_node(cpu); | 927 | init_reap_node(cpu); |
928 | INIT_WORK(reap_work, cache_reap, NULL); | 928 | INIT_DELAYED_WORK(reap_work, cache_reap, NULL); |
929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); | 929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); |
930 | } | 930 | } |
931 | } | 931 | } |