author     Paul Jackson <pj@sgi.com>               2006-03-24 06:16:07 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-03-24 10:33:23 -0500
commit     101a50019ae5e370d73984ee05d56dd3b08f330a (patch)
tree       f5628837d4bb68a4109dfb8d6601f95b630848c3
parent     fffb60f93ce5880aade88e01d7133b52a4879710 (diff)
[PATCH] cpuset memory spread slab cache implementation
Provide the slab cache infrastructure to support cpuset memory spreading.
See the previous patches, cpuset_mem_spread, for an explanation of cpuset
memory spreading.
This patch provides a slab cache SLAB_MEM_SPREAD flag.  If it is set in the
kmem_cache_create() call defining a slab cache, then any task marked with the
per-task flag PF_SPREAD_SLAB will spread memory page allocations for that
cache over all the allowed nodes, instead of preferring the local (faulting)
node.
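For illustration only (this example is not part of the patch; the cache name
and object type are made up), a cache opts in to spreading simply by including
SLAB_MEM_SPREAD in its kmem_cache_create() flags:

```c
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object type, purely for illustration. */
struct example_obj {
	int id;
	char name[32];
};

static kmem_cache_t *example_cachep;

/*
 * Sketch, assuming the 2.6.16-era six-argument kmem_cache_create():
 * SLAB_MEM_SPREAD marks this cache so that tasks with PF_SPREAD_SLAB
 * set have its page allocations spread over their allowed nodes.
 */
static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_obj),
					   0,			/* align */
					   SLAB_MEM_SPREAD | SLAB_PANIC,
					   NULL,		/* ctor */
					   NULL);		/* dtor */
	return 0;
}
```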
On systems not configured with CONFIG_NUMA, this results in no change to the
page allocation code path for slab caches.
On systems with cpusets configured in the kernel, but with the "memory_spread"
cpuset option not enabled for the current task's cpuset, this adds a call to a
cpuset routine and a failed bit test of the process flag PF_SPREAD_SLAB.
For tasks so marked, a second inline test is done for the slab cache flag
SLAB_MEM_SPREAD, and if that is set and if the allocation is not
in_interrupt(), this adds a call to a cpuset routine that computes which of
the task's mems_allowed nodes should be preferred for this allocation.
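For orientation, the cpuset routines referred to here were added by the
earlier cpuset_mem_spread patches; a rough sketch of their shape (an
approximation, not the verbatim kernel code; the rotor field name is
illustrative) is:

```c
#include <linux/sched.h>
#include <linux/nodemask.h>

/*
 * Approximate shape of the gating test: a cheap check of the per-task
 * PF_SPREAD_SLAB bit, so tasks outside a memory-spreading cpuset pay
 * only a failed bit test on this path.
 */
static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

/*
 * Approximate shape of the node chooser: rotate through the task's
 * mems_allowed so successive allocations land on successive nodes.
 */
static int cpuset_mem_spread_node(void)
{
	int node;

	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	current->cpuset_mem_spread_rotor = node;
	return node;
}
```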
==> This patch adds another hook into the performance-critical
    code path for allocating objects from the slab cache, in the
    ____cache_alloc() chunk below.  The next patch optimizes this
    hook, reducing the impact of the combined mempolicy plus memory
    spreading hooks on this critical code path to a single check
    against the task's task_struct flags word.
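One way to picture that follow-up optimization (a sketch of the stated goal
only, not the actual next patch; alternate_node_alloc() and PF_MEMPOLICY are
stand-in names not confirmed here) is a single flags-word test that diverts
to an out-of-line slow path:

```c
/* Hypothetical slow-path helper (declaration only, for the sketch). */
void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags);

/*
 * Sketch only: both the mempolicy hook and the memory-spreading hook
 * hide behind one check of current->flags, so the common case costs a
 * single test-and-branch on the allocation fast path.
 */
static inline void *cache_alloc_pick_node(struct kmem_cache *cachep, gfp_t flags)
{
	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
		return alternate_node_alloc(cachep, flags);
	return NULL;	/* NULL means: use the normal per-cpu fast path */
}
```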
This patch provides the generic slab flags and logic needed to apply memory
spreading to a particular slab.
A subsequent patch will mark a few specific slab caches for this placement
policy.
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/slab.h |  1
-rw-r--r--  mm/slab.c            | 13
2 files changed, 12 insertions, 2 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2b28c849d75a..e2ee5b268797 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -46,6 +46,7 @@ typedef struct kmem_cache kmem_cache_t;
 						   what is reclaimable later*/
 #define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
+#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 
 /* flags passed to a constructor func */
 #define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -94,6 +94,7 @@
 #include	<linux/interrupt.h>
 #include	<linux/init.h>
 #include	<linux/compiler.h>
+#include	<linux/cpuset.h>
 #include	<linux/seq_file.h>
 #include	<linux/notifier.h>
 #include	<linux/kallsyms.h>
@@ -173,12 +174,12 @@
 			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
 
 /*
@@ -2813,6 +2814,14 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 		if (nid != numa_node_id())
 			return __cache_alloc_node(cachep, flags, nid);
 	}
+	if (unlikely(cpuset_do_slab_mem_spread() &&
+			(cachep->flags & SLAB_MEM_SPREAD) &&
+			!in_interrupt())) {
+		int nid = cpuset_mem_spread_node();
+
+		if (nid != numa_node_id())
+			return __cache_alloc_node(cachep, flags, nid);
+	}
 #endif
 
 	check_irq_off();