From aedb0eb107961a234f7c38e53da65a8f7ea992a9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 21 Oct 2006 10:24:16 -0700 Subject: [PATCH] Slab: Do not fallback to nodes that have not been bootstrapped yet The zonelist may contain zones of nodes that have not been bootstrapped and we will oops if we try to allocate from those zones. So check if the node information for the slab and the node have been setup before attempting an allocation. If it has not been setup then skip that zone. Usually we will not encounter this situation since the slab bootstrap code avoids falling back before we have setup the respective nodes but we seem to have a special needs for pppc. Signed-off-by: Christoph Lameter Acked-by: Andy Whitcroft Cc: Paul Mackerras Cc: Mike Kravetz Cc: Benjamin Herrenschmidt Acked-by: Mel Gorman Acked-by: Will Schmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 266449d604..84c631f307 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3152,12 +3152,15 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) struct zone **z; void *obj = NULL; - for (z = zonelist->zones; *z && !obj; z++) + for (z = zonelist->zones; *z && !obj; z++) { + int nid = zone_to_nid(*z); + if (zone_idx(*z) <= ZONE_NORMAL && - cpuset_zone_allowed(*z, flags)) + cpuset_zone_allowed(*z, flags) && + cache->nodelists[nid]) obj = __cache_alloc_node(cache, - flags | __GFP_THISNODE, - zone_to_nid(*z)); + flags | __GFP_THISNODE, nid); + } return obj; } -- cgit v1.2.2 From 7f6b8876c7e66b0d15af134e2a5b87e55514eb6d Mon Sep 17 00:00:00 2001 From: Daniel Yeisley Date: Thu, 2 Nov 2006 22:07:14 -0800 Subject: [PATCH] init_reap_node() initialization fix It looks like there is a bug in init_reap_node() in slab.c that can cause multiple oops's on certain ES7000 configurations. The variable reap_node is defined per cpu, but only initialized on a single CPU. This causes an oops in next_reap_node() when __get_cpu_var(reap_node) returns the wrong value. Fix is below. Signed-off-by: Dan Yeisley Cc: Andi Kleen Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: Manfred Spraul Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 84c631f307..3c4a7e34ed 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -883,7 +883,7 @@ static void init_reap_node(int cpu) if (node == MAX_NUMNODES) node = first_node(node_online_map); - __get_cpu_var(reap_node) = node; + per_cpu(reap_node, cpu) = node; } static void next_reap_node(void) -- cgit v1.2.2 From 52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Nov 2006 14:54:01 +0000 Subject: WorkStruct: Separate delayable and non-delayable events. Separate delayable work items from non-delayable work items be splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct. The work_struct struct is huge, and this limits it's usefulness. On a 64-bit architecture it's nearly 100 bytes in size. This reduces that by half for the non-delayable type of event. 
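A rough sketch of how the split ends up looking once the series is applied (illustrative only, not the exact upstream definitions): the timer moves out of work_struct, so only callers that actually delay their work pay for it.

    struct work_struct {
            atomic_long_t data;             /* flags and auxiliary data */
            struct list_head entry;
            void (*func)(struct work_struct *work);
    };

    struct delayed_work {
            struct work_struct work;
            struct timer_list timer;        /* carried only by delayed users */
    };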
Signed-Off-By: David Howells --- mm/slab.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 3c4a7e34ed..a65bc5e992 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -753,7 +753,7 @@ int slab_is_available(void) return g_cpucache_up == FULL; } -static DEFINE_PER_CPU(struct work_struct, reap_work); +static DEFINE_PER_CPU(struct delayed_work, reap_work); static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { @@ -916,16 +916,16 @@ static void next_reap_node(void) */ static void __devinit start_cpu_timer(int cpu) { - struct work_struct *reap_work = &per_cpu(reap_work, cpu); + struct delayed_work *reap_work = &per_cpu(reap_work, cpu); /* * When this gets called from do_initcalls via cpucache_init(), * init_workqueues() has already run, so keventd will be setup * at that time. */ - if (keventd_up() && reap_work->func == NULL) { + if (keventd_up() && reap_work->work.func == NULL) { init_reap_node(cpu); - INIT_WORK(reap_work, cache_reap, NULL); + INIT_DELAYED_WORK(reap_work, cache_reap, NULL); schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); } } -- cgit v1.2.2 From 65f27f38446e1976cc98fd3004b110fedcddd189 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Nov 2006 14:55:48 +0000 Subject: WorkStruct: Pass the work_struct pointer instead of context data Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data. For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit. To make this work, an extra flag is introduced into the management side of the work_struct. This governs auto-release of the structure upon execution. Ordinarily, the work queue executor would release the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver makes some guarantee itself that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated.. This is a problem if the auxiliary data is taken away (as done by the last patch). However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release(). In most cases, automatic release is fine, so this is the default. Special initiators exist for the non-auto-release case (ending in _NAR). 
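As an illustration of the pattern this enables (the device structure and function names below are invented for the example), a work handler recovers its containing object with container_of() instead of receiving an opaque context pointer:

    struct my_device {
            int                     events;
            struct work_struct      event_work;
    };

    static void my_event_worker(struct work_struct *work)
    {
            /* Recover the container from the work_struct pointer itself. */
            struct my_device *dev = container_of(work, struct my_device,
                                                 event_work);
            dev->events++;
    }

    static void my_device_setup(struct my_device *dev)
    {
            /* The initializer no longer takes a separate data argument. */
            INIT_WORK(&dev->event_work, my_event_worker);
    }

If my_device may be freed while the handler is still running, the non-auto-release (_NAR) initializers described above are the ones to reach for instead.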
Signed-Off-By: David Howells --- mm/slab.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index a65bc5e992..5de81473df 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node); static int enable_cpucache(struct kmem_cache *cachep); -static void cache_reap(void *unused); +static void cache_reap(struct work_struct *unused); /* * This function must be completely optimized away if a constant is passed to @@ -925,7 +925,7 @@ static void __devinit start_cpu_timer(int cpu) */ if (keventd_up() && reap_work->work.func == NULL) { init_reap_node(cpu); - INIT_DELAYED_WORK(reap_work, cache_reap, NULL); + INIT_DELAYED_WORK(reap_work, cache_reap); schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); } } @@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, * If we cannot acquire the cache chain mutex then just give up - we'll try * again on the next iteration. */ -static void cache_reap(void *unused) +static void cache_reap(struct work_struct *unused) { struct kmem_cache *searchp; struct kmem_list3 *l3; -- cgit v1.2.2 From a44b56d354b49f9abb184e5a14f71889856283bb Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 6 Dec 2006 20:32:11 -0800 Subject: [PATCH] slab debug and ARCH_SLAB_MINALIGN don't get along When CONFIG_SLAB_DEBUG is used in combination with ARCH_SLAB_MINALIGN, some debug flags should be disabled which depend on BYTES_PER_WORD alignment. The disabling of these debug flags is not properly handled when BYTES_PER_WORD < ARCH_SLAB_MEMALIGN < cache_line_size() This patch fixes that and also adds an alignment check to cache_alloc_debugcheck_after() when ARCH_SLAB_MINALIGN is used. Signed-off-by: Kevin Hilman Cc: Pekka Enberg Cc: Christoph Lameter Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 5de81473df..ff60a94142 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2197,18 +2197,17 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) ralign = BYTES_PER_WORD; - /* 2) arch mandated alignment: disables debug if necessary */ + /* 2) arch mandated alignment */ if (ralign < ARCH_SLAB_MINALIGN) { ralign = ARCH_SLAB_MINALIGN; - if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } - /* 3) caller mandated alignment: disables debug if necessary */ + /* 3) caller mandated alignment */ if (ralign < align) { ralign = align; - if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } + /* disable debug if necessary */ + if (ralign > BYTES_PER_WORD) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. 
*/ @@ -3063,6 +3062,12 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, cachep->ctor(objp, cachep, ctor_flags); } +#if ARCH_SLAB_MINALIGN + if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { + printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", + objp, ARCH_SLAB_MINALIGN); + } +#endif return objp; } #else -- cgit v1.2.2 From 8f5be20bf87da7c7c59c5cc84f630a1eca5cc99c Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Wed, 6 Dec 2006 20:32:14 -0800 Subject: [PATCH] mm: slab: eliminate lock_cpu_hotplug from slab Here's an attempt towards doing away with lock_cpu_hotplug in the slab subsystem. This approach also fixes a bug which shows up when cpus are being offlined/onlined and slab caches are being tuned simultaneously. http://marc.theaimsgroup.com/?l=linux-kernel&m=116098888100481&w=2 The patch has been stress tested overnight on a 2 socket 4 core AMD box with repeated cpu online and offline, while dbench and kernbench process are running, and slab caches being tuned at the same time. There were no lockdep warnings either. (This test on 2,6.18 as 2.6.19-rc crashes at __drain_pages http://marc.theaimsgroup.com/?l=linux-kernel&m=116172164217678&w=2 ) The approach here is to hold cache_chain_mutex from CPU_UP_PREPARE until CPU_ONLINE (similar in approach as worqueue_mutex) . Slab code sensitive to cpu_online_map (kmem_cache_create, kmem_cache_destroy, slabinfo_write, __cache_shrink) is already serialized with cache_chain_mutex. (This patch lengthens cache_chain_mutex hold time at kmem_cache_destroy to cover this). This patch also takes the cache_chain_sem at kmem_cache_shrink to protect sanity of cpu_online_map at __cache_shrink, as viewed by slab. (kmem_cache_shrink->__cache_shrink->drain_cpu_caches). But, really, kmem_cache_shrink is used at just one place in the acpi subsystem! Do we really need to keep kmem_cache_shrink at all? Another note. Looks like a cpu hotplug event can send CPU_UP_CANCELED to a registered subsystem even if the subsystem did not receive CPU_UP_PREPARE. This could be due to a subsystem registered for notification earlier than the current subsystem crapping out with NOTIFY_BAD. Badness can occur with in the CPU_UP_CANCELED code path at slab if this happens (The same would apply for workqueue.c as well). To overcome this, we might have to use either a) a per subsystem flag and avoid handling of CPU_UP_CANCELED, or b) Use a special notifier events like LOCK_ACQUIRE/RELEASE as Gautham was using in his experiments, or c) Do not send CPU_UP_CANCELED to a subsystem which did not receive CPU_UP_PREPARE. I would prefer c). Signed-off-by: Ravikiran Thirumalai Signed-off-by: Shai Fultheim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index ff60a94142..3318252f65 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -730,7 +730,10 @@ static inline void init_lock_keys(void) } #endif -/* Guard access to the cache-chain. */ +/* + * 1. Guard access to the cache-chain. + * 2. 
Protect sanity of cpu_online_map against cpu hotplug events + */ static DEFINE_MUTEX(cache_chain_mutex); static struct list_head cache_chain; @@ -1230,12 +1233,18 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, kfree(shared); free_alien_cache(alien); } - mutex_unlock(&cache_chain_mutex); break; case CPU_ONLINE: + mutex_unlock(&cache_chain_mutex); start_cpu_timer(cpu); break; #ifdef CONFIG_HOTPLUG_CPU + case CPU_DOWN_PREPARE: + mutex_lock(&cache_chain_mutex); + break; + case CPU_DOWN_FAILED: + mutex_unlock(&cache_chain_mutex); + break; case CPU_DEAD: /* * Even if all the cpus of a node are down, we don't free the @@ -1246,8 +1255,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, * gets destroyed at kmem_cache_destroy(). */ /* fall thru */ +#endif case CPU_UP_CANCELED: - mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; @@ -1308,11 +1317,9 @@ free_array_cache: } mutex_unlock(&cache_chain_mutex); break; -#endif } return NOTIFY_OK; bad: - mutex_unlock(&cache_chain_mutex); return NOTIFY_BAD; } @@ -2098,11 +2105,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, } /* - * Prevent CPUs from coming and going. - * lock_cpu_hotplug() nests outside cache_chain_mutex + * We use cache_chain_mutex to ensure a consistent view of + * cpu_online_map as well. Please see cpuup_callback */ - lock_cpu_hotplug(); - mutex_lock(&cache_chain_mutex); list_for_each_entry(pc, &cache_chain, next) { @@ -2325,7 +2330,6 @@ oops: panic("kmem_cache_create(): failed to create slab `%s'\n", name); mutex_unlock(&cache_chain_mutex); - unlock_cpu_hotplug(); return cachep; } EXPORT_SYMBOL(kmem_cache_create); @@ -2443,6 +2447,7 @@ out: return nr_freed; } +/* Called with cache_chain_mutex held to protect against cpu hotplug */ static int __cache_shrink(struct kmem_cache *cachep) { int ret = 0, i = 0; @@ -2473,9 +2478,13 @@ static int __cache_shrink(struct kmem_cache *cachep) */ int kmem_cache_shrink(struct kmem_cache *cachep) { + int ret; BUG_ON(!cachep || in_interrupt()); - return __cache_shrink(cachep); + mutex_lock(&cache_chain_mutex); + ret = __cache_shrink(cachep); + mutex_unlock(&cache_chain_mutex); + return ret; } EXPORT_SYMBOL(kmem_cache_shrink); @@ -2499,23 +2508,16 @@ void kmem_cache_destroy(struct kmem_cache *cachep) { BUG_ON(!cachep || in_interrupt()); - /* Don't let CPUs to come and go */ - lock_cpu_hotplug(); - /* Find the cache in the chain of caches. */ mutex_lock(&cache_chain_mutex); /* * the chain is never empty, cache_cache is never destroyed */ list_del(&cachep->next); - mutex_unlock(&cache_chain_mutex); - if (__cache_shrink(cachep)) { slab_error(cachep, "Can't free all objects"); - mutex_lock(&cache_chain_mutex); list_add(&cachep->next, &cache_chain); mutex_unlock(&cache_chain_mutex); - unlock_cpu_hotplug(); return; } @@ -2523,7 +2525,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) synchronize_rcu(); __kmem_cache_destroy(cachep); - unlock_cpu_hotplug(); + mutex_unlock(&cache_chain_mutex); } EXPORT_SYMBOL(kmem_cache_destroy); -- cgit v1.2.2 From 3395ee0588795b0b3bd889c260e55959cf2b61f5 Mon Sep 17 00:00:00 2001 From: Paul Menage Date: Wed, 6 Dec 2006 20:32:16 -0800 Subject: [PATCH] mm: add noaliencache boot option to disable numa alien caches When using numa=fake on non-NUMA hardware there is no benefit to having the alien caches, and they consume much memory. Add a kernel boot option to disable them. Christoph sayeth "This is good to have even on large NUMA. 
The problem is that the alien caches grow by the square of the size of the system in terms of nodes." Cc: Christoph Lameter Cc: Pekka Enberg Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 3318252f65..bfd654c0ef 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -869,6 +869,22 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, dump_stack(); } +/* + * By default on NUMA we use alien caches to stage the freeing of + * objects allocated from other nodes. This causes massive memory + * inefficiencies when using fake NUMA setup to split memory into a + * large number of small nodes, so it can be disabled on the command + * line + */ + +static int use_alien_caches __read_mostly = 1; +static int __init noaliencache_setup(char *s) +{ + use_alien_caches = 0; + return 1; +} +__setup("noaliencache", noaliencache_setup); + #ifdef CONFIG_NUMA /* * Special reaping functions for NUMA systems called from cache_reap(). @@ -1117,7 +1133,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) * Make sure we are not freeing a object from another node to the array * cache on this cpu. */ - if (likely(slabp->nodeid == node)) + if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches)) return 0; l3 = cachep->nodelists[node]; @@ -1195,7 +1211,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, list_for_each_entry(cachep, &cache_chain, next) { struct array_cache *nc; struct array_cache *shared; - struct array_cache **alien; + struct array_cache **alien = NULL; nc = alloc_arraycache(node, cachep->limit, cachep->batchcount); @@ -1207,9 +1223,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, if (!shared) goto bad; - alien = alloc_alien_cache(node, cachep->limit); - if (!alien) - goto bad; + if (use_alien_caches) { + alien = alloc_alien_cache(node, cachep->limit); + if (!alien) + goto bad; + } cachep->array[cpu] = nc; l3 = cachep->nodelists[node]; BUG_ON(!l3); @@ -3590,13 +3608,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep) int node; struct kmem_list3 *l3; struct array_cache *new_shared; - struct array_cache **new_alien; + struct array_cache **new_alien = NULL; for_each_online_node(node) { - new_alien = alloc_alien_cache(node, cachep->limit); - if (!new_alien) - goto fail; + if (use_alien_caches) { + new_alien = alloc_alien_cache(node, cachep->limit); + if (!new_alien) + goto fail; + } new_shared = alloc_arraycache(node, cachep->shared*cachep->batchcount, -- cgit v1.2.2 From 8b98c1699eba23cfd2e8b366625c50ff5fd1415b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 6 Dec 2006 20:32:30 -0800 Subject: [PATCH] leak tracking for kmalloc_node We have variants of kmalloc and kmem_cache_alloc that leave leak tracking to the caller. This is used for subsystem-specific allocators like skb_alloc. To make skb_alloc node-aware we need similar routines for the node-aware slab allocator, which this patch adds. 
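A sketch of the intended kind of consumer for the CONFIG_DEBUG_SLAB case (the wrapper name is invented for the illustration): a subsystem-private allocator that wants node-aware allocations but wants slab leak reports to point at its own callers rather than at the wrapper.

    static void *subsys_alloc_node(size_t size, gfp_t gfp, int node)
    {
            /* Record the wrapper's caller as the allocation site. */
            return __kmalloc_node_track_caller(size, gfp, node,
                                               __builtin_return_address(0));
    }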
Note that the code is rather ugly, but it mirrors the non-node-aware code 1:1: [akpm@osdl.org: add module export] Signed-off-by: Christoph Hellwig Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 55 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 13 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index bfd654c0ef..8f3f61cacb 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1015,7 +1015,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep, return NULL; } -static inline void *__cache_alloc_node(struct kmem_cache *cachep, +static inline void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { return NULL; @@ -1023,7 +1023,7 @@ static inline void *__cache_alloc_node(struct kmem_cache *cachep, #else /* CONFIG_NUMA */ -static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); +static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); static void *alternate_node_alloc(struct kmem_cache *, gfp_t); static struct array_cache **alloc_alien_cache(int node, int limit) @@ -3130,10 +3130,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep, objp = ____cache_alloc(cachep, flags); /* * We may just have run out of memory on the local node. - * __cache_alloc_node() knows how to locate memory on other nodes + * ____cache_alloc_node() knows how to locate memory on other nodes */ if (NUMA_BUILD && !objp) - objp = __cache_alloc_node(cachep, flags, numa_node_id()); + objp = ____cache_alloc_node(cachep, flags, numa_node_id()); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); @@ -3160,7 +3160,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) else if (current->mempolicy) nid_alloc = slab_node(current->mempolicy); if (nid_alloc != nid_here) - return __cache_alloc_node(cachep, flags, nid_alloc); + return ____cache_alloc_node(cachep, flags, nid_alloc); return NULL; } @@ -3183,7 +3183,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) if (zone_idx(*z) <= ZONE_NORMAL && cpuset_zone_allowed(*z, flags) && cache->nodelists[nid]) - obj = __cache_alloc_node(cache, + obj = ____cache_alloc_node(cache, flags | __GFP_THISNODE, nid); } return obj; @@ -3192,7 +3192,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) /* * A interface to enable slab creation on nodeid */ -static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, +static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct list_head *entry; @@ -3465,7 +3465,9 @@ out: * New and improved: it will now make sure that the object gets * put on the correct node list so that there is no false sharing. 
*/ -void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) +static __always_inline void * +__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + int nodeid, void *caller) { unsigned long save_flags; void *ptr; @@ -3477,17 +3479,23 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) !cachep->nodelists[nodeid]) ptr = ____cache_alloc(cachep, flags); else - ptr = __cache_alloc_node(cachep, flags, nodeid); + ptr = ____cache_alloc_node(cachep, flags, nodeid); local_irq_restore(save_flags); - ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, - __builtin_return_address(0)); + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); return ptr; } + +void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) +{ + return __cache_alloc_node(cachep, flags, nodeid, + __builtin_return_address(0)); +} EXPORT_SYMBOL(kmem_cache_alloc_node); -void *__kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline void * +__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) { struct kmem_cache *cachep; @@ -3496,8 +3504,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) return NULL; return kmem_cache_alloc_node(cachep, flags, node); } + +#ifdef CONFIG_DEBUG_SLAB +void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __do_kmalloc_node(size, flags, node, + __builtin_return_address(0)); +} EXPORT_SYMBOL(__kmalloc_node); -#endif + +void *__kmalloc_node_track_caller(size_t size, gfp_t flags, + int node, void *caller) +{ + return __do_kmalloc_node(size, flags, node, caller); +} +EXPORT_SYMBOL(__kmalloc_node_track_caller); +#else +void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __do_kmalloc_node(size, flags, node, NULL); +} +EXPORT_SYMBOL(__kmalloc_node); +#endif /* CONFIG_DEBUG_SLAB */ +#endif /* CONFIG_NUMA */ /** * __do_kmalloc - allocate memory -- cgit v1.2.2 From 6e0eaa4b05cf53ca5caa702fd2760a5b3376be69 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:10 -0800 Subject: [PATCH] slab: remove SLAB_NO_GROW It is only used internally in the slab. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 8f3f61cacb..e853dfe8fd 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2721,8 +2721,8 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)); - if (flags & SLAB_NO_GROW) + BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | __GFP_NO_GROW)); + if (flags & __GFP_NO_GROW) return 0; ctor_flags = SLAB_CTOR_CONSTRUCTOR; -- cgit v1.2.2 From a06d72c1dcbff015250df6ad9f0b1d18c02113bf Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:12 -0800 Subject: [PATCH] slab: remove SLAB_LEVEL_MASK SLAB_LEVEL_MASK is only used internally to the slab and is and alias of GFP_LEVEL_MASK. 
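Taken together with the neighbouring alias removals (SLAB_KERNEL and SLAB_DMA, below), the effect at a call site is purely a spelling change; a hypothetical caller (cache names invented) goes from the slab-private names to gfp flags of identical value:

    /* before: slab-private aliases */
    obj = kmem_cache_alloc(foo_cache, SLAB_KERNEL);
    buf = kmem_cache_alloc(foo_dma_cache, SLAB_KERNEL | SLAB_DMA);

    /* after: plain gfp_t flags with the same values */
    obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
    buf = kmem_cache_alloc(foo_dma_cache, GFP_KERNEL | GFP_DMA);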
Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index e853dfe8fd..9f34b4946f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2721,12 +2721,12 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | __GFP_NO_GROW)); + BUG_ON(flags & ~(SLAB_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); if (flags & __GFP_NO_GROW) return 0; ctor_flags = SLAB_CTOR_CONSTRUCTOR; - local_flags = (flags & SLAB_LEVEL_MASK); + local_flags = (flags & GFP_LEVEL_MASK); if (!(local_flags & __GFP_WAIT)) /* * Not allowed to sleep. Need to tell a constructor about -- cgit v1.2.2 From e94b1766097d53e6f3ccfb36c8baa562ffeda3fc Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:17 -0800 Subject: [PATCH] slab: remove SLAB_KERNEL SLAB_KERNEL is an alias of GFP_KERNEL. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 9f34b4946f..1f374c1df0 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2237,7 +2237,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, align = ralign; /* Get cache's description obj. */ - cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); + cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); if (!cachep) goto oops; -- cgit v1.2.2 From 441e143e95f5aa1e04026cb0aa71c801ba53982f Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:19 -0800 Subject: [PATCH] slab: remove SLAB_DMA SLAB_DMA is an alias of GFP_DMA. This is the last one so we remove the leftover comment too. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 1f374c1df0..bb831ba63e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2637,7 +2637,7 @@ static void cache_init_objs(struct kmem_cache *cachep, static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) { - if (flags & SLAB_DMA) + if (flags & GFP_DMA) BUG_ON(!(cachep->gfpflags & GFP_DMA)); else BUG_ON(cachep->gfpflags & GFP_DMA); @@ -2721,7 +2721,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ - BUG_ON(flags & ~(SLAB_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); + BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); if (flags & __GFP_NO_GROW) return 0; -- cgit v1.2.2 From 5bcd234d881d83ac0259c6d42d98f134e31c60a8 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:24 -0800 Subject: [PATCH] slab: fix two issues in kmalloc_node / __cache_alloc_node This addresses two issues: 1. Kmalloc_node() may intermittently return NULL if we are allocating from the current node and are unable to obtain memory for the current node from the page allocator. This is because we call ___cache_alloc() if nodeid == numa_node_id() and ____cache_alloc is not able to fallback to other nodes. This was introduced in the 2.6.19 development cycle. 
<= 2.6.18 in that case does not do a restricted allocation and blindly trusts the page allocator to have given us memory from the indicated node. It inserts the page regardless of the node it came from into the queues for the current node. 2. If kmalloc_node() is used on a node that has not been bootstrapped yet then we may try to pass an invalid node number to ____cache_alloc_node() triggering a BUG(). Change the function to call fallback_alloc() instead. Only call fallback_alloc() if we are allowed to fallback at all. The need to handle a node not bootstrapped yet also first surfaced in the 2.6.19 cycle. Update the comments since they were still describing the old kmalloc_node from 2.6.12. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index bb831ba63e..6da554fd3f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3459,29 +3459,45 @@ out: * @flags: See kmalloc(). * @nodeid: node number of the target node. * - * Identical to kmem_cache_alloc, except that this function is slow - * and can sleep. And it will allocate memory on the given node, which - * can improve the performance for cpu bound structures. - * New and improved: it will now make sure that the object gets - * put on the correct node list so that there is no false sharing. + * Identical to kmem_cache_alloc but it will allocate memory on the given + * node, which can improve the performance for cpu bound structures. + * + * Fallback to other node is possible if __GFP_THISNODE is not set. */ static __always_inline void * __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, void *caller) { unsigned long save_flags; - void *ptr; + void *ptr = NULL; cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); - if (nodeid == -1 || nodeid == numa_node_id() || - !cachep->nodelists[nodeid]) - ptr = ____cache_alloc(cachep, flags); - else - ptr = ____cache_alloc_node(cachep, flags, nodeid); - local_irq_restore(save_flags); + if (unlikely(nodeid == -1)) + nodeid = numa_node_id(); + if (likely(cachep->nodelists[nodeid])) { + if (nodeid == numa_node_id()) { + /* + * Use the locally cached objects if possible. + * However ____cache_alloc does not allow fallback + * to other nodes. It may fail while we still have + * objects on other nodes available. + */ + ptr = ____cache_alloc(cachep, flags); + } + if (!ptr) { + /* ___cache_alloc_node can fall back to other nodes */ + ptr = ____cache_alloc_node(cachep, flags, nodeid); + } + } else { + /* Node not bootstrapped yet */ + if (!(flags & __GFP_THISNODE)) + ptr = fallback_alloc(cachep, flags); + } + + local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); return ptr; -- cgit v1.2.2 From 3c517a6132098ca37e122a2980fc64a9e798b0d7 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:29 -0800 Subject: [PATCH] slab: better fallback allocation behavior Currently we simply attempt to allocate from all allowed nodes using GFP_THISNODE. However, GFP_THISNODE does not do reclaim (it wont do any at all if the recent GFP_THISNODE patch is accepted). If we truly run out of memory in the whole system then fallback_alloc may return NULL although memory may still be available if we would perform more thorough reclaim. 
This patch changes fallback_alloc() so that we first only inspect all the per node queues for available slabs. If we find any then we allocate from those. This avoids slab fragmentation by first getting rid of all partial allocated slabs on every node before allocating new memory. If we cannot satisfy the allocation from any per node queue then we extend a slab. We now call into the page allocator without specifying GFP_THISNODE. The page allocator will then implement its own fallback (in the given cpuset context), perform necessary reclaim (again considering not a single node but the whole set of allowed nodes) and then return pages for a new slab. We identify from which node the pages were allocated and then insert the pages into the corresponding per node structure. In order to do so we need to modify cache_grow() to take a parameter that specifies the new slab. kmem_getpages() can no longer set the GFP_THISNODE flag since we need to be able to use kmem_getpage to allocate from an arbitrary node. GFP_THISNODE needs to be specified when calling cache_grow(). One key advantage is that the decision from which node to allocate new memory is removed from slab fallback processing. The patch allows to go back to use of the page allocators fallback/reclaim logic. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 79 +++++++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 22 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 6da554fd3f..7b8e5d6685 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1605,12 +1605,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) flags |= __GFP_COMP; #endif - /* - * Under NUMA we want memory on the indicated node. We will handle - * the needed fallback ourselves since we want to serve from our - * per node object lists first for other nodes. - */ - flags |= cachep->gfpflags | GFP_THISNODE; + flags |= cachep->gfpflags; page = alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) @@ -2567,7 +2562,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, if (OFF_SLAB(cachep)) { /* Slab management obj is off-slab. */ slabp = kmem_cache_alloc_node(cachep->slabp_cache, - local_flags, nodeid); + local_flags & ~GFP_THISNODE, nodeid); if (!slabp) return NULL; } else { @@ -2708,10 +2703,10 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. */ -static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) +static int cache_grow(struct kmem_cache *cachep, + gfp_t flags, int nodeid, void *objp) { struct slab *slabp; - void *objp; size_t offset; gfp_t local_flags; unsigned long ctor_flags; @@ -2763,12 +2758,14 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) * Get mem for the objs. Attempt to allocate a physical page from * 'nodeid'. */ - objp = kmem_getpages(cachep, flags, nodeid); + if (!objp) + objp = kmem_getpages(cachep, flags, nodeid); if (!objp) goto failed; /* Get slab management. 
*/ - slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid); + slabp = alloc_slabmgmt(cachep, objp, offset, + local_flags & ~GFP_THISNODE, nodeid); if (!slabp) goto opps1; @@ -3006,7 +3003,7 @@ alloc_done: if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags, node); + x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); /* cache_grow can reenable interrupts, then ac could change. */ ac = cpu_cache_get(cachep); @@ -3166,9 +3163,11 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) /* * Fallback function if there was no memory available and no objects on a - * certain node and we are allowed to fall back. We mimick the behavior of - * the page allocator. We fall back according to a zonelist determined by - * the policy layer while obeying cpuset constraints. + * certain node and fall back is permitted. First we scan all the + * available nodelists for available objects. If that fails then we + * perform an allocation without specifying a node. This allows the page + * allocator to do its reclaim / fallback magic. We then insert the + * slab into the proper nodelist and then allocate from it. */ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) { @@ -3176,15 +3175,51 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) ->node_zonelists[gfp_zone(flags)]; struct zone **z; void *obj = NULL; + int nid; +retry: + /* + * Look through allowed nodes for objects available + * from existing per node queues. + */ for (z = zonelist->zones; *z && !obj; z++) { - int nid = zone_to_nid(*z); + nid = zone_to_nid(*z); + + if (cpuset_zone_allowed(*z, flags) && + cache->nodelists[nid] && + cache->nodelists[nid]->free_objects) + obj = ____cache_alloc_node(cache, + flags | GFP_THISNODE, nid); + } - if (zone_idx(*z) <= ZONE_NORMAL && - cpuset_zone_allowed(*z, flags) && - cache->nodelists[nid]) - obj = ____cache_alloc_node(cache, - flags | __GFP_THISNODE, nid); + if (!obj) { + /* + * This allocation will be performed within the constraints + * of the current cpuset / memory policy requirements. + * We may trigger various forms of reclaim on the allowed + * set and go into memory reserves if necessary. + */ + obj = kmem_getpages(cache, flags, -1); + if (obj) { + /* + * Insert into the appropriate per node queues + */ + nid = page_to_nid(virt_to_page(obj)); + if (cache_grow(cache, flags, nid, obj)) { + obj = ____cache_alloc_node(cache, + flags | GFP_THISNODE, nid); + if (!obj) + /* + * Another processor may allocate the + * objects in the slab since we are + * not holding any locks. 
+ */ + goto retry; + } else { + kmem_freepages(cache, obj); + obj = NULL; + } + } } return obj; } @@ -3241,7 +3276,7 @@ retry: must_grow: spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags, nodeid); + x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); if (x) goto retry; -- cgit v1.2.2 From 138ae6631a3d6f86851dd53686fa88295d1398bd Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 6 Dec 2006 20:36:41 -0800 Subject: [PATCH] slab: use probe_kernel_address() Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 7b8e5d6685..86f5d6e995 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -103,12 +103,12 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -2124,7 +2124,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, mutex_lock(&cache_chain_mutex); list_for_each_entry(pc, &cache_chain, next) { - mm_segment_t old_fs = get_fs(); char tmp; int res; @@ -2133,9 +2132,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * destroy its slab cache and no-one else reuses the vmalloc * area of the module. Print a warning. */ - set_fs(KERNEL_DS); - res = __get_user(tmp, pc->name); - set_fs(old_fs); + res = probe_kernel_address(pc->name, tmp); if (res) { printk("SLAB: cache with size %d has lost its name\n", pc->buffer_size); -- cgit v1.2.2 From 15ad7cdcfd76450d4beebc789ec646664238184d Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 6 Dec 2006 20:40:36 -0800 Subject: [PATCH] struct seq_operations and struct file_operations constification - move some file_operations structs into the .rodata section - move static strings from policy_types[] array into the .rodata section - fix generic seq_operations usages, so that those structs may be defined as "const" as well [akpm@osdl.org: couple of fixes] Signed-off-by: Helge Deller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 86f5d6e995..068cb4503c 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4142,7 +4142,7 @@ static int s_show(struct seq_file *m, void *p) * + further values on SMP and with statistics enabled */ -struct seq_operations slabinfo_op = { +const struct seq_operations slabinfo_op = { .start = s_start, .next = s_next, .stop = s_stop, @@ -4340,7 +4340,7 @@ static int leaks_show(struct seq_file *m, void *p) return 0; } -struct seq_operations slabstats_op = { +const struct seq_operations slabstats_op = { .start = leaks_start, .next = s_next, .stop = s_stop, -- cgit v1.2.2 From b8b50b6519afa9891b753c4fffa89d89e04df66a Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 8 Dec 2006 02:35:53 -0800 Subject: [PATCH] mm: fallback_alloc cpuset_zone_allowed irq fix fallback_alloc() could end up calling cpuset_zone_allowed() with interrupts disabled (by code in kmem_cache_alloc_node()), but without __GFP_HARDWALL set, leading to a possible call of a sleeping function with interrupts disabled. This results in the BUG report: BUG: sleeping function called from invalid context at kernel/cpuset.c:1520 in_atomic():0, irqs_disabled():1 Thanks to Paul Menage for catching this one. 
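Schematically, the rule the fix enforces (the zone variable and allocation helper below are placeholders): any path that may run with interrupts disabled has to request the non-sleeping hardwall check explicitly.

    /*
     * May be reached with interrupts off; without __GFP_HARDWALL,
     * cpuset_zone_allowed() can end up in a sleeping softwall check,
     * which is what triggered the BUG report above.
     */
    if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL))
            obj = alloc_from_this_zone(zone, flags);  /* hypothetical helper */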
Signed-off-by: Paul Jackson Cc: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 068cb4503c..e90b6100a9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3182,7 +3182,7 @@ retry: for (z = zonelist->zones; *z && !obj; z++) { nid = zone_to_nid(*z); - if (cpuset_zone_allowed(*z, flags) && + if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) && cache->nodelists[nid] && cache->nodelists[nid]->free_objects) obj = ____cache_alloc_node(cache, -- cgit v1.2.2 From 8a8b6502fb669c3a0638a08955442814cedc86b1 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 8 Dec 2006 02:39:44 -0800 Subject: [PATCH] fault-injection capability for kmalloc This patch provides fault-injection capability for kmalloc. Boot option: failslab=,,, -- specifies the interval of failures. -- specifies how often it should fail in percent. -- specifies the size of free space where memory can be allocated safely in bytes. -- specifies how many times failures may happen at most. Debugfs: /debug/failslab/interval /debug/failslab/probability /debug/failslab/specifies /debug/failslab/times /debug/failslab/ignore-gfp-highmem /debug/failslab/ignore-gfp-wait Example: failslab=10,100,0,-1 slab allocation (kmalloc(), kmem_cache_alloc(),..) fails once per 10 times. Cc: Pekka Enberg Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index e90b6100a9..47011e2ef3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -107,6 +107,7 @@ #include #include #include +#include #include #include @@ -3088,12 +3089,88 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) #endif +#ifdef CONFIG_FAILSLAB + +static struct failslab_attr { + + struct fault_attr attr; + + u32 ignore_gfp_wait; +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + struct dentry *ignore_gfp_wait_file; +#endif + +} failslab = { + .attr = FAULT_ATTR_INITIALIZER, +}; + +static int __init setup_failslab(char *str) +{ + return setup_fault_attr(&failslab.attr, str); +} +__setup("failslab=", setup_failslab); + +static int should_failslab(struct kmem_cache *cachep, gfp_t flags) +{ + if (cachep == &cache_cache) + return 0; + if (flags & __GFP_NOFAIL) + return 0; + if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) + return 0; + + return should_fail(&failslab.attr, obj_size(cachep)); +} + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +static int __init failslab_debugfs(void) +{ + mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; + struct dentry *dir; + int err; + + err = init_fault_attr_dentries(&failslab.attr, "failslab"); + if (err) + return err; + dir = failslab.attr.dentries.dir; + + failslab.ignore_gfp_wait_file = + debugfs_create_bool("ignore-gfp-wait", mode, dir, + &failslab.ignore_gfp_wait); + + if (!failslab.ignore_gfp_wait_file) { + err = -ENOMEM; + debugfs_remove(failslab.ignore_gfp_wait_file); + cleanup_fault_attr_dentries(&failslab.attr); + } + + return err; +} + +late_initcall(failslab_debugfs); + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +#else /* CONFIG_FAILSLAB */ + +static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) +{ + return 0; +} + +#endif /* CONFIG_FAILSLAB */ + static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void 
*objp; struct array_cache *ac; check_irq_off(); + + if (should_failslab(cachep, flags)) + return NULL; + ac = cpu_cache_get(cachep); if (likely(ac->avail)) { STATS_INC_ALLOCHIT(cachep); -- cgit v1.2.2 From 6b1b60f41eef3ba7b188fd72f1d6de478aafd93c Mon Sep 17 00:00:00 2001 From: Don Mullis Date: Fri, 8 Dec 2006 02:39:53 -0800 Subject: [PATCH] fault-injection: defaults likely to please a new user Assign defaults most likely to please a new user: 1) generate some logging output (verbose=2) 2) avoid injecting failures likely to lock up UI (ignore_gfp_wait=1, ignore_gfp_highmem=1) Signed-off-by: Don Mullis Cc: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 47011e2ef3..56af694c9e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3102,6 +3102,7 @@ static struct failslab_attr { } failslab = { .attr = FAULT_ATTR_INITIALIZER, + .ignore_gfp_wait = 1, }; static int __init setup_failslab(char *str) -- cgit v1.2.2 From 2b2842146cb4105877c2be51d3857ec61ebd4ff9 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 10 Dec 2006 02:21:28 -0800 Subject: [PATCH] user of the jiffies rounding patch: Slab This patch introduces users of the round_jiffies() function in the slab code. The slab code has a few "run every second" timers for background work; these are obviously not timing critical as long as they happen roughly at the right frequency. Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'mm/slab.c') diff --git a/mm/slab.c b/mm/slab.c index 56af694c9e..2c655532f5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -946,7 +946,8 @@ static void __devinit start_cpu_timer(int cpu) if (keventd_up() && reap_work->work.func == NULL) { init_reap_node(cpu); INIT_DELAYED_WORK(reap_work, cache_reap); - schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); + schedule_delayed_work_on(cpu, reap_work, + __round_jiffies_relative(HZ, cpu)); } } @@ -4006,7 +4007,7 @@ static void cache_reap(struct work_struct *unused) if (!mutex_trylock(&cache_chain_mutex)) { /* Give up. Setup the next iteration. */ schedule_delayed_work(&__get_cpu_var(reap_work), - REAPTIMEOUT_CPUC); + round_jiffies_relative(REAPTIMEOUT_CPUC)); return; } @@ -4052,7 +4053,8 @@ next: next_reap_node(); refresh_cpu_vm_stats(smp_processor_id()); /* Set up the next iteration */ - schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); + schedule_delayed_work(&__get_cpu_var(reap_work), + round_jiffies_relative(REAPTIMEOUT_CPUC)); } #ifdef CONFIG_PROC_FS -- cgit v1.2.2
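The resulting rescheduling pattern for this kind of periodic, non-timing-critical housekeeping looks roughly like the sketch below (the worker name and its body are invented; the interval rounding is the point):

    static DEFINE_PER_CPU(struct delayed_work, housekeeping_work);

    static void housekeeping(struct work_struct *unused)
    {
            do_background_scan();   /* placeholder for the real periodic work */

            /*
             * Re-arm for the next run; rounding the delay lets unrelated
             * once-a-second timers expire together, so the CPU can stay
             * idle longer between wakeups.
             */
            schedule_delayed_work(&__get_cpu_var(housekeeping_work),
                                  round_jiffies_relative(HZ));
    }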