Diffstat (limited to 'mm')
 mm/rmap.c  |  3 +--
 mm/shmem.c |  3 +--
 mm/slab.c  | 20 ++------------------
 mm/slub.c  | 10 ----------
 4 files changed, 4 insertions(+), 32 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 59da5b734c80..75a32be64a21 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,8 +162,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
                           unsigned long flags)
 {
-        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-                                                SLAB_CTOR_CONSTRUCTOR) {
+        if (flags & SLAB_CTOR_CONSTRUCTOR) {
                 struct anon_vma *anon_vma = data;
 
                 spin_lock_init(&anon_vma->lock);
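
For context (not part of the patch): with SLAB_CTOR_VERIFY gone, a constructor no longer has to mask the verify bit out before testing for SLAB_CTOR_CONSTRUCTOR; a plain flag test suffices, as this hunk and the shmem.c hunk below show. A minimal sketch of the resulting ctor pattern, assuming the 2.6.21-era constructor signature; struct foo and foo_ctor are hypothetical names:

#include <linux/slab.h>

struct foo {                            /* hypothetical cached object */
        int state;
};

/* Hypothetical ctor; runs once when a slab page is populated. */
static void foo_ctor(void *data, struct kmem_cache *cachep,
                     unsigned long flags)
{
        /* The old API forced ctors to write:
         *      if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
         *                                      SLAB_CTOR_CONSTRUCTOR)
         * With the verify bit removed, a direct test is enough.
         */
        if (flags & SLAB_CTOR_CONSTRUCTOR) {
                struct foo *f = data;

                f->state = 0;           /* one-time initialization */
        }
}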
diff --git a/mm/shmem.c b/mm/shmem.c
index b2a35ebf071a..f01e8deed645 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,8 +2358,7 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-            SLAB_CTOR_CONSTRUCTOR) {
+        if (flags & SLAB_CTOR_CONSTRUCTOR) {
                 inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
                 p->i_acl = NULL;
diff --git a/mm/slab.c b/mm/slab.c
index 2a3cbd6e675d..a877d6f3d687 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,8 +116,7 @@
 #include        <asm/page.h>
 
 /*
- * DEBUG        - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- *                SLAB_RED_ZONE & SLAB_POISON.
+ * DEBUG        - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *                0 for faster, smaller code (especially in the critical paths).
  *
  * STATS        - 1 to collect stats for /proc/slabinfo.
@@ -172,7 +171,7 @@
 
 /* Legal flag mask for kmem_cache_create(). */
 #if DEBUG
-# define CREATE_MASK    (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+# define CREATE_MASK    (SLAB_RED_ZONE | \
                          SLAB_POISON | SLAB_HWCACHE_ALIGN | \
                          SLAB_CACHE_DMA | \
                          SLAB_STORE_USER | \
@@ -2184,12 +2183,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 #if DEBUG
         WARN_ON(strchr(name, ' '));     /* It confuses parsers */
-        if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
-                /* No constructor, but inital state check requested */
-                printk(KERN_ERR "%s: No con, but init state check "
-                       "requested - %s\n", __FUNCTION__, name);
-                flags &= ~SLAB_DEBUG_INITIAL;
-        }
 #if FORCED_DEBUG
         /*
          * Enable redzoning and last user accounting, except for caches with
@@ -2895,15 +2888,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
         BUG_ON(objnr >= cachep->num);
         BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-        if (cachep->flags & SLAB_DEBUG_INITIAL) {
-                /*
-                 * Need to call the slab's constructor so the caller can
-                 * perform a verify of its state (debugging).  Called without
-                 * the cache-lock held.
-                 */
-                cachep->ctor(objp + obj_offset(cachep),
-                             cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
-        }
         if (cachep->flags & SLAB_POISON && cachep->dtor) {
                 /* we want to cache poison the object,
                  * call the destruction callback
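
For context (not part of the patch): the block deleted from cache_free_debugcheck() was the sole consumer of SLAB_DEBUG_INITIAL. On every kmem_cache_free() it re-invoked the object's constructor with SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY so the ctor could sanity-check the object's state at free time. A ctor opting in would have looked roughly like the sketch below; this is hypothetical, assuming the old flags, and as the slub.c comment removed further down notes, no in-tree ctor actually used the hook:

#include <linux/slab.h>
#include <linux/bug.h>

struct foo {                            /* hypothetical cached object */
        int refcount;
};

static void foo_ctor(void *data, struct kmem_cache *cachep,
                     unsigned long flags)
{
        struct foo *f = data;

        if (flags & SLAB_CTOR_VERIFY) {
                /* Old free-path call: verify state, do not reinitialize. */
                BUG_ON(f->refcount != 0);
                return;
        }
        if (flags & SLAB_CTOR_CONSTRUCTOR)
                f->refcount = 0;        /* normal one-time initialization */
}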
diff --git a/mm/slub.c b/mm/slub.c
index 79940e98e5e6..bd86182e595e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -97,9 +97,6 @@
  *
  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  *
- * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
- *   it.
- *
  * - Variable sizing of the per node arrays
  */
 
@@ -126,11 +123,6 @@
 #endif
 
 /*
- * Flags from the regular SLAB that SLUB does not support:
- */
-#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
-
-/*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
@@ -1748,8 +1740,6 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1748 s->flags = flags; 1740 s->flags = flags;
1749 s->align = align; 1741 s->align = align;
1750 1742
1751 BUG_ON(flags & SLUB_UNIMPLEMENTED);
1752
1753 /* 1743 /*
1754 * The page->offset field is only 16 bit wide. This is an offset 1744 * The page->offset field is only 16 bit wide. This is an offset
1755 * in units of words from the beginning of an object. If the slab 1745 * in units of words from the beginning of an object. If the slab
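
For context (not part of the patch): after this change, callers set up their caches exactly as before, minus the removed flag; debug coverage comes from the flags that remain in CREATE_MASK, such as SLAB_RED_ZONE and SLAB_POISON. A minimal sketch, assuming the 2.6.21-era six-argument kmem_cache_create() (ctor plus dtor) and reusing the hypothetical struct foo and foo_ctor from the sketches above:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *foo_cachep;   /* hypothetical cache */

static int __init foo_cache_init(void)
{
        /* SLAB_DEBUG_INITIAL no longer exists; red-zoning and poisoning
         * provide the remaining debug checking on this cache. */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                       0, SLAB_HWCACHE_ALIGN,
                                       foo_ctor, NULL);
        return foo_cachep ? 0 : -ENOMEM;
}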