Diffstat (limited to 'mm')
 mm/slab.c | 4 ++--
 mm/slob.c | 4 ++--
 mm/slub.c | 5 ++---
 3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 997c3b2f50c9..583644f6ae11 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -175,12 +175,12 @@
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
-			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
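
For reference, CREATE_MASK is slab.c's whitelist of flags accepted at cache creation; dropping SLAB_MUST_HWCACHE_ALIGN from it means the flag is no longer a valid argument. The following is a minimal userspace sketch of that validation pattern; the flag values and the check_create_flags() helper are illustrative stand-ins, not the kernel code itself.

#include <assert.h>
#include <stdio.h>

/* Stand-in flag values for illustration; the real ones live in
 * include/linux/slab.h. */
#define SLAB_HWCACHE_ALIGN	0x00002000UL
#define SLAB_CACHE_DMA		0x00004000UL
#define SLAB_PANIC		0x00040000UL

/* Whitelist of accepted creation flags, mirroring the role of CREATE_MASK. */
#define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC)

/* Returns 0 if every requested flag is in the whitelist, -1 otherwise. */
static int check_create_flags(unsigned long flags)
{
	return (flags & ~CREATE_MASK) ? -1 : 0;
}

int main(void)
{
	assert(check_create_flags(SLAB_HWCACHE_ALIGN) == 0);
	assert(check_create_flags(0x1UL) == -1);	/* unknown flag rejected */
	puts("flag checks passed");
	return 0;
}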
diff --git a/mm/slob.c b/mm/slob.c
index 77786be032e0..c9401a7eaa5f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
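
The slob.c change keeps the documented behaviour (8-byte alignment only when hardware-cache alignment is requested) but keys it off SLAB_HWCACHE_ALIGN. Below is a small userspace sketch mirroring that alignment choice; SLOB_ALIGN, the flag value, and the slob_cache_align() helper are stand-ins for the kernel definitions, shown only for illustration.

#include <stdio.h>

/* Stand-in values for illustration; the real definitions live in
 * include/linux/slab.h and mm/slob.c. */
#define SLAB_HWCACHE_ALIGN	0x00002000UL
#define SLOB_ALIGN		8

/* Mirrors the post-patch SLOB logic: honour the caller's explicit
 * alignment, otherwise align only when SLAB_HWCACHE_ALIGN is set. */
static size_t slob_cache_align(unsigned long flags, size_t requested_align)
{
	size_t align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;

	if (align < requested_align)
		align = requested_align;
	return align;
}

int main(void)
{
	printf("%zu\n", slob_cache_align(SLAB_HWCACHE_ALIGN, 0));	/* 8  */
	printf("%zu\n", slob_cache_align(0, 16));			/* 16 */
	printf("%zu\n", slob_cache_align(0, 0));			/* 0  */
	return 0;
}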
diff --git a/mm/slub.c b/mm/slub.c
index 3904002bdb35..79940e98e5e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * specified alignment though. If that is greater
 	 * then use it.
 	 */
-	if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+	if ((flags & SLAB_HWCACHE_ALIGN) &&
 			size > L1_CACHE_BYTES / 2)
 		return max_t(unsigned long, align, L1_CACHE_BYTES);
 
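
With the extra flag gone, calculate_alignment() in slub.c only needs to test SLAB_HWCACHE_ALIGN. The sketch below mirrors the condition in this hunk in plain userspace C; L1_CACHE_BYTES, ARCH_SLAB_MINALIGN and calc_align() are illustrative stand-ins, and the fallback path is simplified relative to the real function.

#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL
#define L1_CACHE_BYTES		64UL	/* stand-in cache line size */
#define ARCH_SLAB_MINALIGN	sizeof(unsigned long long)

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Mirrors the post-patch condition: hardware-cache align only when the
 * caller asked for it and the object is bigger than half a cache line;
 * a larger explicit alignment always wins. */
static unsigned long calc_align(unsigned long flags, unsigned long align,
				unsigned long size)
{
	if ((flags & SLAB_HWCACHE_ALIGN) && size > L1_CACHE_BYTES / 2)
		return max_ul(align, L1_CACHE_BYTES);

	return max_ul(align, ARCH_SLAB_MINALIGN);
}

int main(void)
{
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 0, 128));	/* 64 */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 0, 16));		/* 8  */
	printf("%lu\n", calc_align(0, 32, 128));			/* 32 */
	return 0;
}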
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags &
-		(SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);
 
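
The final hunk makes SLUB's sysfs attribute report SLAB_HWCACHE_ALIGN alone. Assuming a SLUB kernel, the value can be read from /sys/kernel/slab/<cache>/hwcache_align; the cache name in this sketch is only an example.

#include <stdio.h>

int main(void)
{
	/* Example cache name; any directory under /sys/kernel/slab works. */
	const char *path = "/sys/kernel/slab/kmalloc-64/hwcache_align";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* prints 0 or 1 */
	fclose(f);
	return 0;
}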