Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c | 4 ++--
-rw-r--r--  mm/slob.c | 4 ++--
-rw-r--r--  mm/slub.c | 5 ++---
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -175,12 +175,12 @@
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
-			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
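CREATE_MASK is the whitelist of cache flags that mm/slab.c accepts from callers of kmem_cache_create(); with SLAB_MUST_HWCACHE_ALIGN dropped from both variants of the mask, the flag is now treated like any other unknown bit. A stand-alone, user-space sketch of that whitelist pattern follows; the DEMO_* values are made up for illustration, and only the masking logic mirrors the kernel's check.

/*
 * Sketch of the flag-whitelist pattern CREATE_MASK implements.
 * The DEMO_* bit values are invented for this example.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_HWCACHE_ALIGN	0x1UL
#define DEMO_CACHE_DMA		0x2UL
#define DEMO_PANIC		0x4UL
#define DEMO_CREATE_MASK	(DEMO_HWCACHE_ALIGN | DEMO_CACHE_DMA | DEMO_PANIC)

static void demo_cache_create(unsigned long flags)
{
	/* Any bit outside the mask is a caller bug, as in kmem_cache_create(). */
	assert(!(flags & ~DEMO_CREATE_MASK));
	printf("flags 0x%lx accepted\n", flags);
}

int main(void)
{
	demo_cache_create(DEMO_HWCACHE_ALIGN | DEMO_PANIC);
	/* demo_cache_create(0x8UL); would trip the assert: 0x8 is not whitelisted */
	return 0;
}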
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
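For callers, the rewritten comment means SLAB_HWCACHE_ALIGN is now the only flag that asks SLOB for more than the default 8-byte alignment. A minimal, hypothetical module sketch of such a caller, using the six-argument kmem_cache_create() of this kernel generation (the struct and cache names are illustrative only):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative object type; any name would do. */
struct foo {
	int id;
	char payload[40];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/*
	 * With SLAB_HWCACHE_ALIGN, the SLOB backend pads objects out to
	 * SLOB_ALIGN; without it they keep the default 8-byte alignment
	 * described in the comment above.
	 */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");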
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
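The alignment actually stored in the cache is the larger of the flag-derived value and the caller's align argument. A user-space sketch of that selection, assuming SLOB_ALIGN is 8 purely for illustration (the kernel value depends on the configuration) and an illustrative flag value:

/* Sketch of the alignment pick in SLOB's kmem_cache_create(). */
#include <stdio.h>

#define SLOB_ALIGN		8UL		/* illustrative */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* illustrative flag value */

static unsigned long slob_pick_align(unsigned long flags, unsigned long align)
{
	/* Honour SLAB_HWCACHE_ALIGN, otherwise ignore alignment... */
	unsigned long a = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;

	/* ...unless the caller explicitly asked for more. */
	return a < align ? align : a;
}

int main(void)
{
	printf("%lu\n", slob_pick_align(SLAB_HWCACHE_ALIGN, 0));	/* 8 */
	printf("%lu\n", slob_pick_align(0, 16));			/* 16 */
	return 0;
}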
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * specified alignment though. If that is greater
 	 * then use it.
 	 */
-	if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+	if ((flags & SLAB_HWCACHE_ALIGN) &&
 			size > L1_CACHE_BYTES / 2)
 		return max_t(unsigned long, align, L1_CACHE_BYTES);
 
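The rule this hunk touches only cache-aligns objects that are worth it: SLAB_HWCACHE_ALIGN takes effect when the object is bigger than half a cache line, and it never lowers an explicitly requested alignment. A user-space sketch with assumed constants (64-byte L1 line, illustrative flag value; the real function falls through to an architecture minimum alignment that is omitted here):

/* Sketch of SLUB's hardware-cache-alignment rule after this change. */
#include <stdio.h>

#define L1_CACHE_BYTES		64UL		/* assumed cache line size */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* illustrative flag value */

static unsigned long calc_align(unsigned long flags, unsigned long align,
				unsigned long size)
{
	/* Only align to the cache line if the object fills more than half of it. */
	if ((flags & SLAB_HWCACHE_ALIGN) && size > L1_CACHE_BYTES / 2)
		return align > L1_CACHE_BYTES ? align : L1_CACHE_BYTES;
	/* Fallthrough to the minimum-alignment handling is omitted here. */
	return align;
}

int main(void)
{
	/* 40-byte object: bigger than half a line, so it gets 64-byte alignment. */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 0, 40));	/* 64 */
	/* 24-byte object: too small to spend a whole cache line on. */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 0, 24));	/* 0 */
	return 0;
}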
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags &
-		(SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);
 
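SLAB_ATTR_RO(hwcache_align) keeps exposing this as a read-only sysfs attribute, which after this change reports only SLAB_HWCACHE_ALIGN. A user-space sketch of reading it; the /sys/slab/<cache>/ path and the kmalloc-64 cache name are assumptions about how SLUB of this era laid out its sysfs tree:

/* Sketch: read the hwcache_align attribute of one SLUB cache. */
#include <stdio.h>

int main(void)
{
	char buf[4];
	/* Path and cache name are assumed, not taken from this patch. */
	FILE *f = fopen("/sys/slab/kmalloc-64/hwcache_align", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("hwcache_align: %s", buf);	/* prints 0 or 1 */
	fclose(f);
	return 0;
}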