path: root/mm/slub.c
author	Heiko Carstens <heiko.carstens@de.ibm.com>	2012-01-12 20:17:27 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:03 -0500
commit	43570fd2f47ba518145e9289f54cde3dba4c8b25 (patch)
tree	67aa61c8b972f4b3af66cd89082222e77e06a4cb /mm/slub.c
parent	0d259cf8190b9c446eefd5225ffcc3941e76a432 (diff)
mm,slub,x86: decouple size of struct page from CONFIG_CMPXCHG_LOCAL
While implementing cmpxchg_double() on s390 I realized that we don't set CONFIG_CMPXCHG_LOCAL despite the fact that we have support for it. However, setting that option would increase the size of struct page by eight bytes on 64 bit, which we certainly do not want. Also, it doesn't make sense that a present CPU feature should increase the size of struct page.

Besides that, the dependency on CMPXCHG_LOCAL looks wrong; it should depend on CMPXCHG_DOUBLE instead.

This patch: if an architecture supports CMPXCHG_LOCAL, this should no longer automatically result in larger struct pages when the SLUB allocator is used. Instead, introduce a new config option "HAVE_ALIGNED_STRUCT_PAGE" which can be selected if a double word aligned struct page is required. Also update the x86 Kconfig so that it works as before.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
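The struct page and Kconfig parts of the series are outside the mm/slub.c diffstat shown below. As a rough sketch of the idea (abbreviated fields, not the literal hunk from this series), the new option gates the double word alignment that cmpxchg_double() on the freelist/counters pair needs:

/*
 * Sketch only: abbreviated struct page showing how the extra alignment
 * can be keyed off the new option rather than CONFIG_CMPXCHG_LOCAL.
 */
struct page {
	void *freelist;		/* SLUB: first free object in the page */
	unsigned long counters;	/* SLUB: inuse/objects/frozen, packed */
	/* ... remaining fields elided ... */
}
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	/* cmpxchg_double() needs the pair in one aligned double-word block */
	__aligned(2 * sizeof(unsigned long))
#endif
;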
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	6
1 files changed, 3 insertions, 3 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 5d37b5e44140..72aa84134609 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -366,7 +366,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 		const char *n)
 {
 	VM_BUG_ON(!irqs_disabled());
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
@@ -400,7 +400,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_new, unsigned long counters_new,
 		const char *n)
 {
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
 		if (cmpxchg_double(&page->freelist, &page->counters,
 			freelist_old, counters_old,
@@ -3014,7 +3014,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 		}
 	}
 
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
 		/* Enable fast mode */
 		s->flags |= __CMPXCHG_DOUBLE;
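For context, cmpxchg_double() compares and swaps two adjacent machine words in a single atomic operation, which is why the freelist/counters pair must sit in one naturally aligned double-word block. A standalone userspace analogue using the GCC __atomic builtins (illustration only, not kernel code; on x86-64 build with gcc -mcx16 and link against libatomic if needed):

#include <stdbool.h>
#include <stdio.h>

/* Two adjacent words, aligned the way SLUB needs page->freelist/counters. */
struct pair {
	void *freelist;
	unsigned long counters;
} __attribute__((aligned(2 * sizeof(unsigned long))));

/* Userspace stand-in for the kernel's cmpxchg_double(): one CAS over both words. */
static bool pair_cmpxchg(struct pair *p, struct pair expected, struct pair desired)
{
	return __atomic_compare_exchange(p, &expected, &desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct pair p = { 0 };
	struct pair expected = { 0 };
	struct pair desired = { (void *)0x1000, 1 };

	/* Succeeds only if both words still hold their expected values. */
	printf("double-word cmpxchg %s\n",
	       pair_cmpxchg(&p, expected, desired) ? "succeeded" : "failed");
	return 0;
}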