about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--mm/slub.c6
4 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 2505740b81d2..a2c5c077c32d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -185,4 +185,12 @@ config HAVE_RCU_TABLE_FREE
185config ARCH_HAVE_NMI_SAFE_CMPXCHG 185config ARCH_HAVE_NMI_SAFE_CMPXCHG
186 bool 186 bool
187 187
188config HAVE_ALIGNED_STRUCT_PAGE
189 bool
190 help
191 This makes sure that struct pages are double word aligned and that
192 e.g. the SLUB allocator can perform double word atomic operations
193 on a struct page for better performance. However selecting this
194 might increase the size of a struct page by a word.
195
188source "kernel/gcov/Kconfig" 196source "kernel/gcov/Kconfig"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a150f4c35e94..5201a2c27239 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
60 select PERF_EVENTS 60 select PERF_EVENTS
61 select HAVE_PERF_EVENTS_NMI 61 select HAVE_PERF_EVENTS_NMI
62 select ANON_INODES 62 select ANON_INODES
63 select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
63 select HAVE_ARCH_KMEMCHECK 64 select HAVE_ARCH_KMEMCHECK
64 select HAVE_USER_RETURN_NOTIFIER 65 select HAVE_USER_RETURN_NOTIFIER
65 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 66 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5b42f1b34eb7..3cc3062b3767 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -151,12 +151,11 @@ struct page {
151#endif 151#endif
152} 152}
153/* 153/*
154 * If another subsystem starts using the double word pairing for atomic 154 * The struct page can be forced to be double word aligned so that atomic ops
155 * operations on struct page then it must change the #if to ensure 155 * on double words work. The SLUB allocator can make use of such a feature.
156 * proper alignment of the page struct.
157 */ 156 */
158#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL) 157#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
159 __attribute__((__aligned__(2*sizeof(unsigned long)))) 158 __aligned(2 * sizeof(unsigned long))
160#endif 159#endif
161; 160;
162 161
diff --git a/mm/slub.c b/mm/slub.c
index 5d37b5e44140..72aa84134609 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -366,7 +366,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
366 const char *n) 366 const char *n)
367{ 367{
368 VM_BUG_ON(!irqs_disabled()); 368 VM_BUG_ON(!irqs_disabled());
369#ifdef CONFIG_CMPXCHG_DOUBLE 369#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
370 if (s->flags & __CMPXCHG_DOUBLE) { 370 if (s->flags & __CMPXCHG_DOUBLE) {
371 if (cmpxchg_double(&page->freelist, &page->counters, 371 if (cmpxchg_double(&page->freelist, &page->counters,
372 freelist_old, counters_old, 372 freelist_old, counters_old,
@@ -400,7 +400,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
400 void *freelist_new, unsigned long counters_new, 400 void *freelist_new, unsigned long counters_new,
401 const char *n) 401 const char *n)
402{ 402{
403#ifdef CONFIG_CMPXCHG_DOUBLE 403#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
404 if (s->flags & __CMPXCHG_DOUBLE) { 404 if (s->flags & __CMPXCHG_DOUBLE) {
405 if (cmpxchg_double(&page->freelist, &page->counters, 405 if (cmpxchg_double(&page->freelist, &page->counters,
406 freelist_old, counters_old, 406 freelist_old, counters_old,
@@ -3014,7 +3014,7 @@ static int kmem_cache_open(struct kmem_cache *s,
3014 } 3014 }
3015 } 3015 }
3016 3016
3017#ifdef CONFIG_CMPXCHG_DOUBLE 3017#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3018 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0) 3018 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3019 /* Enable fast mode */ 3019 /* Enable fast mode */
3020 s->flags |= __CMPXCHG_DOUBLE; 3020 s->flags |= __CMPXCHG_DOUBLE;