author	Chris Metcalf <cmetcalf@tilera.com>	2011-06-02 10:19:41 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-06-03 12:33:49 -0400
commit	d4d84fef6d0366b585b7de13527a0faeca84d9ce (patch)
tree	c67449976f955cff4a2fe8a74affc2dd67c37b7e
parent	55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
slub: always align cpu_slab to honor cmpxchg_double requirement
On an architecture without CMPXCHG_LOCAL but with DEBUG_VM enabled, the
VM_BUG_ON() in __pcpu_double_call_return_bool() will cause an early panic
during boot unless we always align cpu_slab properly.

In principle we could remove the alignment-testing VM_BUG_ON() for
architectures that don't have CMPXCHG_LOCAL, but leaving it in means that
new code will tend not to break x86 even if it is introduced on another
platform, and it's low cost to require alignment.

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
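For illustration only (not part of the commit): a minimal user-space C sketch
of the constraint that VM_BUG_ON() enforces. A double-word cmpxchg needs its
operand aligned to 2 * sizeof(void *), and the two variables must be adjacent.
The struct and field names below are hypothetical analogs of the pair of percpu
fields in struct kmem_cache_cpu, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical analog of the two percpu fields updated by cmpxchg_double:
 * the pair must start on a double-word boundary and be contiguous. */
struct pair {
	void *first;
	unsigned long second;
} __attribute__((aligned(2 * sizeof(void *))));

int main(void)
{
	struct pair p = { 0 };

	/* Mirrors the checks implied by __pcpu_double_call_return_bool():
	 * without the aligned attribute (or an allocator that honors the
	 * requested alignment), the first assertion could fail and a real
	 * double cmpxchg on this object would be unsafe. */
	assert((uintptr_t)&p % (2 * sizeof(void *)) == 0);
	assert((uintptr_t)&p.second == (uintptr_t)&p.first + sizeof(p.first));

	printf("pair is aligned to %zu bytes\n", 2 * sizeof(void *));
	return 0;
}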
-rw-r--r--	include/linux/percpu.h	|  3 +++
-rw-r--r--	mm/slub.c	| 12 ++++--------
2 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8b97308e65df..9ca008f0c542 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
  * Special handling for cmpxchg_double. cmpxchg_double is passed two
  * percpu variables. The first has to be aligned to a double word
  * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
  */
 #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
 ({									\
diff --git a/mm/slub.c b/mm/slub.c
index 7be0223531b0..35f351f26193 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
-	 * Must align to double word boundary for the double cmpxchg instructions
-	 * to work.
+	 * Must align to double word boundary for the double cmpxchg
+	 * instructions to work; see __pcpu_double_call_return_bool().
 	 */
-	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-	/* Regular alignment is sufficient */
-	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+				     2 * sizeof(void *));
 
 	if (!s->cpu_slab)
 		return 0;
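Also not part of the commit: a rough user-space analog of why the percpu
allocation above asks for 2 * sizeof(void *) alignment. The whole two-word
object is swapped in one double-word compare-and-exchange; GCC's generic
__atomic_compare_exchange builtin stands in for this_cpu_cmpxchg_double()
here, and the struct and names are illustrative, not the kernel's. On x86-64
this may need -mcx16 or linking against libatomic.

#include <stdbool.h>
#include <stdio.h>

/* Rough analog of the pointer/counter pair that SLUB updates atomically;
 * the double-word swap is only guaranteed to work on a double-word-aligned
 * address, hence the explicit alignment. */
struct slot {
	void *freelist;
	unsigned long tid;
} __attribute__((aligned(2 * sizeof(void *))));

int main(void)
{
	struct slot s = { .freelist = NULL, .tid = 1 };
	struct slot expected = s;
	struct slot desired = { .freelist = &s, .tid = 2 };

	/* Stand-in for this_cpu_cmpxchg_double(): the swap succeeds only if
	 * the current contents still match 'expected'. */
	bool ok = __atomic_compare_exchange(&s, &expected, &desired,
					    false, __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST);

	printf("swap %s, tid is now %lu\n", ok ? "ok" : "lost race", s.tid);
	return 0;
}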