author		Ravikiran G Thirumalai <kiran@scalex86.org>	2006-01-11 16:46:15 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-11 22:05:01 -0500
commit		5fd63b308569060ffa40af52ed122d9734111bff (patch)
tree		746c2701ec42374b1ec389a6af0d438c9105a9d7
parent		99f7b77d3c62045bb47dfc4228a2341ba71bff1d (diff)
[PATCH] x86_64: Inclusion of ScaleMP vSMP architecture patches - vsmp_align
vSMP specific alignment patch to

1. Define INTERNODE_CACHE_SHIFT for vSMP
2. Use this for alignment of critical structures
3. Use INTERNODE_CACHE_SHIFT for ARCH_MIN_TASKALIGN, and let the slab align task_struct allocations to the internode cacheline size
4. Introduce and use ARCH_MIN_MMSTRUCT_ALIGN for mm_struct slab allocations

Signed-off-by: Ravikiran Thirumalai <kiran@scalemp.com>
Signed-off-by: Shai Fultheim <shai@scalemp.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/asm-x86_64/cache.h	12
-rw-r--r--	include/asm-x86_64/processor.h	6
-rw-r--r--	kernel/fork.c	6
3 files changed, 23 insertions(+), 1 deletion(-)
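
As a side note to the commit message: with INTERNODE_CACHE_SHIFT fixed at 12, the vSMP internode cacheline is 1 << 12 = 4096 bytes, i.e. a full page, so a structure aligned to it starts on its own page and cannot share an internode line with a neighbour. The standalone userspace sketch below is an illustration only, not part of this patch; the struct name hot_data and the demo program are invented for the example and simply show what the new alignment attribute amounts to.

/* Userspace illustration of the vSMP internode alignment -- not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define INTERNODE_CACHE_SHIFT 12                            /* as defined for vSMP */
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)  /* 4096 bytes */

/* Hypothetical stand-in for a hot, frequently written kernel structure. */
struct hot_data {
	unsigned long counter;
} __attribute__((__aligned__(INTERNODE_CACHE_BYTES)));

static struct hot_data a, b;    /* each lands on its own 4096-byte boundary */

int main(void)
{
	printf("alignment unit: %d bytes\n", INTERNODE_CACHE_BYTES);
	printf("&a = %p, offset within unit: %lu\n", (void *)&a,
	       (unsigned long)((uintptr_t)&a % INTERNODE_CACHE_BYTES));
	printf("&b = %p, offset within unit: %lu\n", (void *)&b,
	       (unsigned long)((uintptr_t)&b % INTERNODE_CACHE_BYTES));
	return 0;
}

Both offsets print as 0, and &a and &b sit at least 4096 bytes apart; that separation is the property the macros below (and the ARCH_MIN_TASKALIGN / ARCH_MIN_MMSTRUCT_ALIGN slab alignments in the rest of the patch) rely on to keep separate hot objects out of the same internode cacheline.
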
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index b4a2401de77b..263f0a211ed7 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -10,4 +10,16 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+#ifdef CONFIG_X86_VSMP
+
+/* vSMP Internode cacheline shift */
+#define INTERNODE_CACHE_SHIFT (12)
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp \
+	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
+	__attribute__((__section__(".data.page_aligned")))
+#endif
+
+#endif
+
 #endif
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 5cb151538cd5..394dd729752d 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -227,7 +227,13 @@ struct tss_struct {
 extern struct cpuinfo_x86 boot_cpu_data;
 DECLARE_PER_CPU(struct tss_struct,init_tss);
 
+#ifdef CONFIG_X86_VSMP
+#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
+#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+#else
 #define ARCH_MIN_TASKALIGN	16
+#define ARCH_MIN_MMSTRUCT_ALIGN	0
+#endif
 
 struct thread_struct {
 	unsigned long	rsp0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 16a776ec2c0b..4ae8cfc1c89c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1298,6 +1298,10 @@ long do_fork(unsigned long clone_flags,
 	return pid;
 }
 
+#ifndef ARCH_MIN_MMSTRUCT_ALIGN
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
 void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
@@ -1316,6 +1320,6 @@ void __init proc_caches_init(void)
 			sizeof(struct vm_area_struct), 0,
 			SLAB_PANIC, NULL, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
-			sizeof(struct mm_struct), 0,
+			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 }
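
Note on the default path, as a reading of the diff above: on kernels without CONFIG_X86_VSMP, ARCH_MIN_MMSTRUCT_ALIGN is 0 -- either from the new #else branch in processor.h or, on architectures that do not define the macro at all, from the #ifndef fallback added to fork.c -- so the mm_struct cache keeps its previous alignment behaviour, with SLAB_HWCACHE_ALIGN still requesting hardware-cacheline alignment from the slab allocator. Only vSMP kernels pass the 4096-byte internode alignment to kmem_cache_create().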