From 19df0c2fef010e94e90df514aaf4e73f6b80145c Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 25 Jan 2011 14:26:50 +0100
Subject: percpu: align percpu readmostly subsection to cacheline

Currently the percpu readmostly subsection may share cachelines with other
percpu subsections, which can result in unnecessary cacheline bouncing and
performance degradation.

This patch adds a @cacheline parameter to the PERCPU() and PERCPU_VADDR()
linker macros, makes each arch linker script specify its cacheline size,
and uses it to align the percpu subsections.

This is based on Shaohua's x86-only patch.

Signed-off-by: Tejun Heo
Cc: Shaohua Li
---
 arch/alpha/kernel/vmlinux.lds.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/alpha')

diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 003ef4c02585..173518f8c8bb 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -38,7 +38,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(64, PAGE_SIZE)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
--
cgit v1.2.2


From 0f06c063628689de5eab5dfb3ce9c797a09db900 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 25 Jan 2011 14:27:38 +0100
Subject: alpha: use L1_CACHE_BYTES for cacheline size in the linker script

Currently the linker script uses a hard-coded 64 for the cacheline size,
which isn't optimal for all cases. Include asm/cache.h and use
L1_CACHE_BYTES instead, as suggested by Sam Ravnborg.

Signed-off-by: Tejun Heo
Cc: Sam Ravnborg
---
 arch/alpha/kernel/vmlinux.lds.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/alpha')

diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 173518f8c8bb..433be2a24f31 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT("elf64-alpha")
@@ -38,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(64, PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
@@ -46,7 +47,7 @@ SECTIONS
 	/* Freed after init ends here */
 
 	_data = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
 	.got : {
 		*(.got)
--
cgit v1.2.2
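
For context on what the new first argument does on the generic side, below is a
simplified, hypothetical sketch of a cacheline-aware PERCPU() linker macro. The
real definition lives in include/asm-generic/vmlinux.lds.h and also provides
__per_cpu_load, a PERCPU_VADDR() variant, and program-header handling; the
PERCPU_SKETCH name and the exact layout here are illustrative only, not the
upstream code.

/*
 * Hypothetical, simplified sketch of a cacheline-aware percpu output
 * section.  The subsection names match the kernel's .data..percpu input
 * sections, but the macro name and layout are illustrative.  The
 * ALIGN(cacheline) before and after the readmostly subsection is what
 * keeps it from sharing cachelines with its neighbours.
 */
#define PERCPU_SKETCH(cacheline, align)					\
	. = ALIGN(align);						\
	.data..percpu : {						\
		__per_cpu_start = .;					\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu..readmostly)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		__per_cpu_end = .;					\
	}

/* An arch linker script would then pass its own sizes, e.g.:		*/
/*	PERCPU_SKETCH(L1_CACHE_BYTES, PAGE_SIZE)			*/

Passing L1_CACHE_BYTES as the cacheline argument, as the second patch does for
alpha, pads .data..percpu..readmostly out to whole cachelines so the read-mostly
percpu data never shares a line with the frequently written subsections that
follow it.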