author     Tim Abbott <tabbott@ksplice.com>   2010-02-19 19:03:34 -0500
committer  Michal Marek <mmarek@suse.cz>      2010-03-03 05:25:58 -0500
commit     4af57b787b4be09419a2bb48aa705fa87ef41cca (patch)
tree       aa0d843953181e8e75f6937da6aaf3072dc63970
parent     bc75cc6b5636eed5f6a481cba808e906f71cfd94 (diff)
Rename .data.cacheline_aligned to .data..cacheline_aligned.
Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>
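Background on the double dot: gcc's -ffunction-sections/-fdata-sections
options emit one section per symbol, named .data.<symbol>, so a kernel
section named .data.cacheline_aligned could collide with the
compiler-generated section for an ordinary symbol that happened to be
named cacheline_aligned. A minimal C sketch of the collision, using a
hypothetical variable name:

/* Compiled with gcc -fdata-sections, an initialized writable object is
 * placed in its own section named after the symbol: */
static int cacheline_aligned = 1;  /* -> section ".data.cacheline_aligned" */

/* That name is identical to the kernel's old special section, so the
 * linker script could not tell the two apart.  gcc never puts ".." in
 * a generated section name, which makes ".data..cacheline_aligned"
 * unambiguous. */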
 arch/powerpc/kernel/vmlinux.lds.S | 2 +-
 arch/x86/kernel/init_task.c       | 2 +-
 include/asm-generic/vmlinux.lds.h | 2 +-
 include/linux/cache.h             | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e701..3229c0622161 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0e..43e9ccf44947 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
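The comment kept in this hunk states the placement rule behind the new
name. A minimal usage sketch in C (hypothetical struct and variable
names, not the file's actual TSS definition):

struct cpu_local_state {
	unsigned long ticks;
};

/* Aligned to SMP_CACHE_BYTES and emitted into .data..cacheline_aligned,
 * so nothing else shares this variable's cache line and CPUs do not
 * ping-pong the line between their caches. */
static struct cpu_local_state example_state __cacheline_aligned;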
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..78450aaab9ef 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -189,7 +189,7 @@
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c6..4c570653ab84 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
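For reference, the expansion this hunk changes is visible in full above;
a declaration such as the following (hypothetical type and variable
names):

	struct foo bar __cacheline_aligned;

preprocesses to:

	struct foo bar
		__attribute__((__aligned__(SMP_CACHE_BYTES),
			       __section__(".data..cacheline_aligned")));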