author	Fenghua Yu <fenghua.yu@intel.com>	2007-07-19 04:48:12 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:44 -0400
commit	5fb7dc37dc16fbc8b80d81318a582201ef7e280d (patch)
tree	4d6bb4441ece64380e7186ebadd35ad2f5486f9f /include
parent	3d7e33825d8799115dd2495c9944badd3272a623 (diff)
define new percpu interface for shared data
The per-cpu data section contains two types of data: one set is accessed exclusively by the local cpu, while the other is per cpu but is also shared with remote cpus. In the current kernel these two sets are not clearly separated, so a single cacheline can end up shared between them, resulting in unnecessary bouncing of that cacheline between cpus.

One way to fix the problem is to cacheline-align the remotely accessed per-cpu data at both the beginning and the end. Because of the padding at both ends, this would likely waste some memory, and the interface to achieve it is not clean.

This patch moves the remotely accessed per-cpu data (currently marked ____cacheline_aligned_in_smp) into a separate section in which all data elements are cacheline aligned. This cleanly separates local-only data from remotely accessed data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: <linux-arch@vger.kernel.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
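As a minimal sketch of the new interface (the struct and variable names below are hypothetical, invented for illustration and not part of this patch), a remotely accessed per-cpu variable moves from open-coded alignment to the new macro:

/* Before: alignment open-coded on the definition; the element still
 * lives in .data.percpu next to local-only data, so padding at both
 * ends would be needed to fully isolate its cacheline. */
DEFINE_PER_CPU(struct shared_stats, pkt_stats) ____cacheline_aligned_in_smp;

/* After: the definition lands in .data.percpu.shared_aligned, where
 * every element is cacheline aligned, so it never shares a line with
 * local-only percpu data. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct shared_stats, pkt_stats);

Access through per_cpu() and __get_cpu_var() is unchanged; only the placement and alignment of the definition differ.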
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/percpu.h	8
-rw-r--r--	include/asm-generic/vmlinux.lds.h	8
-rw-r--r--	include/asm-i386/percpu.h	5
-rw-r--r--	include/asm-ia64/percpu.h	10
-rw-r--r--	include/asm-powerpc/percpu.h	7
-rw-r--r--	include/asm-s390/percpu.h	7
-rw-r--r--	include/asm-sparc64/percpu.h	7
-rw-r--r--	include/asm-x86_64/percpu.h	7
8 files changed, 59 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a9041436..d85172e9ed45 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
 	extern int simple_identifier_##var(void); \
@@ -34,6 +39,9 @@ do { \
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
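A sketch of what the two configurations above produce for a hypothetical definition (the name remote_hits is invented for illustration):

DEFINE_PER_CPU_SHARED_ALIGNED(long, remote_hits);

/* CONFIG_SMP expansion (per the hunk above):
 *   __attribute__((__section__(".data.percpu.shared_aligned")))
 *   long per_cpu__remote_hits ____cacheline_aligned_in_smp;
 *
 * UP expansion: identical to DEFINE_PER_CPU(long, remote_hits) -- no
 * special section and no alignment padding, since a single cpu has no
 * cacheline bouncing to avoid. */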
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1d..a2b09ed852ad 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,3 +245,11 @@
 	*(.initcall7.init) \
 	*(.initcall7s.init)
 
+#define PERCPU(align) \
+	. = ALIGN(align); \
+	__per_cpu_start = .; \
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+		*(.data.percpu) \
+		*(.data.percpu.shared_aligned) \
+	} \
+	__per_cpu_end = .;
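Architectures using asm-generic/vmlinux.lds.h can then emit the whole per-cpu area with a single invocation. The fragment below is a hypothetical vmlinux.lds.S usage, with 32-byte alignment chosen only as an example:

/* Hypothetical arch vmlinux.lds.S fragment (illustrative only). */
PERCPU(32)

This expands to an output section that collects the ordinary .data.percpu input sections first and the cacheline-aligned .data.percpu.shared_aligned sections after them, bracketed by the __per_cpu_start and __per_cpu_end symbols that the per-cpu setup code uses to size and copy the area.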
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index fbe5cf3ab8dc..43a7aac414e0 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -29,6 +29,16 @@
 	__attribute__((__section__(".data.percpu"))) \
 	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
+#endif
+
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
  * external routine, to avoid include-hell.
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 2f2e3024fa61..73dc8ba4010d 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 9ea7f1023e57..545857e64443 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -41,6 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 	__attribute__((__section__(".data.percpu"))) \
 	__typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
@@ -59,6 +64,8 @@ do { \
 
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
 
 #define __get_cpu_var(var) __reloc_hide(var,0)
 #define __raw_get_cpu_var(var) __reloc_hide(var,0)
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 88db872ce2f8..caf8750792ff 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -18,6 +18,11 @@ extern unsigned long __per_cpu_shift;
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+
 register unsigned long __local_per_cpu_offset asm("g5");
 
 /* var is in discarded region: offset to particular copy we want */
@@ -38,6 +43,8 @@ do { \
 #define real_setup_per_cpu_areas() do { } while (0)
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index c6fbb67eac90..5abd48270101 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
 	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name \
+	____cacheline_internodealigned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
 	extern int simple_identifier_##var(void); \
@@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
 	__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var