author	Fenghua Yu <fenghua.yu@intel.com>	2007-07-19 04:48:13 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:45 -0400
commit	f34e3b61f2be9628bd41244f3ecc42009c5eced5 (patch)
tree	192a64c12f84b3d69b9bf12ba56c2c7d86bc269b
parent	5fb7dc37dc16fbc8b80d81318a582201ef7e280d (diff)
use the new percpu interface for shared data
Currently most of the per-cpu data which is accessed by different cpus has a ____cacheline_aligned_in_smp attribute. Move all this data to the new per-cpu shared data section: .data.percpu.shared_aligned.

This will separate the percpu data which is referenced frequently by other cpus from the local-only percpu data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/i386/kernel/init_task.c	2
-rw-r--r--	arch/i386/kernel/irq.c	2
-rw-r--r--	arch/ia64/kernel/smp.c	2
-rw-r--r--	arch/x86_64/kernel/init_task.c	2
-rw-r--r--	kernel/sched.c	2
5 files changed, 5 insertions, 5 deletions
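For context, DEFINE_PER_CPU_SHARED_ALIGNED() is the new interface referenced in the commit message: it places the per-cpu variable in the .data.percpu.shared_aligned section and keeps it cacheline-aligned, instead of tagging each definition with an explicit alignment attribute. A minimal sketch of such a macro, built on the existing per-cpu naming convention, is shown below; the exact definition lives in include/linux/percpu.h for this kernel version and may differ in detail.

/* Sketch only: assumes the per_cpu__ name-mangling convention of this
 * kernel era and the .data.percpu.shared_aligned section added by the
 * parent commit. */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__typeof__(type) per_cpu__##name				\
	____cacheline_aligned_in_smp

/* Usage matches DEFINE_PER_CPU(); only the section placement and the
 * implied alignment change, e.g.: */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

Grouping these frequently cross-CPU-accessed variables into one aligned section avoids padding every local-only per-cpu variable to a cacheline boundary while still preventing false sharing for the data that other cpus actually write.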
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index cff95d10a4d8..d26fc063a760 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index d2daf672f4a2..ba44d40b066d 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b3a47f986e1e..9f72838db26e 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 3dc5854ba21e..4ff33d4f8551 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379..645256b228c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)