path: root/kernel/sched.c
author    Tejun Heo <tj@kernel.org>  2009-06-24 02:13:47 -0400
committer Tejun Heo <tj@kernel.org>  2009-06-24 02:13:47 -0400
commit    b9bf3121af348d9255f1c917830fe8c2df52efcb (patch)
tree      477f93b1000d7ac4bd283ee75d632b107eaf9600 /kernel/sched.c
parent    204fba4aa303ea4a7bb726a539bf4a5b9e3203d0 (diff)
percpu: use DEFINE_PER_CPU_SHARED_ALIGNED()
There are a few places where ____cacheline_aligned* is used with
DEFINE_PER_CPU().  Use DEFINE_PER_CPU_SHARED_ALIGNED() instead.

DEFINE_PER_CPU_SHARED_ALIGNED() applies the alignment only on SMP.  While
all of the other converted places either used the _in_smp variant or only
get compiled for SMP, net/rds used an unconditional ____cacheline_aligned.
I don't see any reason these data structures should be aligned on UP, so
they are converted together as well.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Andy Grover <andy.grover@oracle.com>
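For readers unfamiliar with the macro, here is a minimal, hypothetical userspace sketch of the idea the commit relies on: apply the cacheline-alignment attribute only when building for SMP, and fall back to a plain definition on UP. The names DEFINE_SHARED_ALIGNED_DEMO, CACHE_LINE_SIZE, struct cfs_rq_like and demo_rq are illustrative stand-ins, not the kernel's actual percpu definitions.

/*
 * Hypothetical sketch -- not the kernel's real percpu macros.
 * Demonstrates "cacheline-align on SMP, plain definition on UP".
 * Build:  gcc -DCONFIG_SMP demo.c      (SMP-style build)
 *         gcc demo.c                   (UP-style build)
 */
#include <stdio.h>

#define CACHE_LINE_SIZE 64	/* assumed cache line size for the demo */

#ifdef CONFIG_SMP
/* SMP: align to a cache line so two CPUs cannot false-share the object. */
# define DEFINE_SHARED_ALIGNED_DEMO(type, name) \
	type name __attribute__((__aligned__(CACHE_LINE_SIZE)))
#else
/* UP: only one CPU exists, so false sharing is impossible; skip the padding. */
# define DEFINE_SHARED_ALIGNED_DEMO(type, name) \
	type name
#endif

struct cfs_rq_like {		/* stand-in for a per-cpu runqueue structure */
	long nr_running;
	long load;
};

static DEFINE_SHARED_ALIGNED_DEMO(struct cfs_rq_like, demo_rq);

int main(void)
{
	/* Prints 64 with -DCONFIG_SMP, the struct's natural alignment otherwise. */
	printf("alignment of demo_rq: %zu\n", __alignof__(demo_rq));
	return 0;
}

Built without -DCONFIG_SMP the structure keeps its natural alignment, which is the saving on UP that the commit message describes.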
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 4
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d186e6..34fd81d21784 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,12 +318,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group