author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-04-19 13:45:00 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-04-19 13:45:00 -0400
commit    b758149c02638146a835f42097dd1950a6cae638
tree      a17be39d414eaa6ba6de612c049765f3bbf30efb    /kernel/sched_fair.c
parent    b40b2e8eb52192a8a22d707ed37925792b7bdfd1
sched: prepatory code movement
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--    kernel/sched_fair.c    190
1 file changed, 92 insertions(+), 98 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3dde0f0ec93a..de4250c53a19 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -77,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  * CFS operations on generic schedulable entities:
  */
 
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+        return container_of(se, struct task_struct, se);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
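The hunk above moves task_of() in front of the CONFIG_FAIR_GROUP_SCHED block; previously it sat below that block (see the third hunk), too late for the stub helpers that now need it. For readers new to the idiom: container_of() recovers the enclosing structure from a pointer to one of its embedded members. Below is a minimal userspace sketch of the same trick, using trimmed stand-in types rather than the kernel's real task_struct and sched_entity:

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel macro: step back from the member
 * to the start of the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity { int weight; };    /* stand-in */

struct task_struct {                    /* stand-in */
        int pid;
        struct sched_entity se;         /* embedded, as in the kernel */
};

static struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

int main(void)
{
        struct task_struct t = { .pid = 42, .se = { .weight = 0 } };

        printf("%d\n", task_of(&t.se)->pid);    /* prints 42 */
        return 0;
}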
@@ -88,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)      (!se->my_q)
 
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+                for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+        return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+        return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+        return grp->my_q;
+}
+
+/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+        return cfs_rq->tg->cfs_rq[this_cpu];
+}
+
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+        if (se->cfs_rq == pse->cfs_rq)
+                return 1;
+
+        return 0;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+        return se->parent;
+}
+
 #else   /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
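This hunk relocates the group-scheduling helpers that used to live near the task-level code (they are removed in the last hunk below). The central idiom is for_each_sched_entity(), which follows se->parent from a task's entity up through its enclosing group entities until it reaches the top. A standalone sketch of that upward walk, with toy types standing in for the kernel's:

#include <stdio.h>

struct sched_entity {
        const char *name;
        struct sched_entity *parent;    /* NULL at the root */
};

/* Same shape as the kernel macro in the hunk above. */
#define for_each_sched_entity(se) \
        for (; se; se = se->parent)

int main(void)
{
        struct sched_entity root  = { "root group", NULL };
        struct sched_entity group = { "child group", &root };
        struct sched_entity task  = { "task", &group };
        struct sched_entity *se = &task;

        for_each_sched_entity(se)
                printf("%s\n", se->name);   /* task, child group, root group */
        return 0;
}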
@@ -97,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 
 #define entity_is_task(se)      1
 
-#endif  /* CONFIG_FAIR_GROUP_SCHED */
+#define for_each_sched_entity(se) \
+                for (; se; se = NULL)
 
-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 {
-        return container_of(se, struct task_struct, se);
+        return &task_rq(p)->cfs;
 }
 
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+        struct task_struct *p = task_of(se);
+        struct rq *rq = task_rq(p);
+
+        return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+        return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+        return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+        return 1;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+        return NULL;
+}
+
+#endif  /* CONFIG_FAIR_GROUP_SCHED */
+
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
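In the flat (!CONFIG_FAIR_GROUP_SCHED) stubs above, the hierarchy collapses to one level: for_each_sched_entity() expands to "for (; se; se = NULL)", so the loop body runs exactly once, and every entity maps to the per-cpu rq's single cfs runqueue. Note that the stub cfs_rq_of() now calls task_of(), which only works because the first hunk moved task_of() above this block; that dependency is what forces the movement. A trivial standalone check of the single-iteration expansion:

#include <stdio.h>

#define for_each_sched_entity(se) \
        for (; se; se = NULL)

int main(void)
{
        int entity = 1;
        int *se = &entity;      /* any non-NULL pointer works here */
        int iterations = 0;

        for_each_sched_entity(se)
                iterations++;

        printf("%d\n", iterations);     /* prints 1 */
        return 0;
}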
@@ -699,101 +788,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  * CFS operations on tasks:
  */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* Walk up scheduling entities hierarchy */
-#define for_each_sched_entity(se) \
-                for (; se; se = se->parent)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-        return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-        return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-        return grp->my_q;
-}
-
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-        return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-
-/* Do the two (enqueued) entities belong to the same group ? */
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-        if (se->cfs_rq == pse->cfs_rq)
-                return 1;
-
-        return 0;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-        return se->parent;
-}
-
-#else   /* CONFIG_FAIR_GROUP_SCHED */
-
-#define for_each_sched_entity(se) \
-                for (; se; se = NULL)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-        return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-        struct task_struct *p = task_of(se);
-        struct rq *rq = task_rq(p);
-
-        return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-        return NULL;
-}
-
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-        return &cpu_rq(this_cpu)->cfs;
-}
-
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-        return 1;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-        return NULL;
-}
-
-#endif  /* CONFIG_FAIR_GROUP_SCHED */
-
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
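The final hunk deletes the old copies of the helpers from their previous home above the task-level code; the change is pure code movement, consistent with the near-balanced 92-insertion/98-deletion diffstat. To see why the helpers are shaped this way, here is a rough userspace sketch of how sched_fair.c's enqueue path combines them: walk the entity chain with for_each_sched_entity(), stop at the first entity that is already queued, and enqueue each one on the runqueue reported by cfs_rq_of(). The types and enqueue_entity() below are toy stand-ins, not the kernel's:

#include <stdio.h>

struct cfs_rq { int nr_running; };

struct sched_entity {
        int on_rq;                      /* already queued? */
        struct cfs_rq *cfs_rq;          /* runqueue this entity goes on */
        struct sched_entity *parent;
};

#define for_each_sched_entity(se) \
        for (; se; se = se->parent)

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        cfs_rq->nr_running++;           /* toy bookkeeping only */
        se->on_rq = 1;
}

/* Mirrors the shape of enqueue_task_fair(): enqueue the task's entity,
 * then each parent group entity that is not already on a runqueue. */
static void enqueue_sketch(struct sched_entity *se)
{
        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                enqueue_entity(cfs_rq_of(se), se);
        }
}

int main(void)
{
        struct cfs_rq top = { 0 }, child = { 0 };
        struct sched_entity group = { 0, &top, NULL };
        struct sched_entity task  = { 0, &child, &group };

        enqueue_sketch(&task);
        printf("%d %d\n", child.nr_running, top.nr_running);    /* 1 1 */
        return 0;
}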