-rw-r--r--  kernel/sched.c       |  2
-rw-r--r--  kernel/sched_fair.c  | 31
2 files changed, 10 insertions, 23 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f6a81061fd50..3209e2cc2c2e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -189,11 +189,11 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 	struct rb_node *rb_load_balance_curr;
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/* 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
 	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 105d57b41aa2..335faf06a561 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -111,51 +111,38 @@ extern struct sched_class fair_sched_class;
  * CFS operations on generic schedulable entities:
  */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* cpu runqueue to which this cfs_rq is attached */
-static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->rq;
-}
-
 /* currently running entity (if any) on this cfs_rq */
 static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->curr;
 }
 
-/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
-
 static inline void
 set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	cfs_rq->curr = se;
 }
 
-#else /* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
+/* cpu runqueue to which this cfs_rq is attached */
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
-	return container_of(cfs_rq, struct rq, cfs);
+	return cfs_rq->rq;
 }
 
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
-	struct rq *rq = rq_of(cfs_rq);
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se) (!se->my_q)
 
-	if (unlikely(rq->curr->sched_class != &fair_sched_class))
-		return NULL;
+#else /* CONFIG_FAIR_GROUP_SCHED */
 
-	return &rq->curr->se;
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return container_of(cfs_rq, struct rq, cfs);
 }
 
 #define entity_is_task(se)	1
 
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct task_struct *task_of(struct sched_entity *se)
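
The net effect of the two hunks is that cfs_rq->curr and its accessors become unconditional, while only the rq back-pointer and entity_is_task() stay config-dependent; the removed !CONFIG_FAIR_GROUP_SCHED cfs_rq_curr() no longer needs to derive the current entity from rq->curr. The following is a minimal, standalone sketch, not taken from the kernel tree: it uses a stand-in macro FAKE_FAIR_GROUP_SCHED instead of CONFIG_FAIR_GROUP_SCHED and trims struct cfs_rq down to the fields touched here, purely to illustrate the resulting layout.

/*
 * Standalone sketch (not kernel code): models the structure this patch
 * leaves behind. The curr pointer and its accessors exist in both
 * configurations; only the rq back-pointer remains under the
 * group-scheduling ifdef. FAKE_FAIR_GROUP_SCHED is a stand-in macro.
 */
#include <stdio.h>

struct rq;				/* opaque here; referenced by pointer only */
struct sched_entity { int weight; };

struct cfs_rq {
	struct sched_entity *curr;	/* tracked unconditionally after this change */
#ifdef FAKE_FAIR_GROUP_SCHED
	struct rq *rq;			/* cpu runqueue this cfs_rq is attached to */
#endif
};

/* common accessors, no longer duplicated per config branch */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
	return cfs_rq->curr;
}

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->curr = se;
}

int main(void)
{
	struct sched_entity se = { .weight = 1024 };
	struct cfs_rq cfs_rq = { .curr = NULL };

	set_cfs_rq_curr(&cfs_rq, &se);
	printf("curr tracked: %d\n", cfs_rq_curr(&cfs_rq) == &se);
	set_cfs_rq_curr(&cfs_rq, NULL);
	printf("curr cleared: %d\n", cfs_rq_curr(&cfs_rq) == NULL);
	return 0;
}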