Diffstat (limited to 'litmus/sched_litmus.c')
-rw-r--r--	litmus/sched_litmus.c	37
1 file changed, 10 insertions, 27 deletions
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 889d300760f9..81ea464a81bc 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -62,7 +62,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	 */
 	was_running = is_running(prev);
 	mb();
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	/* Don't race with a concurrent switch. This could deadlock in
 	 * the case of cross or circular migrations. It's the job of
@@ -93,7 +93,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 
 			/* bail out */
-			spin_lock(&rq->lock);
+			raw_spin_lock(&rq->lock);
 			return next;
 		}
 	}
@@ -141,7 +141,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 		}
 		/* release the other CPU's runqueue, but keep ours */
-		spin_unlock(&other_rq->lock);
+		raw_spin_unlock(&other_rq->lock);
 	}
 	if (next) {
 		next->rt_param.stack_in_use = rq->cpu;
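
The three hunks above are mechanical fallout from the mainline spinlock split (the raw_spinlock_t API introduced during the 2.6.33 cycle): rq->lock is now a raw_spinlock_t, so every direct user of the runqueue lock must switch to the raw_spin_* helpers. A minimal kernel-style sketch of the pattern, using a hypothetical demo_rq in place of the scheduler's struct rq:

#include <linux/spinlock.h>

/* hypothetical stand-in for the scheduler's per-CPU struct rq */
struct demo_rq {
	raw_spinlock_t lock;	/* was a plain spinlock_t before the split */
};

static void demo_rq_access(struct demo_rq *rq)
{
	raw_spin_lock(&rq->lock);	/* was: spin_lock(&rq->lock) */
	/* ... inspect or modify run-queue state ... */
	raw_spin_unlock(&rq->lock);	/* was: spin_unlock(&rq->lock) */
}

The distinction matters because mainline allows spinlock_t to become a sleeping lock under PREEMPT_RT; core scheduler locks that must never sleep use the raw variant.
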
@@ -152,7 +152,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 }
 
 static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
-				int wakeup)
+				int wakeup, bool head)
 {
 	if (wakeup) {
 		sched_trace_task_resume(p);
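
The extra bool head parameter follows the updated mainline enqueue_task() hook, which lets callers enqueue a task at the head of its list rather than the tail. LITMUS^RT delegates queue ordering to the active plugin, so the hint can be ignored; a hedged sketch, with demo_plugin_resume() standing in for whatever the plugin actually does on wakeup:

/* Sketch only: a class enqueue hook that ignores the head hint. */
static void enqueue_task_demo(struct rq *rq, struct task_struct *p,
			      int wakeup, bool head)
{
	(void)head;	/* queue order is the plugin's decision */
	if (wakeup)
		demo_plugin_resume(p);	/* hypothetical resume path */
}
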
@@ -245,7 +245,7 @@ static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
 {
 }
 
-unsigned int get_rr_interval_litmus(struct task_struct *p)
+unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p)
 {
 	/* return infinity */
 	return 0;
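
get_rr_interval() likewise grew a struct rq * argument upstream. The hook backs sys_sched_rr_get_interval(), and returning 0 here encodes "infinity", i.e. no round-robin quantum, which is the right answer for a class that never time-slices in the round-robin sense. For comparison, a class that does time-slice would return its quantum in jiffies; a sketch with a made-up DEMO_TIMESLICE:

#include <linux/jiffies.h>

#define DEMO_TIMESLICE	(100 * HZ / 1000)	/* hypothetical 100 ms quantum */

static unsigned int get_rr_interval_demo(struct rq *rq,
					 struct task_struct *p)
{
	return DEMO_TIMESLICE;	/* in jiffies; 0 would mean no quantum */
}
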
@@ -263,31 +263,16 @@ static void set_curr_task_litmus(struct rq *rq)
 
 
 #ifdef CONFIG_SMP
-/* execve tries to rebalance task in this scheduling domain */
+/* execve tries to rebalance task in this scheduling domain.
+ * We don't care about the scheduling domain; can get called from
+ * exec, fork, wakeup.
+ */
 static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
 {
 	/* preemption is already disabled.
 	 * We don't want to change cpu here
 	 */
-	return smp_processor_id();
-}
-
-/* we don't repartition at runtime */
-
-static unsigned long
-load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		    unsigned long max_load_move,
-		    struct sched_domain *sd, enum cpu_idle_type idle,
-		    int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		     struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
+	return task_cpu(p);
+}
 #endif
 
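
The switch from smp_processor_id() to task_cpu(p) matters because, as the new comment notes, this hook runs on the exec, fork, and wakeup paths, possibly on a CPU other than the task's own. Returning the caller's CPU would invite the stock balancer to migrate a real-time task behind the plugin's back; returning task_cpu(p) leaves the task where it is. A kernel-style sketch of the contrast:

static int select_task_rq_demo(struct task_struct *p, int sd_flag, int flags)
{
	/*
	 * smp_processor_id() names the CPU executing this hook, which on
	 * a fork/exec/wakeup path need not be p's CPU. task_cpu(p) keeps
	 * the task in place; any migration is then the plugin's decision.
	 */
	return task_cpu(p);
}

The deleted load_balance_litmus()/move_one_task_litmus() stubs go away because the corresponding callbacks were removed from struct sched_class upstream; see the final hunk.
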
@@ -305,8 +290,6 @@ const struct sched_class litmus_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_litmus,
 
-	.load_balance		= load_balance_litmus,
-	.move_one_task		= move_one_task_litmus,
 	.pre_schedule		= pre_schedule_litmus,
#endif
 
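
With the .load_balance and .move_one_task members gone from struct sched_class, stale initializers would now be compile errors, which is how an out-of-tree class like LITMUS^RT notices this kind of API removal. A hypothetical miniature ops table showing the same designated-initializer pattern:

/* Hypothetical mini ops table; entries for removed hooks
 * (e.g. .load_balance) would no longer compile.
 */
struct demo_class_ops {
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
	void (*pre_schedule)(struct rq *rq, struct task_struct *prev);
};

static const struct demo_class_ops demo_ops = {
	.select_task_rq	= select_task_rq_litmus,
	.pre_schedule	= pre_schedule_litmus,
};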