| author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:50:17 -0400 |
|---|---|---|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:57:28 -0400 |
| commit | 8e9830a5bdb081fd3f4387db3a3838a687dfdad2 (patch) | |
| tree | aff1662d222aa6d76b973a471b194fd5d93f7952 /litmus/sched_litmus.c | |
| parent | a66246f9e973a68fb9955a2fa7663a2e02afbd30 (diff) | |
Update sched_class and spinlock_t in litmus.c

- get_rr_interval() changed its signature
- load_balance() and move_one_task() are no longer needed
- spinlock_t -> raw_spinlock_t

This commit does not compile.
Diffstat (limited to 'litmus/sched_litmus.c')

-rw-r--r-- litmus/sched_litmus.c | 37

1 file changed, 10 insertions(+), 27 deletions(-)
```diff
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index c1fc7748e590..0cdf284eb9c2 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -60,7 +60,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	 */
 	was_running = is_running(prev);
 	mb();
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 
 	/* Don't race with a concurrent switch. This could deadlock in
 	 * the case of cross or circular migrations. It's the job of
```
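The hunk above is the first of several mechanical spin_*() → raw_spin_*() conversions. With the PREEMPT_RT-derived lock split merged into mainline (v2.6.33), spinlock_t may become a sleeping lock, so locks taken inside the scheduler core itself, such as rq->lock, must be raw_spinlock_t, which always spins. A minimal sketch of the raw API, with a hypothetical lock name:

```c
#include <linux/spinlock.h>

/* Hypothetical lock, for illustration only. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() keeps spinning even on PREEMPT_RT,
	 * which is exactly what scheduler-internal locks need. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... rq->lock-style critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
```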
```diff
@@ -91,7 +91,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 
 			/* bail out */
-			spin_lock(&rq->lock);
+			raw_spin_lock(&rq->lock);
 			return next;
 		}
 	}
@@ -139,7 +139,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 			next = NULL;
 		}
 		/* release the other CPU's runqueue, but keep ours */
-		spin_unlock(&other_rq->lock);
+		raw_spin_unlock(&other_rq->lock);
 	}
 	if (next) {
 		next->rt_param.stack_in_use = rq->cpu;
```
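Both hunks above sit in the cross-runqueue path of litmus_schedule(): a remote runqueue is locked while pulling `next`, then dropped while our own lock is retained. The "don't race with a concurrent switch" comment refers to the usual deadlock-avoidance discipline for holding two runqueue locks at once; a sketch of that discipline, modeled on the kernel's double_rq_lock() and not taken from the LITMUS code:

```c
/* Sketch only -- ordering the two acquisitions by lock address makes
 * cross and circular migrations deadlock-free, because every CPU
 * agrees on which lock comes first.
 */
static void sketch_double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		raw_spin_lock(&rq1->lock);
		raw_spin_lock(&rq2->lock);
	} else {
		raw_spin_lock(&rq2->lock);
		raw_spin_lock(&rq1->lock);
	}
}
```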
```diff
@@ -150,7 +150,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 }
 
 static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
-				int wakeup)
+				int wakeup, bool head)
 {
 	if (wakeup) {
 		sched_trace_task_resume(p);
```
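The new `bool head` parameter tracks the mainline change to the enqueue path: a caller may now ask for a task to be queued at the head of its run list instead of the tail (used when requeueing). A sketch of what the flag means for a plain list-backed queue; this is illustrative only, since LITMUS plugins keep their own ready queues:

```c
#include <linux/list.h>
#include <linux/types.h>

/* Illustrative only -- not the LITMUS enqueue path. */
static void sketch_enqueue(struct list_head *queue,
			   struct list_head *entry, bool head)
{
	if (head)
		list_add(entry, queue);		/* requeue at the head */
	else
		list_add_tail(entry, queue);	/* normal tail enqueue */
}
```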
```diff
@@ -243,7 +243,7 @@ static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
 {
 }
 
-unsigned int get_rr_interval_litmus(struct task_struct *p)
+unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p)
 {
 	/* return infinity */
 	return 0;
```
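get_rr_interval() now receives the runqueue along with the task, so class implementations no longer have to look it up themselves. The core caller ends up looking roughly like the sketch below (the wrapper name is made up, and task_rq_lock()/task_rq_unlock() are private helpers in kernel/sched.c). LITMUS returns 0, which the comment reads as an infinite, i.e. nonexistent, round-robin quantum:

```c
/* Rough shape of the core caller after the signature change. */
static unsigned int sketch_rr_interval(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int slice;

	rq = task_rq_lock(p, &flags);	/* pin p to its runqueue */
	slice = p->sched_class->get_rr_interval(rq, p);
	task_rq_unlock(rq, &flags);
	return slice;			/* 0 for LITMUS tasks */
}
```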
```diff
@@ -261,31 +261,16 @@ static void set_curr_task_litmus(struct rq *rq)
 
 
 #ifdef CONFIG_SMP
-/* execve tries to rebalance task in this scheduling domain */
+/* execve tries to rebalance task in this scheduling domain.
+ * We don't care about the scheduling domain; it can get called from
+ * exec, fork, wakeup.
+ */
 static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
 {
 	/* preemption is already disabled.
 	 * We don't want to change cpu here
 	 */
-	return smp_processor_id();
-}
-
-/* we don't repartition at runtime */
-
-static unsigned long
-load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		    unsigned long max_load_move,
-		    struct sched_domain *sd, enum cpu_idle_type idle,
-		    int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		     struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
+	return task_cpu(p);
 }
 #endif
 
```
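The behavioural change here: returning smp_processor_id() would place the task on whichever CPU happened to execute the wakeup, fork, or exec path, while task_cpu(p) keeps the task on the CPU it is already assigned to, leaving any migration decision to the active LITMUS plugin. For reference, a sketch of what task_cpu() amounts to in kernels of this vintage:

```c
/* Effect of task_cpu(): it returns the CPU recorded in the task's
 * thread_info, i.e. where the task already is queued.
 */
static inline unsigned int sketch_task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}
```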
```diff
@@ -303,8 +288,6 @@ const struct sched_class litmus_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_litmus,
 
-	.load_balance		= load_balance_litmus,
-	.move_one_task		= move_one_task_litmus,
 	.pre_schedule		= pre_schedule_litmus,
 #endif
 
```
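Finally, the load_balance and move_one_task initializers disappear because mainline v2.6.33 moved all load-balancing logic into sched_fair.c and dropped both callbacks from struct sched_class, so non-fair classes no longer stub them. An approximate sketch of the SMP hooks a scheduling class still provides after that change (the exact field list is an assumption; the authoritative declaration is struct sched_class in include/linux/sched.h):

```c
/* Approximate, abbreviated -- the real struct sched_class carries a
 * few more callbacks than shown here.
 */
struct sched_class_smp_hooks {
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
	void (*pre_schedule)(struct rq *this_rq, struct task_struct *task);
	void (*post_schedule)(struct rq *this_rq);
	/* no more load_balance() / move_one_task() stubs required */
};
```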