about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2014-09-17 03:33:32 -0400
committerNamhoon Kim <namhoonk@cs.unc.edu>2014-11-03 21:59:45 -0500
commit33ad22dfbddcff613fd530f3721cd3e941f4614c (patch)
treeb1c172bfda9d36fa7bd2419724f411523fb4f29b
parentb1c6f8b1f57417ea05d83261e8a20623ca11b6d5 (diff)
P-RES: keep track in per-task state of whether it suspended
Checking state->scheduled is not accurate when bandwidth inheritance is applied.
-rw-r--r--litmus/sched_pres.c12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/litmus/sched_pres.c b/litmus/sched_pres.c
index 6126852ec8ab..34b096dece90 100644
--- a/litmus/sched_pres.c
+++ b/litmus/sched_pres.c
@@ -17,6 +17,7 @@
 struct pres_task_state {
 	struct task_client res_info;
 	int cpu;
+	bool has_departed;
 };
 
 struct pres_cpu_state {
@@ -49,6 +50,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 	client = &state->res_info.client;
 
 	res->ops->client_departs(res, client, job_complete);
+	state->has_departed = true;
 }
 
 static void task_arrives(struct task_struct *tsk)
@@ -60,6 +62,7 @@ static void task_arrives(struct task_struct *tsk)
 	res = state->res_info.client.reservation;
 	client = &state->res_info.client;
 
+	state->has_departed = false;
 	res->ops->client_arrives(res, client);
 }
 
@@ -223,8 +226,8 @@ static void pres_task_resume(struct task_struct *tsk)
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
 	raw_spin_lock_irqsave(&state->lock, flags);
-	/* Requeue if self-suspension was already processed. */
-	if (state->scheduled != tsk)
+	/* Requeue only if self-suspension was already processed. */
+	if (tinfo->has_departed)
 	{
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
@@ -234,8 +237,10 @@ static void pres_task_resume(struct task_struct *tsk)
 		/* NOTE: drops state->lock */
 		pres_update_timer_and_unlock(state);
 		local_irq_restore(flags);
-	} else
+	} else {
+		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
 		raw_spin_unlock_irqrestore(&state->lock, flags);
+	}
 
 	resume_legacy_task_model_updates(tsk);
 }
@@ -280,6 +285,7 @@ static long pres_admit_task(struct task_struct *tsk)
 	if (res) {
 		task_client_init(&tinfo->res_info, tsk, res);
 		tinfo->cpu = task_cpu(tsk);
+		tinfo->has_departed = true;
 		tsk_rt(tsk)->plugin_state = tinfo;
 		err = 0;
 