Diffstat (limited to 'litmus/sched_mc2.c'):
 litmus/sched_mc2.c | 453 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 382 insertions(+), 71 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b9f05238461b..3c8aa739345d 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+struct gmp_reservation_environment _global_env;
+
 struct mc2_task_state {
     struct task_client res_info;
     int cpu;
@@ -26,11 +28,18 @@ struct mc2_cpu_state {
     raw_spinlock_t lock;
 
     struct sup_reservation_environment sup_env;
+    struct gmp_reservation_environment* gmp_env;
     struct hrtimer timer;
 
     int cpu;
     struct task_struct* scheduled;
+    struct task_struct* will_schedule;
+    struct task_struct* linked; // for level C
     enum crit_level run_level;
+    struct task_struct* crit_entry[NUM_CRIT_LEVELS]; // mc2_task_state (get_mc2_state)
+
+    // indicate the current timer event is global
+    bool is_global_event;
 };
 
 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
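The additions above give each CPU a private partitioned environment plus a pointer to one shared global environment, together with a flag recording whether its currently armed timer serves a global event. A minimal userspace model of that layout (stand-in types and names, not the kernel structs):

    #include <stdbool.h>

    #define NUM_CPUS 4

    /* stand-ins for sup_/gmp_reservation_environment */
    struct sup_env { long long next_update; };
    struct gmp_env { long long next_event; };

    struct cpu_state {
        struct sup_env sup;   /* private: criticality levels A and B */
        struct gmp_env *gmp;  /* shared: criticality level C */
        bool is_global_event; /* does my armed timer serve a global event? */
    };

    static struct gmp_env global_env;  /* single instance, like _global_env */
    static struct cpu_state states[NUM_CPUS];

    int main(void)
    {
        for (int i = 0; i < NUM_CPUS; i++) {
            states[i].gmp = &global_env;  /* every CPU shares one gmp env */
            states[i].is_global_event = false;
        }
        return 0;
    }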
@@ -74,12 +83,14 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
     int local;
     lt_t update, now;
+    struct next_timer_event *n_event = NULL;
+    int global_found = 0;
 
     update = state->sup_env.next_scheduler_update;
     now = state->sup_env.env.current_time;
 
     /* Be sure we're actually running on the right core,
-     * as pres_update_timer() is also called from pres_task_resume(),
+     * as mc2_update_timer() is also called from mc2_task_resume(),
      * which might be called on any CPU when a thread resumes.
      */
     local = local_cpu_state() == state;
@@ -87,16 +98,50 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
     /* Must drop state lock before calling into hrtimer_start(), which
      * may raise a softirq, which in turn may wake ksoftirqd. */
     raw_spin_unlock(&state->lock);
+
+    raw_spin_lock(&(_global_env.event_lock));
+    list_for_each_entry(n_event, &state->gmp_env->next_events, list) {
+        TRACE("G_EVENT time: %llu, timer_armed_on: %d\n", n_event->next_event, n_event->timer_armed_on == NO_CPU ? (-1) : n_event->timer_armed_on);
+        if (n_event->timer_armed_on == NO_CPU) {
+            global_found = 1;
+            break;
+        }
+    }
+
+    if (global_found == 1) {
+        if (update >= n_event->next_event) {
+            update = n_event->next_event;
+            now = _global_env.env.current_time;
+            //state->is_global_event = true;
+            //n_event->timer_armed_on = state->cpu;
+        } else { // next event is sup
+            global_found = 0;
+        }
+    }
 
+    raw_spin_unlock(&(_global_env.event_lock));
+
     if (update <= now) {
         litmus_reschedule(state->cpu);
     } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
         /* Reprogram only if not already set correctly. */
         if (!hrtimer_active(&state->timer) ||
             ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
+
+            if ((ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) && (state->is_global_event == true)) {
+                struct next_timer_event *prev_event = NULL;
+                raw_spin_lock(&(_global_env.event_lock));
+                list_for_each_entry(prev_event, &state->gmp_env->next_events, list) {
+                    if (prev_event->timer_armed_on == state->cpu) {
+                        prev_event->timer_armed_on = NO_CPU;
+                        break;
+                    }
+                }
+                raw_spin_unlock(&(_global_env.event_lock));
+            }
             TRACE("canceling timer...\n");
             hrtimer_cancel(&state->timer);
-            TRACE("setting scheduler timer for %llu\n", update);
+            TRACE("setting scheduler (global: %d) timer for %llu\n", state->is_global_event, update);
             /* We cannot use hrtimer_start() here because the
              * wakeup flag must be set to zero. */
             __hrtimer_start_range_ns(&state->timer,
@@ -104,6 +149,16 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
             0 /* timer coalescing slack */,
             HRTIMER_MODE_ABS_PINNED,
             0 /* wakeup */);
+            if (global_found) {
+                raw_spin_lock(&(_global_env.event_lock));
+                state->is_global_event = true;
+                n_event->timer_armed_on = state->cpu;
+                raw_spin_unlock(&(_global_env.event_lock));
+            } else {
+                state->is_global_event = false;
+            }
+
+            TRACE("set scheduler (global: %d) timer for %llu on P%d\n", state->is_global_event, update, n_event->timer_armed_on);
         }
     } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
         /* Poke remote core only if timer needs to be set earlier than
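Across these hunks the timer path works as follows: after dropping the per-CPU lock, the CPU scans next_events for the earliest event no CPU has armed yet; if that event is due no later than the local sup update it becomes the timer target, and once the hrtimer is programmed the CPU re-takes event_lock to claim the event (timer_armed_on = its CPU id) and sets is_global_event. If the timer is instead being reprogrammed to an earlier expiry while already serving a global event, the old claim is released first. A compressed userspace sketch of the selection-and-claim step, with a pthread mutex standing in for the raw spinlock and the claim folded into one locked section (the patch performs the claim as a second locked step after hrtimer programming):

    #include <pthread.h>
    #include <stdbool.h>

    #define NO_CPU -1

    struct g_event {
        long long when;     /* absolute expiry (next_event) */
        int timer_armed_on; /* CPU whose hrtimer serves it, or NO_CPU */
        struct g_event *next;
    };

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct g_event *next_events; /* assumed sorted soonest-first */

    /* Pick the expiry this CPU should program: the earlier of its local
     * sup update and the first unarmed global event; claim the global
     * event for this CPU if it wins. */
    long long pick_expiry(int cpu, long long sup_update, bool *serves_global)
    {
        long long update = sup_update;
        struct g_event *e;

        *serves_global = false;
        pthread_mutex_lock(&event_lock);
        for (e = next_events; e; e = e->next)
            if (e->timer_armed_on == NO_CPU)
                break;                /* first event no CPU serves yet */
        if (e && e->when <= update) {
            update = e->when;         /* global event wins */
            e->timer_armed_on = cpu;  /* claim it */
            *serves_global = true;
        }
        pthread_mutex_unlock(&event_lock);
        return update;
    }

One caveat visible in the hunk itself: the final TRACE prints n_event->timer_armed_on even on the path where global_found is 0, so it appears to rely on next_events being non-empty.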
@@ -132,6 +187,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     unsigned long flags;
     enum hrtimer_restart restart = HRTIMER_NORESTART;
     struct mc2_cpu_state *state;
+    struct next_timer_event *n_event, *next;
     lt_t update, now;
 
     state = container_of(timer, struct mc2_cpu_state, timer);
@@ -144,16 +200,47 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
      */
     BUG_ON(state->cpu != raw_smp_processor_id());
 
+    TRACE("TIMER fired at %llu\n", litmus_clock());
+
+    if (state->is_global_event == true) {
+
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+        TRACE("GLOBAL EVENT FIRED\n");
+        list_for_each_entry_safe(n_event, next, &state->gmp_env->next_events, list) {
+            if (n_event->timer_armed_on == state->cpu) {
+                list_del(&n_event->list);
+                TRACE("EVENT ENTRY IS DELETED\n");
+                break;
+            }
+        }
+        gmp_update_time(state->gmp_env, litmus_clock());
+
+
+        update = n_event->next_event;
+        now = state->gmp_env->env.current_time;
+
+        kfree(n_event);
+        TRACE("ON TIMER UPDATE = %llu, NOW = %llu\n", update, now);
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);
-    sup_update_time(&state->sup_env, litmus_clock());
+
 
-    update = state->sup_env.next_scheduler_update;
-    now = state->sup_env.env.current_time;
+    if (state->is_global_event != true) {
+        sup_update_time(&state->sup_env, litmus_clock());
+
+        update = state->sup_env.next_scheduler_update;
+        now = state->sup_env.env.current_time;
+    } else {
+        state->is_global_event = false;
+    }
 
     TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n",
         now, update, state->cpu);
 
-    if (update <= now) {
+    if (update <= now || state->gmp_env->schedule_now == true) {
         litmus_reschedule_local();
     } else if (update != SUP_NO_SCHEDULER_UPDATE) {
         hrtimer_set_expires(timer, ns_to_ktime(update));
@@ -161,7 +248,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     }
 
     raw_spin_unlock_irqrestore(&state->lock, flags);
-
+
     return restart;
 }
 
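In the expiry handler, a CPU that armed a global event first unlinks the entry it claimed, advances the shared environment's notion of time, and frees the entry; only a CPU serving a purely local event runs the sup update. A self-contained sketch of the consume step (singly linked list and pthread mutex as stand-ins for list_head and the raw spinlock; note the patch reads n_event->next_event after the search loop, which assumes a matching entry was found):

    #include <pthread.h>
    #include <stdlib.h>

    #define NO_CPU -1

    struct g_event {
        long long when;
        int timer_armed_on;
        struct g_event *next;
    };

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct g_event *next_events;

    /* Pop the event this CPU armed, mirroring the handler's list_del()
     * and kfree(). Returns its expiry, or -1 if no entry matched. */
    long long consume_my_event(int cpu)
    {
        struct g_event **pp, *e;
        long long when = -1;

        pthread_mutex_lock(&event_lock);
        for (pp = &next_events; (e = *pp) != NULL; pp = &e->next) {
            if (e->timer_armed_on == cpu) {
                *pp = e->next;  /* unlink, like list_del() */
                when = e->when;
                free(e);        /* like kfree() */
                break;
            }
        }
        pthread_mutex_unlock(&event_lock);
        return when;
    }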
@@ -176,10 +263,20 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     BUG_ON(state->scheduled && state->scheduled != prev);
     BUG_ON(state->scheduled && !is_realtime(prev));
 
+    tinfo = get_mc2_state(prev);
+    if (state->scheduled != NULL) {
+        struct reservation* res;
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = NO_CPU;
+            prev->rt_param.scheduled_on = NO_CPU;
+        }
+    }
+
     /* update time */
     state->sup_env.will_schedule = true;
     sup_update_time(&state->sup_env, litmus_clock());
 
     /* remove task from reservation if it blocks */
     if (is_realtime(prev) && !is_running(prev))
         task_departs(prev, is_completed(prev));
@@ -187,6 +284,17 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     /* figure out what to schedule next */
     state->scheduled = sup_dispatch(&state->sup_env);
 
+    if (!state->scheduled) {
+        raw_spin_lock(&(_global_env.event_lock));
+
+        state->gmp_env->will_schedule = true;
+        gmp_update_time(state->gmp_env, litmus_clock());
+        //state->scheduled = gmp_dispatch(&_global_env);
+        state->gmp_env->will_schedule = false;
+
+        raw_spin_unlock(&(_global_env.event_lock));
+    }
+
     /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
     sched_state_task_picked();
 
@@ -196,13 +304,27 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     mc2_update_timer_and_unlock(state);
 
     if (prev != state->scheduled && is_realtime(prev)) {
+        struct reservation* res;
         TRACE_TASK(prev, "descheduled.\n");
+        TRACE_TASK(state->scheduled, "SCHEDULED.\n");
         state->run_level = NUM_CRIT_LEVELS;
+        tinfo = get_mc2_state(prev);
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = NO_CPU;
+            prev->rt_param.scheduled_on = NO_CPU;
+        }
     }
     if (state->scheduled) {
+        struct reservation* res;
         TRACE_TASK(state->scheduled, "scheduled.\n");
-        //tinfo = get_mc2_state(state->scheduled);
-        //state->run_level = tinfo->mc2_param.crit;
+        tinfo = get_mc2_state(state->scheduled);
+        state->run_level = tinfo->mc2_param.crit;
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = state->cpu;
+            state->scheduled->rt_param.scheduled_on = state->cpu;
+        }
     }
 
     return state->scheduled;
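For level-C (globally scheduled) tasks, the schedule path now mirrors link state into two places, the reservation's scheduled_on and the task's rt_param.scheduled_on, cleared on deschedule and set on dispatch, so other CPUs can tell where a level-C task is running. The invariant, as a tiny model (stand-in types):

    #define NO_CPU -1

    struct resv { int scheduled_on; };
    struct task { int scheduled_on; struct resv *res; };

    /* On deschedule, both the reservation and the task forget the CPU. */
    void mark_descheduled(struct task *t)
    {
        t->res->scheduled_on = NO_CPU;
        t->scheduled_on = NO_CPU;
    }

    /* On dispatch, both record the CPU the task now occupies. */
    void mark_scheduled(struct task *t, int cpu)
    {
        t->res->scheduled_on = cpu;
        t->scheduled_on = cpu;
    }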
@@ -230,10 +352,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 
     TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);
     /* Requeue only if self-suspension was already processed. */
     if (tinfo->has_departed)
@@ -241,8 +369,16 @@ static void mc2_task_resume(struct task_struct *tsk)
         /* Assumption: litmus_clock() is synchronized across cores,
          * since we might not actually be executing on tinfo->cpu
          * at the moment. */
-        sup_update_time(&state->sup_env, litmus_clock());
-        task_arrives(tsk);
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+            sup_update_time(&state->sup_env, litmus_clock());
+            task_arrives(tsk);
+        } else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            raw_spin_lock(&(_global_env.event_lock));
+            gmp_update_time(state->gmp_env, litmus_clock());
+            task_arrives(tsk);
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
+
         /* NOTE: drops state->lock */
         TRACE("mc2_resume()\n");
         mc2_update_timer_and_unlock(state);
@@ -255,24 +391,40 @@ static void mc2_task_resume(struct task_struct *tsk)
     resume_legacy_task_model_updates(tsk);
 }
 
+static void mc2_task_block(struct task_struct *task)
+{
+    struct mc2_task_state *tinfo;
+
+    tinfo = get_mc2_state(task);
+
+    TRACE_TASK(task, "TASK BLOCK\n");
+    if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+        struct reservation *res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+        res->scheduled_on = NO_CPU;
+        task->rt_param.scheduled_on = NO_CPU;
+    }
+}
 /* syscall backend for job completions */
 static long mc2_complete_job(void)
 {
     ktime_t next_release;
     long err;
     struct mc2_cpu_state *state = local_cpu_state();
-    struct reservation_environment *env = &(state->sup_env.env);
+    struct reservation_environment *env = NULL;
     struct mc2_task_state *tinfo = get_mc2_state(current);
 
+    if (tinfo->mc2_param.crit == CRIT_LEVEL_C)
+        env = &(_global_env.env);
+    else
+        env = &(state->sup_env.env);
 
-    TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
-        get_deadline(current));
+    TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(), get_deadline(current));
 
     tsk_rt(current)->completed = 1;
 
     if (tsk_rt(current)->sporadic_release) {
         env->time_zero = tsk_rt(current)->sporadic_release_time;
-
+        hrtimer_cancel(&state->timer);
         if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
             struct reservation *res;
             struct table_driven_reservation *tdres;
@@ -286,7 +438,7 @@ static long mc2_complete_job(void)
         res->next_replenishment += tdres->intervals[0].start;
         res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-        TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+        TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
         }
 
     }
@@ -327,32 +479,55 @@ static long mc2_admit_task(struct task_struct *tsk)
     }
 
     preempt_disable();
-
-    state = cpu_state_for(task_cpu(tsk));
-    raw_spin_lock_irqsave(&state->lock, flags);
-
-    res = sup_find_by_id(&state->sup_env, mp->res_id);
-
-    /* found the appropriate reservation (or vCPU) */
-    if (res) {
-        TRACE_TASK(tsk, "FOUND RES ID\n");
-        tinfo->mc2_param.crit = mp->crit;
-        tinfo->mc2_param.res_id = mp->res_id;
-
-        kfree(tsk_rt(tsk)->plugin_state);
-        tsk_rt(tsk)->plugin_state = NULL;
-
-        err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-        tinfo->cpu = task_cpu(tsk);
-        tinfo->has_departed = true;
-        tsk_rt(tsk)->plugin_state = tinfo;
-
-        /* disable LITMUS^RT's per-thread budget enforcement */
-        tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
-    }
-
-    raw_spin_unlock_irqrestore(&state->lock, flags);
+    if (mp->crit == CRIT_LEVEL_C) {
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+        res = gmp_find_by_id(&_global_env, mp->res_id);
+        if (res) {
+            TRACE_TASK(tsk, "FOUND GMP RES ID\n");
+            tinfo->mc2_param.crit = mp->crit;
+            tinfo->mc2_param.res_id = mp->res_id;
+
+            kfree(tsk_rt(tsk)->plugin_state);
+            tsk_rt(tsk)->plugin_state = NULL;
+
+            err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+            tinfo->cpu = -1;
+            tinfo->has_departed = true;
+            tsk_rt(tsk)->plugin_state = tinfo;
+
+            tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+        }
+
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+
+    } else {
+        state = cpu_state_for(task_cpu(tsk));
+        raw_spin_lock_irqsave(&state->lock, flags);
+
+        res = sup_find_by_id(&state->sup_env, mp->res_id);
+
+        /* found the appropriate reservation (or vCPU) */
+        if (res) {
+            TRACE_TASK(tsk, "FOUND SUP RES ID\n");
+            tinfo->mc2_param.crit = mp->crit;
+            tinfo->mc2_param.res_id = mp->res_id;
+
+            kfree(tsk_rt(tsk)->plugin_state);
+            tsk_rt(tsk)->plugin_state = NULL;
+
+            err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+            tinfo->cpu = task_cpu(tsk);
+            tinfo->has_departed = true;
+            tsk_rt(tsk)->plugin_state = tinfo;
+
+            /* disable LITMUS^RT's per-thread budget enforcement */
+            tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+        }
 
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+    }
+
     preempt_enable();
 
     if (err)
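Admission now forks on criticality: level-C tasks are matched against the shared environment under event_lock and are bound to no CPU (tinfo->cpu = -1), while levels A and B keep the original per-CPU path under the state lock. A small model of the routing decision (names are stand-ins, not the kernel API):

    enum crit { LEVEL_A, LEVEL_B, LEVEL_C };

    struct route {
        int use_global; /* 1: look up in _global_env under event_lock */
        int cpu;        /* CPU binding recorded in the task state */
    };

    /* Level C binds to no CPU (-1) and uses the shared environment;
     * levels A and B bind to the task's CPU and use its sup env. */
    struct route admit_route(enum crit c, int task_cpu)
    {
        struct route r;
        r.use_global = (c == LEVEL_C);
        r.cpu = r.use_global ? -1 : task_cpu;
        return r;
    }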
@@ -366,15 +541,29 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
     struct reservation *res;
 
     TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
         litmus_clock(), on_runqueue, is_running);
 
+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     /* acquire the lock protecting the state and disable interrupts */
     raw_spin_lock_irqsave(&state->lock, flags);
 
+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+    } else {
+        raw_spin_lock(&(_global_env.event_lock));
+        res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+        raw_spin_unlock(&(_global_env.event_lock));
+    }
+
     if (is_running) {
         state->scheduled = tsk;
         /* make sure this task should actually be running */
@@ -384,7 +573,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
     if (on_runqueue || is_running) {
         /* Assumption: litmus_clock() is synchronized across cores
          * [see comment in pres_task_resume()] */
-        sup_update_time(&state->sup_env, litmus_clock());
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+            sup_update_time(&state->sup_env, litmus_clock());
+        } else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            raw_spin_lock(&(_global_env.event_lock));
+            TRACE_TASK(tsk, "CALL GMP_UPDATE_TIME in task_new at %llu\n", litmus_clock());
+            gmp_update_time(state->gmp_env, litmus_clock());
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
         task_arrives(tsk);
         /* NOTE: drops state->lock */
         TRACE("mc2_new()\n");
@@ -393,7 +589,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
     } else
         raw_spin_unlock_irqrestore(&state->lock, flags);
 
-    res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
     release_at(tsk, res->next_replenishment);
     if (res)
         TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
@@ -407,9 +602,14 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
     struct mc2_cpu_state *state;
     struct reservation *res = NULL, *next;
     struct sup_reservation_environment *sup_env;
+    struct gmp_reservation_environment *gmp_env;
     int found = 0;
 
-    state = cpu_state_for(cpu);
+    if (cpu != -1)
+        state = cpu_state_for(cpu);
+    else
+        state = local_cpu_state();
+
     raw_spin_lock(&state->lock);
 
     // res = sup_find_by_id(&state->sup_env, reservation_id);
@@ -447,6 +647,43 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 
     raw_spin_unlock(&state->lock);
 
+    raw_spin_lock(&(_global_env.event_lock));
+
+    gmp_env = &_global_env;
+    //if (!res) {
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->depleted_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->inactive_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->active_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+
+    raw_spin_unlock(&(_global_env.event_lock));
+
     TRACE("RESERVATION_DESTROY ret = %d\n", ret);
     return ret;
 }
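Destruction falls through to the global environment when the per-CPU search fails, probing the depleted, inactive, and active lists in turn. Note that the patch leaves kfree(res) commented out on this path, so the unlinked reservation is not freed here. A sketch of the same search-and-unlink over plain singly linked lists (the kernel code walks struct list_head with list_for_each_entry_safe and keeps iterating after a match; this sketch returns early):

    #include <stddef.h>

    struct resv { unsigned id; struct resv *next; };

    /* Unlink and return the reservation with the given id from the
     * first of several lists containing it; NULL if absent. */
    struct resv *find_and_unlink(struct resv **lists[], int nlists, unsigned id)
    {
        for (int i = 0; i < nlists; i++) {
            for (struct resv **pp = lists[i]; *pp; pp = &(*pp)->next) {
                if ((*pp)->id == id) {
                    struct resv *r = *pp;
                    *pp = r->next; /* unlink */
                    return r;
                }
            }
        }
        return NULL;
    }

Usage would mirror the hunk's order, e.g. struct resv **lists[] = { &depleted, &inactive, &active }.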
@@ -455,8 +692,14 @@ static void mc2_task_exit(struct task_struct *tsk)
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
 
+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);
 
     if (state->scheduled == tsk)
@@ -466,7 +709,13 @@ static void mc2_task_exit(struct task_struct *tsk)
     if (is_running(tsk)) {
         /* Assumption: litmus_clock() is synchronized across cores
          * [see comment in pres_task_resume()] */
-        sup_update_time(&state->sup_env, litmus_clock());
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C)
+            sup_update_time(&state->sup_env, litmus_clock());
+        else {
+            raw_spin_lock(&(_global_env.event_lock));
+            gmp_update_time(state->gmp_env, litmus_clock());
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
         task_departs(tsk, 0);
         /* NOTE: drops state->lock */
         TRACE("mc2_exit()\n");
@@ -505,7 +754,7 @@ static long create_polling_reservation(
     int use_edf = config->priority == LITMUS_NO_PRIORITY;
     int periodic = res_type == PERIODIC_POLLING;
     long err = -EINVAL;
-
+
     if (config->polling_params.budget >
         config->polling_params.period) {
         printk(KERN_ERR "invalid polling reservation (%u): "
@@ -533,26 +782,48 @@ static long create_polling_reservation(
     if (!pres)
         return -ENOMEM;
 
-    state = cpu_state_for(config->cpu);
-    raw_spin_lock_irqsave(&state->lock, flags);
+    if (config->cpu != -1) {
+        state = cpu_state_for(config->cpu);
+        raw_spin_lock_irqsave(&state->lock, flags);
 
-    res = sup_find_by_id(&state->sup_env, config->id);
-    if (!res) {
-        polling_reservation_init(pres, use_edf, periodic,
-            config->polling_params.budget,
-            config->polling_params.period,
-            config->polling_params.relative_deadline,
-            config->polling_params.offset);
-        pres->res.id = config->id;
-        if (!use_edf)
-            pres->res.priority = config->priority;
-        sup_add_new_reservation(&state->sup_env, &pres->res);
-        err = config->id;
-    } else {
-        err = -EEXIST;
-    }
+        res = sup_find_by_id(&state->sup_env, config->id);
+        if (!res) {
+            polling_reservation_init(pres, use_edf, periodic,
+                config->polling_params.budget,
+                config->polling_params.period,
+                config->polling_params.relative_deadline,
+                config->polling_params.offset);
+            pres->res.id = config->id;
+            if (!use_edf)
+                pres->res.priority = config->priority;
+            sup_add_new_reservation(&state->sup_env, &pres->res);
+            err = config->id;
+        } else {
+            err = -EEXIST;
+        }
 
-    raw_spin_unlock_irqrestore(&state->lock, flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+    } else if (config->cpu == -1) {
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+        res = gmp_find_by_id(&_global_env, config->id);
+        if (!res) {
+            polling_reservation_init(pres, use_edf, periodic,
+                config->polling_params.budget,
+                config->polling_params.period,
+                config->polling_params.relative_deadline,
+                config->polling_params.offset);
+            pres->res.id = config->id;
+            if (!use_edf)
+                pres->res.priority = config->priority;
+            gmp_add_new_reservation(&_global_env, &pres->res);
+            err = config->id;
+        } else {
+            err = -EEXIST;
+        }
+
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+    }
 
     if (err < 0)
         kfree(pres);
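Reservation creation adopts -1 as the sentinel CPU for global (level-C) reservations: cpu == -1 routes the new reservation into _global_env via gmp_add_new_reservation() under event_lock, while any other value keeps the per-CPU sup path. A compact model of the validate-and-route step (max_cpu is a hypothetical bound standing in for the cpu_online() check):

    enum env_kind { ENV_SUP, ENV_GMP };

    /* Validate and route a reservation-create request; -1 is the
     * sentinel for "global" throughout this patch. */
    int route_create(int cpu, int max_cpu, enum env_kind *kind)
    {
        if (cpu == -1) {
            *kind = ENV_GMP; /* shared environment, level C */
            return 0;
        }
        if (cpu < 0 || cpu > max_cpu)
            return -1;       /* rejected, like -EINVAL for offline CPUs */
        *kind = ENV_SUP;     /* that CPU's partitioned environment */
        return 0;
    }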
@@ -671,10 +942,12 @@ static long mc2_reservation_create(int res_type, void* __user _config)
     if (copy_from_user(&config, _config, sizeof(config)))
         return -EFAULT;
 
-    if (config.cpu < 0 || !cpu_online(config.cpu)) {
-        printk(KERN_ERR "invalid polling reservation (%u): "
-               "CPU %d offline\n", config.id, config.cpu);
-        return -EINVAL;
+    if (config.cpu != -1) {
+        if (config.cpu < 0 || !cpu_online(config.cpu)) {
+            printk(KERN_ERR "invalid polling reservation (%u): "
+                   "CPU %d offline\n", config.id, config.cpu);
+            return -EINVAL;
+        }
     }
 
     switch (res_type) {
@@ -732,6 +1005,8 @@ static long mc2_activate_plugin(void)
     int cpu;
     struct mc2_cpu_state *state;
 
+    gmp_init(&_global_env);
+
     for_each_online_cpu(cpu) {
         TRACE("Initializing CPU%d...\n", cpu);
 
@@ -740,7 +1015,11 @@ static long mc2_activate_plugin(void)
         raw_spin_lock_init(&state->lock);
         state->cpu = cpu;
         state->scheduled = NULL;
-
+        state->will_schedule = NULL;
+        state->linked = NULL;
+        state->gmp_env = &_global_env;
+        state->is_global_event = false;
+
         sup_init(&state->sup_env);
 
         hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
@@ -794,6 +1073,37 @@ static long mc2_deactivate_plugin(void)
         raw_spin_unlock(&state->lock);
     }
 
+    raw_spin_lock(&(_global_env.event_lock));
+
+    /* Delete all reservations --- assumes struct reservation
+     * is prefix of containing struct. */
+
+    while (!list_empty(&_global_env.active_reservations)) {
+        res = list_first_entry(
+            &_global_env.active_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    while (!list_empty(&_global_env.inactive_reservations)) {
+        res = list_first_entry(
+            &_global_env.inactive_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    while (!list_empty(&_global_env.depleted_reservations)) {
+        res = list_first_entry(
+            &_global_env.depleted_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    raw_spin_unlock(&(_global_env.event_lock));
+
     destroy_domain_proc_info(&mc2_domain_proc_info);
     return 0;
 }
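Plugin deactivation now also drains the three global lists, freeing every reservation; as the patch's own comment notes, this relies on struct reservation being the first member of the containing struct, so freeing the base pointer releases the whole object. The drain pattern, modeled on a singly linked list:

    #include <stdlib.h>

    struct resv { struct resv *next; };

    /* Free every node on one list (model of the while/!list_empty
     * loops above; a singly linked list stands in for list_head). */
    void drain(struct resv **head)
    {
        while (*head) {
            struct resv *r = *head; /* like list_first_entry() */
            *head = r->next;        /* like list_del() */
            free(r);                /* like kfree() */
        }
    }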
@@ -802,6 +1112,7 @@ static struct sched_plugin mc2_plugin = {
     .plugin_name        = "MC2",
     .schedule           = mc2_schedule,
     .task_wake_up       = mc2_task_resume,
+    .task_block         = mc2_task_block,
     .admit_task         = mc2_admit_task,
     .task_new           = mc2_task_new,
     .task_exit          = mc2_task_exit,