author		Namhoon Kim <namhoonk@cs.unc.edu>	2016-03-23 08:20:51 -0400
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2016-03-23 08:20:51 -0400
commit		ff210e0441b743890ad85c7335e41894b34a1431 (patch)
tree		21027c2433f5ca9a26731b3af72fa6eb620df369 /litmus/sched_mc2.c
parent		2e23e3f0cc7c3249b510e94b5b3ec92577b67e81 (diff)
MC2 scheduler and partition modules
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--	litmus/sched_mc2.c	1849
1 file changed, 1849 insertions(+), 0 deletions(-)

diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
new file mode 100644
index 000000000000..0ff27135c825
--- /dev/null
+++ b/litmus/sched_mc2.c
@@ -0,0 +1,1849 @@
1 | /* | ||
2 | * litmus/sched_mc2.c | ||
3 | * | ||
4 | * Implementation of the Mixed-Criticality on MultiCore scheduler | ||
5 | * | ||
6 | * This plugin implements the scheduling algorithm proposed in the | ||
7 | * paper "Mixed-Criticality Real-Time Scheduling for Multicore Systems". | ||
8 | */ | ||
9 | |||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | #include <litmus/sched_plugin.h> | ||
15 | #include <litmus/preempt.h> | ||
16 | #include <litmus/debug_trace.h> | ||
17 | |||
18 | #include <litmus/litmus.h> | ||
19 | #include <litmus/jobs.h> | ||
20 | #include <litmus/budget.h> | ||
21 | #include <litmus/litmus_proc.h> | ||
22 | #include <litmus/sched_trace.h> | ||
23 | #include <litmus/cache_proc.h> | ||
24 | #include <litmus/trace.h> | ||
25 | |||
26 | #include <litmus/mc2_common.h> | ||
27 | #include <litmus/reservation.h> | ||
28 | #include <litmus/polling_reservations.h> | ||
29 | |||
30 | //#define TRACE(fmt, args...) do {} while (false) | ||
31 | //#define TRACE_TASK(fmt, args...) do {} while (false) | ||
32 | |||
33 | #define BUDGET_ENFORCEMENT_AT_C 0 | ||
34 | |||
35 | extern void do_partition(enum crit_level lv, int cpu); | ||
36 | |||
37 | /* _global_env - reservation container for level-C tasks */ | ||
38 | struct gmp_reservation_environment _global_env; | ||
39 | |||
40 | /* cpu_entry - keep track of a running task on a cpu | ||
41 | * This state is used to decide the lowest priority cpu | ||
42 | */ | ||
43 | struct cpu_entry { | ||
44 | struct task_struct *scheduled; | ||
45 | lt_t deadline; | ||
46 | int cpu; | ||
47 | enum crit_level lv; | ||
48 | /* if will_schedule is true, this cpu has already been selected | ||
49 | and will call mc2_schedule() soon. */ | ||
50 | bool will_schedule; | ||
51 | }; | ||
52 | |||
53 | /* cpu_priority - a global state for choosing the lowest priority CPU */ | ||
54 | struct cpu_priority { | ||
55 | raw_spinlock_t lock; | ||
56 | struct cpu_entry cpu_entries[NR_CPUS]; | ||
57 | }; | ||
58 | |||
59 | struct cpu_priority _lowest_prio_cpu; | ||
60 | |||
61 | /* mc2_task_state - a task state structure */ | ||
62 | struct mc2_task_state { | ||
63 | struct task_client res_info; | ||
64 | /* if cpu == -1, this task is a global task (level C) */ | ||
65 | int cpu; | ||
66 | bool has_departed; | ||
67 | struct mc2_task mc2_param; | ||
68 | }; | ||
69 | |||
70 | /* crit_entry - maintain the logically running job (ghost job) */ | ||
71 | struct crit_entry { | ||
72 | enum crit_level level; | ||
73 | struct task_struct *running; | ||
74 | }; | ||
75 | |||
76 | /* mc2_cpu_state - maintain the scheduled state and ghost jobs | ||
77 | * timer : hrtimer for partitioned tasks (level A and B); global | ||
78 | * (level-C) events may also be armed on it (cf. timer_armed_on) | ||
79 | */ | ||
80 | struct mc2_cpu_state { | ||
81 | raw_spinlock_t lock; | ||
82 | |||
83 | struct sup_reservation_environment sup_env; | ||
84 | struct hrtimer timer; | ||
85 | |||
86 | int cpu; | ||
87 | struct task_struct* scheduled; | ||
88 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; | ||
89 | }; | ||
90 | |||
91 | static int resched_cpu[NR_CPUS]; | ||
92 | static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); | ||
93 | |||
94 | #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) | ||
95 | #define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state)) | ||
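/* Illustrative sketch (not part of the original patch): the canonical
 * access pattern for the per-CPU state defined above. Any cross-CPU
 * access must take the state's lock with interrupts disabled, since the
 * scheduling timer also manipulates this state:
 *
 *	struct mc2_cpu_state *state = cpu_state_for(some_cpu);
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&state->lock, flags);
 *	... inspect/modify state->scheduled, state->sup_env, ...
 *	raw_spin_unlock_irqrestore(&state->lock, flags);
 */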
96 | |||
97 | /* get_mc2_state - get the task's state */ | ||
98 | static struct mc2_task_state* get_mc2_state(struct task_struct *tsk) | ||
99 | { | ||
100 | struct mc2_task_state* tinfo; | ||
101 | |||
102 | tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state; | ||
103 | |||
104 | if (tinfo) | ||
105 | return tinfo; | ||
106 | else | ||
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | /* get_task_crit_level - return the criticality level of a task */ | ||
111 | static enum crit_level get_task_crit_level(struct task_struct *tsk) | ||
112 | { | ||
113 | struct mc2_task *mp; | ||
114 | |||
115 | if (!tsk || !is_realtime(tsk)) | ||
116 | return NUM_CRIT_LEVELS; | ||
117 | |||
118 | mp = tsk_rt(tsk)->mc2_data; | ||
119 | |||
120 | if (!mp) | ||
121 | return NUM_CRIT_LEVELS; | ||
122 | else | ||
123 | return mp->crit; | ||
124 | } | ||
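/* Illustration (added for clarity): NUM_CRIT_LEVELS doubles as a
 * "not an MC2 task" sentinel, so callers guard as follows (compare
 * task_arrives() and mc2_dispatch() below):
 *
 *	enum crit_level lv = get_task_crit_level(tsk);
 *	if (lv != NUM_CRIT_LEVELS) {
 *		... tsk is an MC2 task at level lv (A, B, or C) ...
 *	}
 */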
125 | |||
126 | /* task_departs - remove a task from its reservation | ||
127 | * If the job has remaining budget, convert it to a ghost job | ||
128 | * and update crit_entries[] | ||
129 | * | ||
130 | * @job_complete indicates whether the job has completed | ||
131 | */ | ||
132 | static void task_departs(struct task_struct *tsk, int job_complete) | ||
133 | { | ||
134 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
135 | //struct mc2_cpu_state* state = local_cpu_state(); | ||
136 | struct reservation* res = NULL; | ||
137 | struct reservation_client *client = NULL; | ||
138 | |||
139 | BUG_ON(!is_realtime(tsk)); | ||
140 | |||
141 | res = tinfo->res_info.client.reservation; | ||
142 | client = &tinfo->res_info.client; | ||
143 | BUG_ON(!res); | ||
144 | BUG_ON(!client); | ||
145 | |||
146 | /* 9/18/2015 fix start - no ghost job handling, empty remaining budget */ | ||
147 | if (job_complete) { | ||
148 | res->cur_budget = 0; | ||
149 | sched_trace_task_completion(tsk, 0); | ||
150 | } | ||
151 | /* fix end */ | ||
152 | |||
153 | res->ops->client_departs(res, client, job_complete); | ||
154 | tinfo->has_departed = true; | ||
155 | TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget); | ||
156 | /* 9/18/2015 fix start - no remaining budget | ||
157 | * | ||
158 | if (job_complete && res->cur_budget) { | ||
159 | struct crit_entry* ce; | ||
160 | enum crit_level lv = tinfo->mc2_param.crit; | ||
161 | |||
162 | ce = &state->crit_entries[lv]; | ||
163 | ce->running = tsk; | ||
164 | res->is_ghost = state->cpu; | ||
165 | #if BUDGET_ENFORCEMENT_AT_C | ||
166 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
167 | #endif | ||
168 | TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock()); | ||
169 | } | ||
170 | * fix -end | ||
171 | */ | ||
172 | |||
173 | } | ||
174 | |||
175 | /* task_arrives - put a task into its reservation | ||
176 | * If the job was a ghost job, remove it from crit_entries[] | ||
177 | */ | ||
178 | static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) | ||
179 | { | ||
180 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
181 | struct reservation* res; | ||
182 | struct reservation_client *client; | ||
183 | enum crit_level lv = get_task_crit_level(tsk); | ||
184 | |||
185 | res = tinfo->res_info.client.reservation; | ||
186 | client = &tinfo->res_info.client; | ||
187 | |||
188 | tinfo->has_departed = false; | ||
189 | |||
190 | switch(lv) { | ||
191 | case CRIT_LEVEL_A: | ||
192 | case CRIT_LEVEL_B: | ||
193 | TS_RELEASE_START; | ||
194 | break; | ||
195 | case CRIT_LEVEL_C: | ||
196 | TS_RELEASE_C_START; | ||
197 | break; | ||
198 | default: | ||
199 | break; | ||
200 | } | ||
201 | |||
202 | res->ops->client_arrives(res, client); | ||
203 | |||
204 | if (lv != NUM_CRIT_LEVELS) { | ||
205 | struct crit_entry *ce; | ||
206 | ce = &state->crit_entries[lv]; | ||
207 | /* if the current task is a ghost job, remove it */ | ||
208 | if (ce->running == tsk) | ||
209 | ce->running = NULL; | ||
210 | } | ||
211 | /* do we need this?? | ||
212 | if (resched_cpu[state->cpu]) | ||
213 | litmus_reschedule(state->cpu); | ||
214 | */ | ||
215 | |||
216 | switch(lv) { | ||
217 | case CRIT_LEVEL_A: | ||
218 | case CRIT_LEVEL_B: | ||
219 | TS_RELEASE_END; | ||
220 | break; | ||
221 | case CRIT_LEVEL_C: | ||
222 | TS_RELEASE_C_END; | ||
223 | break; | ||
224 | default: | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* get_lowest_prio_cpu - return the lowest priority cpu | ||
230 | * This will be used for scheduling level-C tasks. | ||
231 | * If all CPUs are running tasks which have | ||
232 | * higher priority than level C, return NO_CPU. | ||
233 | */ | ||
234 | static int get_lowest_prio_cpu(lt_t priority) | ||
235 | { | ||
236 | struct cpu_entry *ce; | ||
237 | int cpu, ret = NO_CPU; | ||
238 | lt_t latest_deadline = 0; | ||
239 | |||
240 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
241 | ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu]; | ||
242 | if (!ce->will_schedule && !ce->scheduled) { | ||
243 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
244 | TRACE("CPU %d (local) is the lowest!\n", ce->cpu); | ||
245 | return ce->cpu; | ||
246 | } else { | ||
247 | TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0); | ||
248 | } | ||
249 | |||
250 | for_each_online_cpu(cpu) { | ||
251 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
252 | /* If a CPU will call schedule() in the near future, we don't | ||
253 | return that CPU. */ | ||
254 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, | ||
255 | ce->scheduled ? (ce->scheduled)->comm : "null", | ||
256 | ce->scheduled ? (ce->scheduled)->pid : 0, | ||
257 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); | ||
258 | if (!ce->will_schedule) { | ||
259 | if (!ce->scheduled) { | ||
260 | /* Idle cpu, return this. */ | ||
261 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
262 | TRACE("CPU %d is the lowest!\n", ce->cpu); | ||
263 | return ce->cpu; | ||
264 | } else if (ce->lv == CRIT_LEVEL_C && | ||
265 | ce->deadline > latest_deadline) { | ||
266 | latest_deadline = ce->deadline; | ||
267 | ret = ce->cpu; | ||
268 | } | ||
269 | } | ||
270 | } | ||
271 | |||
272 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
273 | |||
274 | if (priority >= latest_deadline) | ||
275 | ret = NO_CPU; | ||
276 | |||
277 | TRACE("CPU %d is the lowest!\n", ret); | ||
278 | |||
279 | return ret; | ||
280 | } | ||
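/* Illustrative helper (hypothetical, not in the original patch): the
 * "find lowest-priority CPU, mark it, poke it" sequence recurs in
 * on_scheduling_timer() and mc2_update_timer_and_unlock() below and
 * could be factored out roughly like this; callers must hold
 * _global_env.lock:
 *
 *	static void poke_lowest_prio_cpu(lt_t prio, int reschedule[NR_CPUS])
 *	{
 *		int cpu = get_lowest_prio_cpu(prio);
 *		if (cpu != NO_CPU &&
 *		    !_lowest_prio_cpu.cpu_entries[cpu].will_schedule) {
 *			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 *			if (cpu == local_cpu_state()->cpu)
 *				litmus_reschedule_local();
 *			else
 *				reschedule[cpu] = 1;
 *		}
 *	}
 */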
281 | |||
282 | /* mc2_update_time - update time for a given criticality level. | ||
283 | * caller must hold a proper lock | ||
284 | * (cpu_state lock or global lock) | ||
285 | */ | ||
286 | /* 9/24/2015 temporarily not used | ||
287 | static void mc2_update_time(enum crit_level lv, | ||
288 | struct mc2_cpu_state *state, lt_t time) | ||
289 | { | ||
290 | int global_schedule_now; | ||
291 | |||
292 | if (lv < CRIT_LEVEL_C) | ||
293 | sup_update_time(&state->sup_env, time); | ||
294 | else if (lv == CRIT_LEVEL_C) { | ||
295 | global_schedule_now = gmp_update_time(&_global_env, time); | ||
296 | while (global_schedule_now--) { | ||
297 | int cpu = get_lowest_prio_cpu(0); | ||
298 | if (cpu != NO_CPU) { | ||
299 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
300 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
301 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
302 | TRACE("LOWEST CPU = P%d\n", cpu); | ||
303 | litmus_reschedule(cpu); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | else | ||
308 | TRACE("update_time(): Criticality level error!!!!\n"); | ||
309 | } | ||
310 | */ | ||
311 | |||
312 | /* NOTE: drops state->lock */ | ||
313 | /* mc2_update_timer_and_unlock - set the local timer and unlock | ||
314 | * Whenever res_env.current_time is updated, | ||
315 | * we check next_scheduler_update and set | ||
316 | * a timer. | ||
317 | * If there exists a global event which is | ||
318 | * not yet armed on any CPU, arm the local | ||
319 | * timer for that event as well. | ||
320 | */ | ||
321 | static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | ||
322 | { | ||
323 | int local, cpus; | ||
324 | lt_t update, now; | ||
325 | //enum crit_level lv = get_task_crit_level(state->scheduled); | ||
326 | struct next_timer_event *event, *next; | ||
327 | int reschedule[NR_CPUS]; | ||
328 | |||
329 | for (cpus = 0; cpus<NR_CPUS; cpus++) | ||
330 | reschedule[cpus] = 0; | ||
331 | |||
332 | update = state->sup_env.next_scheduler_update; | ||
333 | now = state->sup_env.env.current_time; | ||
334 | |||
335 | /* Be sure we're actually running on the right core, | ||
336 | * as mc2_update_timer_and_unlock() is also called from mc2_task_resume(), | ||
337 | * which might be called on any CPU when a thread resumes. | ||
338 | */ | ||
339 | local = local_cpu_state() == state; | ||
340 | |||
341 | raw_spin_lock(&_global_env.lock); | ||
342 | |||
343 | list_for_each_entry_safe(event, next, &_global_env.next_events, list) { | ||
344 | /* If the event time has already passed, we call schedule() on | ||
345 | the lowest-priority cpu */ | ||
346 | if (event->next_update >= update) { | ||
347 | break; | ||
348 | } | ||
349 | |||
350 | if (event->next_update < litmus_clock()) { | ||
351 | if (event->timer_armed_on == NO_CPU) { | ||
352 | struct reservation *res = gmp_find_by_id(&_global_env, event->id); | ||
353 | int cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
354 | TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); | ||
355 | list_del(&event->list); | ||
356 | kfree(event); | ||
357 | if (cpu != NO_CPU) { | ||
358 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
359 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
360 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
361 | if (cpu == local_cpu_state()->cpu) | ||
362 | litmus_reschedule_local(); | ||
363 | else | ||
364 | reschedule[cpu] = 1; | ||
365 | } | ||
366 | } | ||
367 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { | ||
368 | event->timer_armed_on = state->cpu; | ||
369 | update = event->next_update; | ||
370 | break; | ||
371 | } | ||
372 | } | ||
373 | |||
374 | /* Must drop state lock before calling into hrtimer_start(), which | ||
375 | * may raise a softirq, which in turn may wake ksoftirqd. */ | ||
376 | raw_spin_unlock(&_global_env.lock); | ||
377 | raw_spin_unlock(&state->lock); | ||
378 | |||
379 | if (update <= now || reschedule[state->cpu]) { | ||
380 | //litmus_reschedule(state->cpu); | ||
381 | raw_spin_lock(&state->lock); | ||
382 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
383 | raw_spin_unlock(&state->lock); | ||
384 | reschedule[state->cpu] = 0; | ||
385 | } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
386 | /* Reprogram only if not already set correctly. */ | ||
387 | if (!hrtimer_active(&state->timer) || | ||
388 | ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) { | ||
389 | TRACE("canceling timer...at %llu\n", | ||
390 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
391 | hrtimer_cancel(&state->timer); | ||
392 | TRACE("setting scheduler timer for %llu\n", update); | ||
393 | /* We cannot use hrtimer_start() here because the | ||
394 | * wakeup flag must be set to zero. */ | ||
395 | __hrtimer_start_range_ns(&state->timer, | ||
396 | ns_to_ktime(update), | ||
397 | 0 /* timer coalescing slack */, | ||
398 | HRTIMER_MODE_ABS_PINNED, | ||
399 | 0 /* wakeup */); | ||
400 | } | ||
401 | } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
402 | /* Poke remote core only if timer needs to be set earlier than | ||
403 | * it is currently set. | ||
404 | */ | ||
405 | TRACE("mc2_update_timer for remote CPU %d (update=%llu, " | ||
406 | "active:%d, set:%llu)\n", | ||
407 | state->cpu, | ||
408 | update, | ||
409 | hrtimer_active(&state->timer), | ||
410 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
411 | if (!hrtimer_active(&state->timer) || | ||
412 | ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) { | ||
413 | TRACE("poking CPU %d so that it can update its " | ||
414 | "scheduling timer (active:%d, set:%llu)\n", | ||
415 | state->cpu, | ||
416 | hrtimer_active(&state->timer), | ||
417 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
418 | //litmus_reschedule(state->cpu); | ||
419 | raw_spin_lock(&state->lock); | ||
420 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
421 | raw_spin_unlock(&state->lock); | ||
422 | reschedule[state->cpu] = 0; | ||
423 | } | ||
424 | } | ||
425 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
426 | if (reschedule[cpus]) { | ||
427 | //litmus_reschedule(cpus); | ||
428 | struct mc2_cpu_state *remote_state; | ||
429 | |||
430 | remote_state = cpu_state_for(cpus); | ||
431 | raw_spin_lock(&remote_state->lock); | ||
432 | preempt_if_preemptable(remote_state->scheduled, remote_state->cpu); | ||
433 | raw_spin_unlock(&remote_state->lock); | ||
434 | } | ||
435 | } | ||
436 | } | ||
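/* Locking note (added for clarity): throughout this plugin the nesting
 * order is state->lock first, then _global_env.lock, e.g.
 *
 *	raw_spin_lock(&state->lock);
 *	raw_spin_lock(&_global_env.lock);
 *	...
 *	raw_spin_unlock(&_global_env.lock);
 *	raw_spin_unlock(&state->lock);
 *
 * mc2_update_timer_and_unlock() is entered with state->lock held and
 * takes _global_env.lock internally; acquiring the two locks in the
 * opposite order anywhere would risk deadlock.
 */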
437 | |||
438 | /* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs | ||
439 | * If the budget of a ghost is exhausted, | ||
440 | * clear is_ghost and reschedule | ||
441 | */ | ||
442 | /* | ||
443 | static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state) | ||
444 | { | ||
445 | int lv = 0; | ||
446 | struct crit_entry* ce; | ||
447 | struct reservation *res; | ||
448 | struct mc2_task_state *tinfo; | ||
449 | lt_t ret = ULLONG_MAX; | ||
450 | |||
451 | BUG_ON(!state); | ||
452 | |||
453 | for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | ||
454 | ce = &state->crit_entries[lv]; | ||
455 | if (ce->running != NULL) { | ||
456 | //printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0); | ||
457 | tinfo = get_mc2_state(ce->running); | ||
458 | if (!tinfo) | ||
459 | continue; | ||
460 | |||
461 | res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
462 | //BUG_ON(!res); | ||
463 | if (!res) { | ||
464 | printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id); | ||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | TRACE("LV %d running id %d budget %llu\n", | ||
469 | lv, tinfo->mc2_param.res_id, res->cur_budget); | ||
470 | // If the budget is exhausted, clear is_ghost and reschedule | ||
471 | if (!res->cur_budget) { | ||
472 | struct sup_reservation_environment* sup_env = &state->sup_env; | ||
473 | |||
474 | TRACE("GHOST FINISH id %d at %llu\n", | ||
475 | tinfo->mc2_param.res_id, litmus_clock()); | ||
476 | ce->running = NULL; | ||
477 | res->is_ghost = NO_CPU; | ||
478 | |||
479 | if (lv < CRIT_LEVEL_C) { | ||
480 | res = list_first_entry_or_null( | ||
481 | &sup_env->active_reservations, | ||
482 | struct reservation, list); | ||
483 | if (res) | ||
484 | litmus_reschedule_local(); | ||
485 | } else if (lv == CRIT_LEVEL_C) { | ||
486 | res = list_first_entry_or_null( | ||
487 | &_global_env.active_reservations, | ||
488 | struct reservation, list); | ||
489 | if (res) | ||
490 | litmus_reschedule(state->cpu); | ||
491 | } | ||
492 | } else { | ||
493 | //TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget); | ||
494 | //gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
495 | if (ret > res->cur_budget) { | ||
496 | ret = res->cur_budget; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | */ | ||
505 | |||
506 | /* update_cpu_prio - Update cpu's priority | ||
507 | * When a cpu picks a new task, call this function | ||
508 | * to update cpu priorities. | ||
509 | */ | ||
510 | static void update_cpu_prio(struct mc2_cpu_state *state) | ||
511 | { | ||
512 | struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu]; | ||
513 | enum crit_level lv = get_task_crit_level(state->scheduled); | ||
514 | |||
515 | if (!state->scheduled) { | ||
516 | /* cpu is idle. */ | ||
517 | ce->scheduled = NULL; | ||
518 | ce->deadline = ULLONG_MAX; | ||
519 | ce->lv = NUM_CRIT_LEVELS; | ||
520 | } else if (lv == CRIT_LEVEL_C) { | ||
521 | ce->scheduled = state->scheduled; | ||
522 | ce->deadline = get_deadline(state->scheduled); | ||
523 | ce->lv = lv; | ||
524 | } else if (lv < CRIT_LEVEL_C) { | ||
525 | /* If cpu is running level A or B tasks, it is not eligible | ||
526 | to run level-C tasks */ | ||
527 | ce->scheduled = state->scheduled; | ||
528 | ce->deadline = 0; | ||
529 | ce->lv = lv; | ||
530 | } | ||
531 | } | ||
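/* Illustration (added for clarity): the resulting cpu_entry contents
 * for the three cases handled above:
 *
 *	idle CPU:     scheduled = NULL, deadline = ULLONG_MAX, lv = NUM_CRIT_LEVELS
 *	level-C task: scheduled = tsk,  deadline = get_deadline(tsk), lv = CRIT_LEVEL_C
 *	level-A/B:    scheduled = tsk,  deadline = 0, lv = CRIT_LEVEL_A/B
 *
 * get_lowest_prio_cpu() then prefers idle CPUs and, among busy ones,
 * considers only entries with lv == CRIT_LEVEL_C, so CPUs running
 * level-A/B tasks are never preempted for level-C work.
 */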
532 | |||
533 | /* on_scheduling_timer - timer event for partitioned tasks | ||
534 | */ | ||
535 | static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | ||
536 | { | ||
537 | unsigned long flags; | ||
538 | enum hrtimer_restart restart = HRTIMER_NORESTART; | ||
539 | struct mc2_cpu_state *state; | ||
540 | lt_t update, now; | ||
541 | int global_schedule_now; | ||
542 | //lt_t remain_budget; // no ghost jobs | ||
543 | int reschedule[NR_CPUS]; | ||
544 | int cpus; | ||
545 | |||
546 | for (cpus = 0; cpus<NR_CPUS; cpus++) | ||
547 | reschedule[cpus] = 0; | ||
548 | |||
549 | state = container_of(timer, struct mc2_cpu_state, timer); | ||
550 | |||
551 | /* The scheduling timer should only fire on the local CPU, because | ||
552 | * otherwise deadlocks via timer_cancel() are possible. | ||
553 | * Note: this does not interfere with dedicated interrupt handling, as | ||
554 | * even under dedicated interrupt handling scheduling timers for | ||
555 | * budget enforcement must occur locally on each CPU. | ||
556 | */ | ||
557 | BUG_ON(state->cpu != raw_smp_processor_id()); | ||
558 | |||
559 | TS_ISR_START; | ||
560 | |||
561 | TRACE("Timer fired at %llu\n", litmus_clock()); | ||
562 | //raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
563 | raw_spin_lock_irqsave(&state->lock, flags); | ||
564 | now = litmus_clock(); | ||
565 | sup_update_time(&state->sup_env, now); | ||
566 | |||
567 | /* 9/20/2015 fix - no ghost job | ||
568 | remain_budget = mc2_update_ghost_state(state); | ||
569 | */ | ||
570 | update = state->sup_env.next_scheduler_update; | ||
571 | now = state->sup_env.env.current_time; | ||
572 | |||
573 | /* 9/20/2015 fix - no ghost job | ||
574 | if (remain_budget != ULLONG_MAX && update > now + remain_budget) { | ||
575 | update = now + remain_budget; | ||
576 | } | ||
577 | |||
578 | TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget); | ||
579 | */ | ||
580 | |||
581 | if (update <= now) { | ||
582 | litmus_reschedule_local(); | ||
583 | } else if (update != SUP_NO_SCHEDULER_UPDATE) { | ||
584 | hrtimer_set_expires(timer, ns_to_ktime(update)); | ||
585 | restart = HRTIMER_RESTART; | ||
586 | } | ||
587 | |||
588 | raw_spin_lock(&_global_env.lock); | ||
589 | global_schedule_now = gmp_update_time(&_global_env, now); | ||
590 | |||
591 | BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); | ||
592 | |||
593 | /* Find the lowest cpu, and call reschedule */ | ||
594 | while (global_schedule_now--) { | ||
595 | int cpu = get_lowest_prio_cpu(0); | ||
596 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
597 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
598 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
599 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
600 | TRACE("LOWEST CPU = P%d\n", cpu); | ||
601 | if (cpu == state->cpu && update > now) | ||
602 | litmus_reschedule_local(); | ||
603 | else | ||
604 | reschedule[cpu] = 1; | ||
605 | } | ||
606 | } | ||
607 | raw_spin_unlock(&_global_env.lock); | ||
608 | |||
609 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
610 | //raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
611 | |||
612 | TS_ISR_END; | ||
613 | |||
614 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
615 | if (reschedule[cpus]) { | ||
616 | //litmus_reschedule(cpus); | ||
617 | struct mc2_cpu_state *remote_state; | ||
618 | |||
619 | remote_state = cpu_state_for(cpus); | ||
620 | raw_spin_lock(&remote_state->lock); | ||
621 | preempt_if_preemptable(remote_state->scheduled, remote_state->cpu); | ||
622 | raw_spin_unlock(&remote_state->lock); | ||
623 | } | ||
624 | } | ||
625 | |||
626 | |||
627 | return restart; | ||
628 | } | ||
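/* Sketch (assumption: performed in the plugin's per-CPU initialization,
 * which lies outside this excerpt): state->timer must be bound to
 * on_scheduling_timer() before it is first armed, e.g.
 *
 *	hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 *	state->timer.function = on_scheduling_timer;
 */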
629 | |||
630 | /* mc2_dispatch - Select the next task to schedule. | ||
631 | */ | ||
632 | struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state) | ||
633 | { | ||
634 | struct reservation *res, *next; | ||
635 | struct task_struct *tsk = NULL; | ||
636 | struct crit_entry *ce; | ||
637 | enum crit_level lv; | ||
638 | lt_t time_slice; | ||
639 | |||
640 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
641 | if (res->state == RESERVATION_ACTIVE) { | ||
642 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
643 | if (likely(tsk)) { | ||
644 | lv = get_task_crit_level(tsk); | ||
645 | if (lv == NUM_CRIT_LEVELS) { | ||
646 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
647 | return tsk; | ||
648 | } else { | ||
649 | ce = &state->crit_entries[lv]; | ||
650 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
651 | res->blocked_by_ghost = 0; | ||
652 | res->is_ghost = NO_CPU; | ||
653 | return tsk; | ||
654 | /* no ghost jobs | ||
655 | if (likely(!ce->running)) { | ||
656 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
657 | res->blocked_by_ghost = 0; | ||
658 | res->is_ghost = NO_CPU; | ||
659 | return tsk; | ||
660 | } else { | ||
661 | res->blocked_by_ghost = 1; | ||
662 | TRACE_TASK(ce->running, " is GHOST\n"); | ||
663 | } | ||
664 | */ | ||
665 | } | ||
666 | } | ||
667 | } | ||
668 | } | ||
669 | |||
670 | return NULL; | ||
671 | } | ||
672 | |||
673 | struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | ||
674 | { | ||
675 | struct reservation *res, *next; | ||
676 | struct task_struct *tsk = NULL; | ||
677 | //struct crit_entry *ce; | ||
678 | enum crit_level lv; | ||
679 | lt_t time_slice; | ||
680 | |||
681 | /* no eligible level-A or -B task exists */ | ||
682 | /* check the ghost job */ | ||
683 | /* | ||
684 | ce = &state->crit_entries[CRIT_LEVEL_C]; | ||
685 | if (ce->running) { | ||
686 | TRACE_TASK(ce->running," is GHOST\n"); | ||
687 | return NULL; | ||
688 | } | ||
689 | */ | ||
690 | list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { | ||
691 | BUG_ON(!res); | ||
692 | if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { | ||
693 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
694 | if (likely(tsk)) { | ||
695 | lv = get_task_crit_level(tsk); | ||
696 | if (lv == NUM_CRIT_LEVELS) { | ||
697 | #if BUDGET_ENFORCEMENT_AT_C | ||
698 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
699 | #endif | ||
700 | res->event_added = 1; | ||
701 | res->blocked_by_ghost = 0; | ||
702 | res->is_ghost = NO_CPU; | ||
703 | res->scheduled_on = state->cpu; | ||
704 | return tsk; | ||
705 | } else if (lv == CRIT_LEVEL_C) { | ||
706 | //ce = &state->crit_entries[lv]; | ||
707 | //if (likely(!ce->running)) { | ||
708 | #if BUDGET_ENFORCEMENT_AT_C | ||
709 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
710 | #endif | ||
711 | res->event_added = 1; | ||
712 | res->blocked_by_ghost = 0; | ||
713 | res->is_ghost = NO_CPU; | ||
714 | res->scheduled_on = state->cpu; | ||
715 | return tsk; | ||
716 | //} else { | ||
717 | // res->blocked_by_ghost = 1; | ||
718 | // TRACE_TASK(ce->running, " is GHOST\n"); | ||
719 | // return NULL; | ||
720 | //} | ||
721 | } else { | ||
722 | BUG(); | ||
723 | } | ||
724 | } | ||
725 | } | ||
726 | } | ||
727 | |||
728 | return NULL; | ||
729 | } | ||
730 | |||
731 | static inline void pre_schedule(struct task_struct *prev, int cpu) | ||
732 | { | ||
733 | TS_SCHED_A_START; | ||
734 | TS_SCHED_C_START; | ||
735 | |||
736 | if (!prev || !is_realtime(prev)) | ||
737 | return; | ||
738 | |||
739 | do_partition(CRIT_LEVEL_C, cpu); | ||
740 | } | ||
741 | |||
742 | static inline void post_schedule(struct task_struct *next, int cpu) | ||
743 | { | ||
744 | enum crit_level lev; | ||
745 | if ((!next) || !is_realtime(next)) | ||
746 | return; | ||
747 | |||
748 | lev = get_task_crit_level(next); | ||
749 | do_partition(lev, cpu); | ||
750 | |||
751 | switch(lev) { | ||
752 | case CRIT_LEVEL_A: | ||
753 | case CRIT_LEVEL_B: | ||
754 | TS_SCHED_A_END(next); | ||
755 | break; | ||
756 | case CRIT_LEVEL_C: | ||
757 | TS_SCHED_C_END(next); | ||
758 | break; | ||
759 | default: | ||
760 | break; | ||
761 | } | ||
762 | |||
763 | } | ||
764 | |||
765 | /* mc2_schedule - main scheduler function. pick the next task to run | ||
766 | */ | ||
767 | static struct task_struct* mc2_schedule(struct task_struct * prev) | ||
768 | { | ||
769 | /* next == NULL means "schedule background work". */ | ||
770 | lt_t now; | ||
771 | struct mc2_cpu_state *state = local_cpu_state(); | ||
772 | |||
773 | pre_schedule(prev, state->cpu); | ||
774 | |||
775 | /* 9/20/2015 fix | ||
776 | raw_spin_lock(&_global_env.lock); | ||
777 | */ | ||
778 | raw_spin_lock(&state->lock); | ||
779 | |||
780 | //BUG_ON(state->scheduled && state->scheduled != prev); | ||
781 | //BUG_ON(state->scheduled && !is_realtime(prev)); | ||
782 | if (state->scheduled && state->scheduled != prev) | ||
783 | ; //printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); | ||
784 | if (state->scheduled && !is_realtime(prev)) | ||
785 | ; //printk(KERN_ALERT "BUG2!!!!!!!! \n"); | ||
786 | |||
787 | /* update time */ | ||
788 | state->sup_env.will_schedule = true; | ||
789 | |||
790 | now = litmus_clock(); | ||
791 | sup_update_time(&state->sup_env, now); | ||
792 | /* 9/20/2015 fix | ||
793 | gmp_update_time(&_global_env, now); | ||
794 | */ | ||
795 | /* 9/20/2015 fix | ||
796 | mc2_update_ghost_state(state); | ||
797 | */ | ||
798 | |||
799 | /* remove task from reservation if it blocks */ | ||
800 | if (is_realtime(prev) && !is_running(prev)) { | ||
801 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
802 | raw_spin_lock(&_global_env.lock); | ||
803 | task_departs(prev, is_completed(prev)); | ||
804 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
805 | raw_spin_unlock(&_global_env.lock); | ||
806 | } | ||
807 | |||
808 | /* figure out what to schedule next */ | ||
809 | state->scheduled = mc2_dispatch(&state->sup_env, state); | ||
810 | /* if (state->scheduled && is_realtime(state->scheduled)) | ||
811 | TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n"); | ||
812 | */ | ||
813 | if (!state->scheduled) { | ||
814 | raw_spin_lock(&_global_env.lock); | ||
815 | gmp_update_time(&_global_env, now); | ||
816 | state->scheduled = mc2_global_dispatch(state); | ||
817 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
818 | update_cpu_prio(state); | ||
819 | raw_spin_unlock(&_global_env.lock); | ||
820 | } else { | ||
821 | raw_spin_lock(&_global_env.lock); | ||
822 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
823 | update_cpu_prio(state); | ||
824 | raw_spin_unlock(&_global_env.lock); | ||
825 | } | ||
826 | |||
827 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
828 | //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
829 | //update_cpu_prio(state); | ||
830 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
831 | |||
832 | /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ | ||
833 | sched_state_task_picked(); | ||
834 | |||
835 | /* program scheduler timer */ | ||
836 | state->sup_env.will_schedule = false; | ||
837 | |||
838 | /* NOTE: drops state->lock */ | ||
839 | mc2_update_timer_and_unlock(state); | ||
840 | |||
841 | if (prev != state->scheduled && is_realtime(prev)) { | ||
842 | struct mc2_task_state* tinfo = get_mc2_state(prev); | ||
843 | struct reservation* res = tinfo->res_info.client.reservation; | ||
844 | TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on); | ||
845 | res->scheduled_on = NO_CPU; | ||
846 | TRACE_TASK(prev, "descheduled.\n"); | ||
847 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ | ||
848 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { | ||
849 | int cpu; | ||
850 | raw_spin_lock(&_global_env.lock); | ||
851 | cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
852 | //TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); | ||
853 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
854 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
855 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
856 | resched_cpu[cpu] = 1; | ||
857 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
858 | } | ||
859 | raw_spin_unlock(&_global_env.lock); | ||
860 | } | ||
861 | } | ||
862 | if (state->scheduled) { | ||
863 | TRACE_TASK(state->scheduled, "scheduled.\n"); | ||
864 | } | ||
865 | |||
866 | post_schedule(state->scheduled, state->cpu); | ||
867 | |||
868 | return state->scheduled; | ||
869 | } | ||
870 | |||
871 | static void resume_legacy_task_model_updates(struct task_struct *tsk) | ||
872 | { | ||
873 | lt_t now; | ||
874 | if (is_sporadic(tsk)) { | ||
875 | /* If this sporadic task was gone for a "long" time and woke up past | ||
876 | * its deadline, then give it a new budget by triggering a job | ||
877 | * release. This is purely cosmetic and has no effect on the | ||
878 | * MC2 scheduler. */ | ||
879 | |||
880 | now = litmus_clock(); | ||
881 | if (is_tardy(tsk, now)) { | ||
882 | //release_at(tsk, now); | ||
883 | //sched_trace_task_release(tsk); | ||
884 | } | ||
885 | } | ||
886 | } | ||
887 | |||
888 | /* mc2_task_resume - Called when the state of tsk changes back to | ||
889 | * TASK_RUNNING. We need to requeue the task. | ||
890 | */ | ||
891 | static void mc2_task_resume(struct task_struct *tsk) | ||
892 | { | ||
893 | unsigned long flags; | ||
894 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
895 | struct mc2_cpu_state *state; | ||
896 | |||
897 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | ||
898 | |||
899 | local_irq_save(flags); | ||
900 | if (tinfo->cpu != -1) | ||
901 | state = cpu_state_for(tinfo->cpu); | ||
902 | else | ||
903 | state = local_cpu_state(); | ||
904 | |||
905 | /* 9/20/2015 fix | ||
906 | raw_spin_lock(&_global_env.lock); | ||
907 | */ | ||
908 | /* Requeue only if self-suspension was already processed. */ | ||
909 | if (tinfo->has_departed) | ||
910 | { | ||
911 | /* We don't want to consider jobs before synchronous releases */ | ||
912 | if (tsk_rt(tsk)->job_params.job_no > 5) { | ||
913 | switch(get_task_crit_level(tsk)) { | ||
914 | case CRIT_LEVEL_A: | ||
915 | TS_RELEASE_LATENCY_A(get_release(tsk)); | ||
916 | break; | ||
917 | case CRIT_LEVEL_B: | ||
918 | TS_RELEASE_LATENCY_B(get_release(tsk)); | ||
919 | break; | ||
920 | case CRIT_LEVEL_C: | ||
921 | TS_RELEASE_LATENCY_C(get_release(tsk)); | ||
922 | break; | ||
923 | default: | ||
924 | break; | ||
925 | } | ||
926 | } | ||
927 | |||
928 | raw_spin_lock(&state->lock); | ||
929 | /* Assumption: litmus_clock() is synchronized across cores, | ||
930 | * since we might not actually be executing on tinfo->cpu | ||
931 | * at the moment. */ | ||
932 | if (tinfo->cpu != -1) { | ||
933 | sup_update_time(&state->sup_env, litmus_clock()); | ||
934 | task_arrives(state, tsk); | ||
935 | } else { | ||
936 | raw_spin_lock(&_global_env.lock); | ||
937 | gmp_update_time(&_global_env, litmus_clock()); | ||
938 | task_arrives(state, tsk); | ||
939 | raw_spin_unlock(&_global_env.lock); | ||
940 | } | ||
941 | |||
942 | /* 9/20/2015 fix | ||
943 | mc2_update_ghost_state(state); | ||
944 | */ | ||
945 | //task_arrives(state, tsk); | ||
946 | /* NOTE: drops state->lock */ | ||
947 | TRACE_TASK(tsk, "mc2_resume()\n"); | ||
948 | mc2_update_timer_and_unlock(state); | ||
949 | } else { | ||
950 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); | ||
951 | //raw_spin_unlock(&_global_env.lock); | ||
952 | } | ||
953 | |||
954 | local_irq_restore(flags); | ||
955 | |||
956 | //gmp_free_passed_event(); | ||
957 | resume_legacy_task_model_updates(tsk); | ||
958 | } | ||
959 | |||
960 | /* mc2_complete_job - syscall backend for job completions | ||
961 | */ | ||
962 | static long mc2_complete_job(void) | ||
963 | { | ||
964 | ktime_t next_release; | ||
965 | long err; | ||
966 | |||
967 | tsk_rt(current)->completed = 1; | ||
968 | |||
969 | /* If this is the first job instance, we need to reset the | ||
970 | replenishment time to the next release time */ | ||
971 | if (tsk_rt(current)->sporadic_release) { | ||
972 | struct mc2_cpu_state *state; | ||
973 | struct reservation_environment *env; | ||
974 | struct mc2_task_state *tinfo; | ||
975 | struct reservation *res = NULL; | ||
976 | unsigned long flags; | ||
977 | enum crit_level lv; | ||
978 | |||
979 | preempt_disable(); | ||
980 | local_irq_save(flags); | ||
981 | |||
982 | tinfo = get_mc2_state(current); | ||
983 | lv = get_task_crit_level(current); | ||
984 | |||
985 | if (lv < CRIT_LEVEL_C) { | ||
986 | state = cpu_state_for(tinfo->cpu); | ||
987 | raw_spin_lock(&state->lock); | ||
988 | env = &(state->sup_env.env); | ||
989 | res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); | ||
990 | env->time_zero = tsk_rt(current)->sporadic_release_time; | ||
991 | } | ||
992 | else if (lv == CRIT_LEVEL_C) { | ||
993 | state = local_cpu_state(); | ||
994 | raw_spin_lock(&state->lock); | ||
995 | raw_spin_lock(&_global_env.lock); | ||
996 | res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id); | ||
997 | _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time; | ||
998 | } | ||
999 | else | ||
1000 | BUG(); | ||
1001 | |||
1002 | /* set next_replenishment to the synchronous release time */ | ||
1003 | BUG_ON(!res); | ||
1004 | res->next_replenishment = tsk_rt(current)->sporadic_release_time; | ||
1005 | /* | ||
1006 | if (get_task_crit_level(current) == CRIT_LEVEL_A) { | ||
1007 | struct table_driven_reservation *tdres; | ||
1008 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1009 | tdres->next_interval = 0; | ||
1010 | tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time; | ||
1011 | res->next_replenishment += tdres->intervals[0].start; | ||
1012 | } | ||
1013 | */ | ||
1014 | res->cur_budget = 0; | ||
1015 | res->env->change_state(res->env, res, RESERVATION_DEPLETED); | ||
1016 | |||
1017 | //TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); | ||
1018 | |||
1019 | //if (lv < CRIT_LEVEL_C) | ||
1020 | // raw_spin_unlock(&state->lock); | ||
1021 | //else | ||
1022 | if (lv == CRIT_LEVEL_C) | ||
1023 | raw_spin_unlock(&_global_env.lock); | ||
1024 | |||
1025 | raw_spin_unlock(&state->lock); | ||
1026 | local_irq_restore(flags); | ||
1027 | preempt_enable(); | ||
1028 | } | ||
1029 | |||
1030 | sched_trace_task_completion(current, 0); | ||
1031 | /* update the next release time and deadline */ | ||
1032 | prepare_for_next_period(current); | ||
1033 | sched_trace_task_release(current); | ||
1034 | next_release = ns_to_ktime(get_release(current)); | ||
1035 | preempt_disable(); | ||
1036 | TRACE_CUR("next_release=%llu\n", get_release(current)); | ||
1037 | if (get_release(current) > litmus_clock()) { | ||
1038 | /* sleep until next_release */ | ||
1039 | set_current_state(TASK_INTERRUPTIBLE); | ||
1040 | preempt_enable_no_resched(); | ||
1041 | err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS); | ||
1042 | } else { | ||
1043 | /* release the next job immediately */ | ||
1044 | err = 0; | ||
1045 | TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock()); | ||
1046 | preempt_enable(); | ||
1047 | } | ||
1048 | |||
1049 | TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock()); | ||
1050 | |||
1051 | return err; | ||
1052 | } | ||
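/* Userspace view (illustrative, assuming the standard liblitmus API):
 * this function is reached through the complete_job system call, so the
 * per-job loop of a periodic MC2 task typically looks like
 *
 *	while (keep_running) {
 *		do_work();
 *		sleep_next_period();	// traps into mc2_complete_job()
 *	}
 */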
1053 | |||
1054 | /* mc2_admit_task - Setup mc2 task parameters | ||
1055 | */ | ||
1056 | static long mc2_admit_task(struct task_struct *tsk) | ||
1057 | { | ||
1058 | long err = -ESRCH; | ||
1059 | unsigned long flags; | ||
1060 | struct reservation *res; | ||
1061 | struct mc2_cpu_state *state; | ||
1062 | struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC); | ||
1063 | struct mc2_task *mp = tsk_rt(tsk)->mc2_data; | ||
1064 | enum crit_level lv; | ||
1065 | |||
1066 | if (!tinfo) | ||
1067 | return -ENOMEM; | ||
1068 | |||
1069 | if (!mp) { | ||
1070 | printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n"); | ||
1071 | kfree(tinfo); return err; | ||
1072 | } | ||
1073 | |||
1074 | lv = mp->crit; | ||
1075 | preempt_disable(); | ||
1076 | |||
1077 | if (lv < CRIT_LEVEL_C) { | ||
1078 | state = cpu_state_for(task_cpu(tsk)); | ||
1079 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1080 | |||
1081 | res = sup_find_by_id(&state->sup_env, mp->res_id); | ||
1082 | |||
1083 | /* found the appropriate reservation */ | ||
1084 | if (res) { | ||
1085 | TRACE_TASK(tsk, "SUP FOUND RES ID\n"); | ||
1086 | tinfo->mc2_param.crit = mp->crit; | ||
1087 | tinfo->mc2_param.res_id = mp->res_id; | ||
1088 | |||
1089 | /* initial values */ | ||
1090 | err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); | ||
1091 | tinfo->cpu = task_cpu(tsk); | ||
1092 | tinfo->has_departed = true; | ||
1093 | tsk_rt(tsk)->plugin_state = tinfo; | ||
1094 | |||
1095 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
1096 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
1097 | } | ||
1098 | |||
1099 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1100 | } else if (lv == CRIT_LEVEL_C) { | ||
1101 | state = local_cpu_state(); | ||
1102 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1103 | raw_spin_lock(&_global_env.lock); | ||
1104 | //state = local_cpu_state(); | ||
1105 | |||
1106 | //raw_spin_lock(&state->lock); | ||
1107 | |||
1108 | res = gmp_find_by_id(&_global_env, mp->res_id); | ||
1109 | |||
1110 | /* found the appropriate reservation (or vCPU) */ | ||
1111 | if (res) { | ||
1112 | TRACE_TASK(tsk, "GMP FOUND RES ID\n"); | ||
1113 | tinfo->mc2_param.crit = mp->crit; | ||
1114 | tinfo->mc2_param.res_id = mp->res_id; | ||
1115 | |||
1116 | /* initial values */ | ||
1117 | err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); | ||
1118 | tinfo->cpu = -1; | ||
1119 | tinfo->has_departed = true; | ||
1120 | tsk_rt(tsk)->plugin_state = tinfo; | ||
1121 | |||
1122 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
1123 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
1124 | } | ||
1125 | |||
1126 | raw_spin_unlock(&_global_env.lock); | ||
1127 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1128 | } | ||
1129 | |||
1130 | preempt_enable(); | ||
1131 | |||
1132 | if (err) | ||
1133 | kfree(tinfo); | ||
1134 | |||
1135 | return err; | ||
1136 | } | ||
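/* Admission flow (illustrative summary): a task is only admitted if its
 * reservation already exists, so userspace is expected to proceed as
 *
 *	1. create reservation res_id on a CPU (or cpu == -1 for level C),
 *	2. attach mc2_data (criticality level + res_id) to the task, and
 *	3. transition the task to LITMUS^RT, which invokes mc2_admit_task().
 *
 * Otherwise sup_find_by_id()/gmp_find_by_id() fail and -ESRCH is
 * returned.
 */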
1137 | |||
1138 | /* mc2_task_new - A new real-time task has arrived. Release the next job | ||
1139 | * at the next reservation replenishment time | ||
1140 | */ | ||
1141 | static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | ||
1142 | int is_running) | ||
1143 | { | ||
1144 | unsigned long flags; | ||
1145 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1146 | struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); | ||
1147 | struct reservation *res; | ||
1148 | enum crit_level lv = get_task_crit_level(tsk); | ||
1149 | lt_t release = 0; | ||
1150 | |||
1151 | TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", | ||
1152 | litmus_clock(), on_runqueue, is_running); | ||
1153 | |||
1154 | if (tinfo->cpu == -1) | ||
1155 | state = local_cpu_state(); | ||
1156 | else | ||
1157 | state = cpu_state_for(tinfo->cpu); | ||
1158 | |||
1159 | local_irq_save(flags); | ||
1160 | |||
1161 | /* acquire the lock protecting the state and disable interrupts */ | ||
1162 | //raw_spin_lock(&_global_env.lock); | ||
1163 | //raw_spin_lock(&state->lock); | ||
1164 | if (is_running) { | ||
1165 | state->scheduled = tsk; | ||
1166 | /* make sure this task should actually be running */ | ||
1167 | litmus_reschedule_local(); | ||
1168 | } | ||
1169 | |||
1170 | raw_spin_lock(&state->lock); | ||
1171 | |||
1172 | if (lv == CRIT_LEVEL_C) { | ||
1173 | raw_spin_lock(&_global_env.lock); | ||
1174 | res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id); | ||
1175 | } | ||
1176 | else { | ||
1177 | res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); | ||
1178 | } | ||
1179 | //res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
1180 | release = res->next_replenishment; | ||
1181 | |||
1182 | if (on_runqueue || is_running) { | ||
1183 | /* Assumption: litmus_clock() is synchronized across cores | ||
1184 | * [see comment in pres_task_resume()] */ | ||
1185 | if (lv == CRIT_LEVEL_C) { | ||
1186 | gmp_update_time(&_global_env, litmus_clock()); | ||
1187 | //raw_spin_unlock(&_global_env.lock); | ||
1188 | } | ||
1189 | else | ||
1190 | sup_update_time(&state->sup_env, litmus_clock()); | ||
1191 | //mc2_update_time(lv, state, litmus_clock()); | ||
1192 | /* 9/20/2015 fix | ||
1193 | mc2_update_ghost_state(state); | ||
1194 | */ | ||
1195 | task_arrives(state, tsk); | ||
1196 | if (lv == CRIT_LEVEL_C) | ||
1197 | raw_spin_unlock(&_global_env.lock); | ||
1198 | /* NOTE: drops state->lock */ | ||
1199 | TRACE("mc2_new()\n"); | ||
1200 | |||
1201 | mc2_update_timer_and_unlock(state); | ||
1202 | } else { | ||
1203 | if (lv == CRIT_LEVEL_C) | ||
1204 | raw_spin_unlock(&_global_env.lock); | ||
1205 | raw_spin_unlock(&state->lock); | ||
1206 | //raw_spin_unlock(&_global_env.lock); | ||
1207 | } | ||
1208 | local_irq_restore(flags); | ||
1209 | |||
1210 | if (release) { | ||
1211 | TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release); | ||
1212 | //release_at(tsk, release); | ||
1213 | } | ||
1214 | else | ||
1215 | TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); | ||
1216 | } | ||
1217 | |||
1218 | /* mc2_reservation_destroy - reservation_destroy system call backend | ||
1219 | */ | ||
1220 | static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | ||
1221 | { | ||
1222 | long ret = -EINVAL; | ||
1223 | struct mc2_cpu_state *state; | ||
1224 | struct reservation *res = NULL, *next; | ||
1225 | struct sup_reservation_environment *sup_env; | ||
1226 | int found = 0; | ||
1227 | //enum crit_level lv = get_task_crit_level(current); | ||
1228 | unsigned long flags; | ||
1229 | |||
1230 | if (cpu == -1) { | ||
1231 | /* if the reservation is global reservation */ | ||
1232 | local_irq_save(flags); | ||
1233 | //state = local_cpu_state(); | ||
1234 | raw_spin_lock(&_global_env.lock); | ||
1235 | //raw_spin_lock(&state->lock); | ||
1236 | |||
1237 | list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { | ||
1238 | if (res->id == reservation_id) { | ||
1239 | list_del(&res->list); | ||
1240 | kfree(res); | ||
1241 | found = 1; | ||
1242 | ret = 0; | ||
1243 | } | ||
1244 | } | ||
1245 | if (!found) { | ||
1246 | list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) { | ||
1247 | if (res->id == reservation_id) { | ||
1248 | list_del(&res->list); | ||
1249 | kfree(res); | ||
1250 | found = 1; | ||
1251 | ret = 0; | ||
1252 | } | ||
1253 | } | ||
1254 | } | ||
1255 | if (!found) { | ||
1256 | list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { | ||
1257 | if (res->id == reservation_id) { | ||
1258 | list_del(&res->list); | ||
1259 | kfree(res); | ||
1260 | found = 1; | ||
1261 | ret = 0; | ||
1262 | } | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | //raw_spin_unlock(&state->lock); | ||
1267 | raw_spin_unlock(&_global_env.lock); | ||
1268 | local_irq_restore(flags); | ||
1269 | } else { | ||
1270 | /* the reservation is a partitioned reservation */ | ||
1271 | state = cpu_state_for(cpu); | ||
1272 | local_irq_save(flags); | ||
1273 | raw_spin_lock(&state->lock); | ||
1274 | |||
1275 | // res = sup_find_by_id(&state->sup_env, reservation_id); | ||
1276 | sup_env = &state->sup_env; | ||
1277 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | ||
1278 | if (res->id == reservation_id) { | ||
1279 | /* | ||
1280 | if (lv == CRIT_LEVEL_A) { | ||
1281 | struct table_driven_reservation *tdres; | ||
1282 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1283 | kfree(tdres->intervals); | ||
1284 | } | ||
1285 | */ | ||
1286 | list_del(&res->list); | ||
1287 | kfree(res); | ||
1288 | found = 1; | ||
1289 | ret = 0; | ||
1290 | } | ||
1291 | } | ||
1292 | if (!found) { | ||
1293 | list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { | ||
1294 | if (res->id == reservation_id) { | ||
1295 | /* if (lv == CRIT_LEVEL_A) { | ||
1296 | struct table_driven_reservation *tdres; | ||
1297 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1298 | kfree(tdres->intervals); | ||
1299 | } | ||
1300 | */ | ||
1301 | list_del(&res->list); | ||
1302 | kfree(res); | ||
1303 | found = 1; | ||
1304 | ret = 0; | ||
1305 | } | ||
1306 | } | ||
1307 | } | ||
1308 | if (!found) { | ||
1309 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
1310 | if (res->id == reservation_id) { | ||
1311 | /* if (lv == CRIT_LEVEL_A) { | ||
1312 | struct table_driven_reservation *tdres; | ||
1313 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1314 | kfree(tdres->intervals); | ||
1315 | } | ||
1316 | */ | ||
1317 | list_del(&res->list); | ||
1318 | kfree(res); | ||
1319 | found = 1; | ||
1320 | ret = 0; | ||
1321 | } | ||
1322 | } | ||
1323 | } | ||
1324 | |||
1325 | raw_spin_unlock(&state->lock); | ||
1326 | local_irq_restore(flags); | ||
1327 | } | ||
1328 | |||
1329 | TRACE("Rerservation destroyed ret = %d\n", ret); | ||
1330 | return ret; | ||
1331 | } | ||
1332 | |||
1333 | /* mc2_task_exit - Task becomes a normal (non-real-time) task | ||
1334 | */ | ||
1335 | static void mc2_task_exit(struct task_struct *tsk) | ||
1336 | { | ||
1337 | unsigned long flags; | ||
1338 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1339 | struct mc2_cpu_state *state; | ||
1340 | enum crit_level lv = tinfo->mc2_param.crit; | ||
1341 | struct crit_entry* ce; | ||
1342 | int cpu; | ||
1343 | |||
1344 | local_irq_save(flags); | ||
1345 | if (tinfo->cpu != -1) | ||
1346 | state = cpu_state_for(tinfo->cpu); | ||
1347 | else | ||
1348 | state = local_cpu_state(); | ||
1349 | |||
1350 | raw_spin_lock(&state->lock); | ||
1351 | |||
1352 | if (state->scheduled == tsk) | ||
1353 | state->scheduled = NULL; | ||
1354 | |||
1355 | ce = &state->crit_entries[lv]; | ||
1356 | if (ce->running == tsk) | ||
1357 | ce->running = NULL; | ||
1358 | |||
1359 | /* remove from queues */ | ||
1360 | if (is_running(tsk)) { | ||
1361 | /* Assumption: litmus_clock() is synchronized across cores | ||
1362 | * [see comment in pres_task_resume()] */ | ||
1363 | |||
1364 | /* update both global and partitioned */ | ||
1365 | if (lv < CRIT_LEVEL_C) { | ||
1366 | sup_update_time(&state->sup_env, litmus_clock()); | ||
1367 | } | ||
1368 | else if (lv == CRIT_LEVEL_C) { | ||
1369 | raw_spin_lock(&_global_env.lock); | ||
1370 | gmp_update_time(&_global_env, litmus_clock()); | ||
1371 | //raw_spin_unlock(&_global_env.lock); | ||
1372 | } | ||
1373 | /* 9/20/2015 fix | ||
1374 | mc2_update_ghost_state(state); | ||
1375 | */ | ||
1376 | task_departs(tsk, 0); | ||
1377 | if (lv == CRIT_LEVEL_C) | ||
1378 | raw_spin_unlock(&_global_env.lock); | ||
1379 | |||
1380 | /* NOTE: drops state->lock */ | ||
1381 | TRACE("mc2_exit()\n"); | ||
1382 | |||
1383 | mc2_update_timer_and_unlock(state); | ||
1384 | } else { | ||
1385 | raw_spin_unlock(&state->lock); | ||
1386 | |||
1387 | } | ||
1388 | |||
1389 | if (lv == CRIT_LEVEL_C) { | ||
1390 | for_each_online_cpu(cpu) { | ||
1391 | state = cpu_state_for(cpu); | ||
1392 | if (state == local_cpu_state()) | ||
1393 | continue; | ||
1394 | raw_spin_lock(&state->lock); | ||
1395 | |||
1396 | if (state->scheduled == tsk) | ||
1397 | state->scheduled = NULL; | ||
1398 | |||
1399 | ce = &state->crit_entries[lv]; | ||
1400 | if (ce->running == tsk) | ||
1401 | ce->running = NULL; | ||
1402 | |||
1403 | raw_spin_unlock(&state->lock); | ||
1404 | } | ||
1405 | } | ||
1406 | |||
1407 | local_irq_restore(flags); | ||
1408 | |||
1409 | kfree(tsk_rt(tsk)->plugin_state); | ||
1410 | tsk_rt(tsk)->plugin_state = NULL; | ||
1411 | kfree(tsk_rt(tsk)->mc2_data); | ||
1412 | tsk_rt(tsk)->mc2_data = NULL; | ||
1413 | } | ||
1414 | |||
1415 | /* create_polling_reservation - create a new polling reservation | ||
1416 | */ | ||
1417 | static long create_polling_reservation( | ||
1418 | int res_type, | ||
1419 | struct reservation_config *config) | ||
1420 | { | ||
1421 | struct mc2_cpu_state *state; | ||
1422 | struct reservation* res; | ||
1423 | struct polling_reservation *pres; | ||
1424 | unsigned long flags; | ||
1425 | int use_edf = config->priority == LITMUS_NO_PRIORITY; | ||
1426 | int periodic = res_type == PERIODIC_POLLING; | ||
1427 | long err = -EINVAL; | ||
1428 | |||
1429 | /* sanity checks */ | ||
1430 | if (config->polling_params.budget > | ||
1431 | config->polling_params.period) { | ||
1432 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1433 | "budget > period\n", config->id); | ||
1434 | return -EINVAL; | ||
1435 | } | ||
1436 | if (config->polling_params.budget > | ||
1437 | config->polling_params.relative_deadline | ||
1438 | && config->polling_params.relative_deadline) { | ||
1439 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1440 | "budget > deadline\n", config->id); | ||
1441 | return -EINVAL; | ||
1442 | } | ||
1443 | if (config->polling_params.offset > | ||
1444 | config->polling_params.period) { | ||
1445 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1446 | "offset > period\n", config->id); | ||
1447 | return -EINVAL; | ||
1448 | } | ||
1449 | |||
1450 | /* Allocate before we grab a spin lock. | ||
1451 | * Todo: would be nice to use a core-local allocation. | ||
1452 | */ | ||
1453 | pres = kzalloc(sizeof(*pres), GFP_KERNEL); | ||
1454 | if (!pres) | ||
1455 | return -ENOMEM; | ||
1456 | |||
1457 | if (config->cpu != -1) { | ||
1458 | |||
1459 | //raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
1460 | state = cpu_state_for(config->cpu); | ||
1461 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1462 | |||
1463 | res = sup_find_by_id(&state->sup_env, config->id); | ||
1464 | if (!res) { | ||
1465 | polling_reservation_init(pres, use_edf, periodic, | ||
1466 | config->polling_params.budget, | ||
1467 | config->polling_params.period, | ||
1468 | config->polling_params.relative_deadline, | ||
1469 | config->polling_params.offset); | ||
1470 | pres->res.id = config->id; | ||
1471 | pres->res.blocked_by_ghost = 0; | ||
1472 | pres->res.is_ghost = NO_CPU; | ||
1473 | if (!use_edf) | ||
1474 | pres->res.priority = config->priority; | ||
1475 | sup_add_new_reservation(&state->sup_env, &pres->res); | ||
1476 | err = config->id; | ||
1477 | } else { | ||
1478 | err = -EEXIST; | ||
1479 | } | ||
1480 | |||
1481 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1482 | //raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
1483 | |||
1484 | } else { | ||
1485 | raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
1486 | |||
1487 | res = gmp_find_by_id(&_global_env, config->id); | ||
1488 | if (!res) { | ||
1489 | polling_reservation_init(pres, use_edf, periodic, | ||
1490 | config->polling_params.budget, | ||
1491 | config->polling_params.period, | ||
1492 | config->polling_params.relative_deadline, | ||
1493 | config->polling_params.offset); | ||
1494 | pres->res.id = config->id; | ||
1495 | pres->res.blocked_by_ghost = 0; | ||
1496 | pres->res.scheduled_on = NO_CPU; | ||
1497 | pres->res.is_ghost = NO_CPU; | ||
1498 | if (!use_edf) | ||
1499 | pres->res.priority = config->priority; | ||
1500 | gmp_add_new_reservation(&_global_env, &pres->res); | ||
1501 | err = config->id; | ||
1502 | } else { | ||
1503 | err = -EEXIST; | ||
1504 | } | ||
1505 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
1506 | } | ||
1507 | |||
1508 | if (err < 0) | ||
1509 | kfree(pres); | ||
1510 | |||
1511 | return err; | ||
1512 | } | ||
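/* Example (illustrative only; field names as used above): a periodic
 * polling reservation with a 10 ms budget and 50 ms period on CPU 0,
 * prioritized by EDF:
 *
 *	struct reservation_config cfg = {
 *		.id  = 42,
 *		.cpu = 0,			// -1 would make it global (level C)
 *		.priority = LITMUS_NO_PRIORITY,	// EDF, cf. use_edf above
 *		.polling_params = {
 *			.budget            = 10000000ULL,  // 10 ms in ns
 *			.period            = 50000000ULL,  // 50 ms in ns
 *			.relative_deadline = 0,            // implicit deadline
 *			.offset            = 0,
 *		},
 *	};
 *
 *	err = create_polling_reservation(PERIODIC_POLLING, &cfg);
 */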

#define MAX_INTERVALS 1024

/* create_table_driven_reservation - create a table-driven reservation
 */
static long create_table_driven_reservation(
	struct reservation_config *config)
{
	struct mc2_cpu_state *state;
	struct reservation *res;
	struct table_driven_reservation *td_res = NULL;
	struct lt_interval *slots = NULL;
	size_t slots_size;
	unsigned int i, num_slots;
	unsigned long flags;
	long err = -EINVAL;

	if (!config->table_driven_params.num_intervals) {
		printk(KERN_ERR "invalid table-driven reservation (%u): "
		       "no intervals\n", config->id);
		return -EINVAL;
	}

	if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
		printk(KERN_ERR "invalid table-driven reservation (%u): "
		       "too many intervals (max: %d)\n", config->id,
		       MAX_INTERVALS);
		return -EINVAL;
	}

	num_slots = config->table_driven_params.num_intervals;
	slots_size = sizeof(slots[0]) * num_slots;
	slots = kzalloc(slots_size, GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
	if (!td_res)
		err = -ENOMEM;
	else if (copy_from_user(slots,
			config->table_driven_params.intervals, slots_size))
		/* copy_from_user() returns the number of uncopied bytes,
		 * not a negative errno; map any failure to -EFAULT so the
		 * cleanup path below frees the allocations. */
		err = -EFAULT;
	else
		err = 0;

	if (!err) {
		/* sanity checks */
		for (i = 0; !err && i < num_slots; i++)
			if (slots[i].end <= slots[i].start) {
				printk(KERN_ERR
				       "invalid table-driven reservation (%u): "
				       "invalid interval %u => [%llu, %llu]\n",
				       config->id, i,
				       slots[i].start, slots[i].end);
				err = -EINVAL;
			}

		for (i = 0; !err && i + 1 < num_slots; i++)
			if (slots[i + 1].start <= slots[i].end) {
				printk(KERN_ERR
				       "invalid table-driven reservation (%u): "
				       "overlapping intervals %u, %u\n",
				       config->id, i, i + 1);
				err = -EINVAL;
			}

		if (!err && slots[num_slots - 1].end >
		    config->table_driven_params.major_cycle_length) {
			printk(KERN_ERR
			       "invalid table-driven reservation (%u): last "
			       "interval ends past major cycle %llu > %llu\n",
			       config->id,
			       slots[num_slots - 1].end,
			       config->table_driven_params.major_cycle_length);
			err = -EINVAL;
		}
	}

	if (!err) {
		state = cpu_state_for(config->cpu);
		raw_spin_lock_irqsave(&state->lock, flags);

		res = sup_find_by_id(&state->sup_env, config->id);
		if (!res) {
			table_driven_reservation_init(td_res,
				config->table_driven_params.major_cycle_length,
				slots, num_slots);
			td_res->res.id = config->id;
			td_res->res.priority = config->priority;
			td_res->res.blocked_by_ghost = 0;
			sup_add_new_reservation(&state->sup_env, &td_res->res);
			err = config->id;
		} else {
			err = -EEXIST;
		}

		raw_spin_unlock_irqrestore(&state->lock, flags);
	}

	if (err < 0) {
		kfree(slots);
		kfree(td_res);
	}

	return err;
}
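
/* Illustrative example: a slot table that satisfies the checks above ---
 * every interval is non-empty, intervals are strictly ordered without
 * overlap, and the last interval ends within the major cycle. Times in
 * nanoseconds.
 *
 *	struct lt_interval slots[] = {
 *		{ .start =        0, .end =  5000000 },	// [ 0 ms,  5 ms)
 *		{ .start = 20000000, .end = 25000000 },	// [20 ms, 25 ms)
 *		{ .start = 50000000, .end = 55000000 },	// [50 ms, 55 ms)
 *	};
 *
 * paired with table_driven_params.major_cycle_length = 100000000 (100 ms),
 * .num_intervals = 3, and .intervals pointing at a user-space copy of the
 * table.
 */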

/* mc2_reservation_create - reservation_create system call backend
 */
static long mc2_reservation_create(int res_type, void __user *_config)
{
	long ret = -EINVAL;
	struct reservation_config config;

	TRACE("Attempt to create reservation (%d)\n", res_type);

	if (copy_from_user(&config, _config, sizeof(config)))
		return -EFAULT;

	if (config.cpu != -1) {
		if (config.cpu < 0 || !cpu_online(config.cpu)) {
			printk(KERN_ERR "invalid reservation (%u): "
			       "CPU %d invalid or offline\n",
			       config.id, config.cpu);
			return -EINVAL;
		}
	}

	switch (res_type) {
	case PERIODIC_POLLING:
	case SPORADIC_POLLING:
		ret = create_polling_reservation(res_type, &config);
		break;

	case TABLE_DRIVEN:
		ret = create_table_driven_reservation(&config);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
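
/* Illustrative only: user space reaches this backend through the
 * reservation_create() system call; a liblitmus-style wrapper (wrapper
 * name assumed) might be used as follows:
 *
 *	ret = reservation_create(PERIODIC_POLLING, &config);
 *	if (ret < 0)
 *		perror("reservation_create");
 *	// on success, ret is the new reservation's ID (config.id)
 */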

static struct domain_proc_info mc2_domain_proc_info;

static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
{
	*ret = &mc2_domain_proc_info;
	return 0;
}

static void mc2_setup_domain_proc(void)
{
	int i, cpu;
	int num_rt_cpus = num_online_cpus();

	struct cd_mapping *cpu_map, *domain_map;

	memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
	init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
	mc2_domain_proc_info.num_cpus = num_rt_cpus;
	mc2_domain_proc_info.num_domains = num_rt_cpus;

	i = 0;
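	/* Build a 1:1 CPU-to-domain mapping: each online CPU forms its own
	 * scheduling domain, since level-A/B reservations are partitioned
	 * per CPU. */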
	for_each_online_cpu(cpu) {
		cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
		domain_map = &mc2_domain_proc_info.domain_to_cpus[i];

		cpu_map->id = cpu;
		domain_map->id = i;
		cpumask_set_cpu(i, cpu_map->mask);
		cpumask_set_cpu(cpu, domain_map->mask);
		++i;
	}
}

static long mc2_activate_plugin(void)
{
	int cpu, lv;
	struct mc2_cpu_state *state;
	struct cpu_entry *ce;

	gmp_init(&_global_env);
	raw_spin_lock_init(&_lowest_prio_cpu.lock);

	for_each_online_cpu(cpu) {
		TRACE("Initializing CPU%d...\n", cpu);

		resched_cpu[cpu] = 0;
		state = cpu_state_for(cpu);
		ce = &_lowest_prio_cpu.cpu_entries[cpu];

		ce->cpu = cpu;
		ce->scheduled = NULL;
		ce->deadline = ULLONG_MAX;
		ce->lv = NUM_CRIT_LEVELS;	/* sentinel: no level assigned */
		ce->will_schedule = false;

		raw_spin_lock_init(&state->lock);
		state->cpu = cpu;
		state->scheduled = NULL;
		for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
			struct crit_entry *cr_entry = &state->crit_entries[lv];
			cr_entry->level = lv;
			cr_entry->running = NULL;
		}
		sup_init(&state->sup_env);

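		/* HRTIMER_MODE_ABS_PINNED keeps each per-CPU scheduling
		 * timer on its own CPU, so expirations are handled locally
		 * without cross-CPU interrupts. */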
		hrtimer_init(&state->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_ABS_PINNED);
		state->timer.function = on_scheduling_timer;
	}

	mc2_setup_domain_proc();

	return 0;
}

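/* mc2_finish_switch - context-switch completion callback.
 * Records which real-time task, if any, is now running on this CPU so
 * that subsequent scheduling decisions see an up-to-date assignment.
 */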
static void mc2_finish_switch(struct task_struct *prev)
{
	struct mc2_cpu_state *state = local_cpu_state();

	state->scheduled = is_realtime(current) ? current : NULL;
}

static long mc2_deactivate_plugin(void)
{
	int cpu;
	struct mc2_cpu_state *state;
	struct reservation *res;
	struct next_timer_event *event;
	struct cpu_entry *ce;

	for_each_online_cpu(cpu) {
		state = cpu_state_for(cpu);
		/* Cancel the timer before taking state->lock:
		 * hrtimer_cancel() waits for a running callback, and
		 * on_scheduling_timer() acquires the same lock. */
		hrtimer_cancel(&state->timer);
		raw_spin_lock(&state->lock);

		ce = &_lowest_prio_cpu.cpu_entries[cpu];

		ce->cpu = cpu;
		ce->scheduled = NULL;
		ce->deadline = ULLONG_MAX;
		ce->lv = NUM_CRIT_LEVELS;
		ce->will_schedule = false;

		/* Delete all reservations --- assumes struct reservation
		 * is prefix of containing struct. */
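		/* (i.e., kfree() on the embedded struct reservation also
		 * frees the enclosing polling/table-driven reservation,
		 * because the reservation is the container's first member) */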

		while (!list_empty(&state->sup_env.active_reservations)) {
			res = list_first_entry(
				&state->sup_env.active_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		while (!list_empty(&state->sup_env.inactive_reservations)) {
			res = list_first_entry(
				&state->sup_env.inactive_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		while (!list_empty(&state->sup_env.depleted_reservations)) {
			res = list_first_entry(
				&state->sup_env.depleted_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		raw_spin_unlock(&state->lock);
	}

	raw_spin_lock(&_global_env.lock);

	while (!list_empty(&_global_env.active_reservations)) {
		res = list_first_entry(
			&_global_env.active_reservations,
			struct reservation, list);
		list_del(&res->list);
		kfree(res);
	}

	while (!list_empty(&_global_env.inactive_reservations)) {
		res = list_first_entry(
			&_global_env.inactive_reservations,
			struct reservation, list);
		list_del(&res->list);
		kfree(res);
	}

	while (!list_empty(&_global_env.depleted_reservations)) {
		res = list_first_entry(
			&_global_env.depleted_reservations,
			struct reservation, list);
		list_del(&res->list);
		kfree(res);
	}

	while (!list_empty(&_global_env.next_events)) {
		event = list_first_entry(
			&_global_env.next_events,
			struct next_timer_event, list);
		list_del(&event->list);
		kfree(event);
	}

	raw_spin_unlock(&_global_env.lock);

	destroy_domain_proc_info(&mc2_domain_proc_info);
	return 0;
}

static struct sched_plugin mc2_plugin = {
	.plugin_name		= "MC2",
	.schedule		= mc2_schedule,
	.finish_switch		= mc2_finish_switch,
	.task_wake_up		= mc2_task_resume,
	.admit_task		= mc2_admit_task,
	.task_new		= mc2_task_new,
	.task_exit		= mc2_task_exit,
	.complete_job		= mc2_complete_job,
	.get_domain_proc_info	= mc2_get_domain_proc_info,
	.activate_plugin	= mc2_activate_plugin,
	.deactivate_plugin	= mc2_deactivate_plugin,
	.reservation_create	= mc2_reservation_create,
	.reservation_destroy	= mc2_reservation_destroy,
};

static int __init init_mc2(void)
{
	return register_sched_plugin(&mc2_plugin);
}

module_init(init_mc2);
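
/* Usage note (illustrative): once registered, the plugin is selected at
 * runtime through the LITMUS^RT proc interface, e.g.
 *
 *	echo MC2 > /proc/litmus/active_plugin
 *
 * which triggers mc2_activate_plugin() above; the liblitmus `setsched`
 * helper wraps the same interface.
 */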