-rw-r--r-- | litmus/sched_mc2.c | 2313 |
1 file changed, 2312 insertions, 1 deletion
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 78981922613b..33dfc9894976 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -1 +1,2312 @@
1 | a | 1 | /* |
2 | * litmus/sched_mc2.c | ||
3 | * | ||
4 | * Implementation of the Mixed-Criticality on MultiCore scheduler | ||
5 | * | ||
6 | * This plugin implements the scheduling algorithm proposed in the | ||
7 | * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper. | ||
8 | */ | ||
9 | |||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/rwlock.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | |||
15 | #include <litmus/sched_plugin.h> | ||
16 | #include <litmus/preempt.h> | ||
17 | #include <litmus/debug_trace.h> | ||
18 | |||
19 | #include <litmus/litmus.h> | ||
20 | #include <litmus/jobs.h> | ||
21 | #include <litmus/budget.h> | ||
22 | #include <litmus/litmus_proc.h> | ||
23 | #include <litmus/sched_trace.h> | ||
24 | #include <litmus/cache_proc.h> | ||
25 | #include <litmus/trace.h> | ||
26 | |||
27 | #include <litmus/mc2_common.h> | ||
28 | #include <litmus/reservation.h> | ||
29 | #include <litmus/polling_reservations.h> | ||
30 | |||
31 | #ifdef CONFIG_PGMRT_SUPPORT | ||
32 | #include <litmus/pgm.h> | ||
33 | #endif | ||
34 | |||
35 | //#define TRACE(fmt, args...) do {} while (false) | ||
36 | //#define TRACE_TASK(fmt, args...) do {} while (false) | ||
37 | |||
38 | #define BUDGET_ENFORCEMENT_AT_C 0 | ||
39 | |||
40 | extern void do_partition(enum crit_level lv, int cpu); | ||
41 | |||
42 | /* _global_env - reservation container for level-C tasks*/ | ||
43 | struct gmp_reservation_environment _global_env_modes[NR_MODES]; | ||
44 | struct gmp_reservation_environment *_global_env; | ||
45 | raw_spinlock_t global_lock; | ||
46 | |||
47 | /* cpu_entry - keep track of a running task on a cpu | ||
48 | * This state is used to decide the lowest priority cpu | ||
49 | */ | ||
50 | struct cpu_entry { | ||
51 | struct task_struct *scheduled; | ||
52 | lt_t deadline; | ||
53 | int cpu; | ||
54 | enum crit_level lv; | ||
55 | /* if will_schedule is true, this cpu has already been selected and | ||
56 | will call mc2_schedule() soon. */ | ||
57 | bool will_schedule; | ||
58 | }; | ||
59 | |||
60 | /* cpu_priority - a global state for choosing the lowest priority CPU */ | ||
61 | struct cpu_priority { | ||
62 | raw_spinlock_t lock; | ||
63 | struct cpu_entry cpu_entries[NR_CPUS]; | ||
64 | }; | ||
65 | |||
66 | struct cpu_priority _lowest_prio_cpu; | ||
67 | |||
68 | /* mc2_task_state - a task state structure */ | ||
69 | struct mc2_task_state { | ||
70 | struct task_client res_info[NR_MODES]; | ||
71 | /* if cpu == -1, this task is a global task (level C) */ | ||
72 | int cpu; | ||
73 | bool has_departed; | ||
74 | struct mc2_task mc2_param; | ||
75 | }; | ||
76 | |||
77 | /* mc2_cpu_state - maintain the scheduled state and ghost jobs | ||
78 | * timer : timer for partitioned tasks (level A and B) | ||
79 | * g_timer : timer for global tasks (level C) | ||
80 | */ | ||
81 | struct mc2_cpu_state { | ||
82 | raw_spinlock_t lock; | ||
83 | |||
84 | struct sup_reservation_environment sup_env_modes[NR_MODES]; | ||
85 | struct sup_reservation_environment *sup_env; | ||
86 | struct hrtimer timer; | ||
87 | |||
88 | int cpu; | ||
89 | struct task_struct* scheduled; | ||
90 | //struct crit_entry crit_entries[NUM_CRIT_LEVELS]; | ||
91 | bool spin_flag; //not used on cpu 0 | ||
92 | }; | ||
93 | |||
94 | |||
95 | static int resched_cpu[NR_CPUS]; | ||
96 | static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); | ||
97 | //level_a_priorities unused | ||
98 | //static int level_a_priorities[NR_CPUS]; | ||
99 | |||
100 | #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) | ||
101 | #define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state)) | ||
102 | |||
103 | |||
104 | unsigned int mode; //currently executing mode, from 0 to NR_MODES-1 | ||
105 | unsigned int requested_mode; //The pending mode | ||
106 | /* Prevent multiple requests from entering, and prevent a new request from entering while an old | ||
107 | * one is being enacted */ | ||
108 | raw_spinlock_t mode_lock; | ||
109 | |||
110 | unsigned int mode_sizes[NR_MODES]; | ||
111 | unsigned int res_reported; | ||
112 | bool cpu_0_spin_flag; | ||
113 | bool seen_once; | ||
114 | bool cpu_0_task_exist; | ||
115 | bool mode_changed; | ||
116 | #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum)) | ||
117 | #define pending (mode != requested_mode) | ||
118 | #define ready (!res_reported) | ||
119 | |||
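/*
 * Illustrative sketch, not part of this patch: 'pending' and 'ready' above
 * are bare expression macros, so they are only meaningful while mode_lock
 * is held. The enactment condition checked by sys_enact_mode() can be read
 * as the following (hypothetical) helper:
 *
 *	static inline bool mode_change_ready(void)
 *	{
 *		// caller holds mode_lock
 *		// an MCR is pending and every level-C reservation of the
 *		// current mode has reported completion of its current job
 *		return (mode != requested_mode) && (res_reported == 0);
 *	}
 */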
120 | /* | ||
121 | * To be called from a level-A task whose period is equal to | ||
122 | * the A and B hyperperiod | ||
123 | */ | ||
124 | |||
125 | asmlinkage long sys_enact_mode(void) | ||
126 | { | ||
127 | struct mc2_cpu_state *state = local_cpu_state(); | ||
128 | struct reservation *res; | ||
129 | struct list_head *pos; | ||
130 | //lt_t now = litmus_clock(); | ||
131 | TRACE_TASK(current, "ENACTING MODE TASK\n"); | ||
132 | if (state->cpu == 0){ | ||
133 | preempt_disable(); | ||
134 | raw_spin_lock(&global_lock); | ||
135 | raw_spin_lock(&mode_lock); | ||
136 | mode_changed = false; | ||
137 | if (pending){ //MCR has entered | ||
138 | if (!seen_once){ | ||
139 | TRACE_TASK(current, "NOTICED MCR\n"); | ||
140 | sched_trace_request_mode(current); | ||
141 | //clean up jobs that are already done | ||
142 | //after this, jobs report themselves | ||
143 | list_for_each(pos, &_global_env->active_reservations){ | ||
144 | res = list_entry(pos, struct reservation, list); | ||
145 | |||
146 | if (tsk_rt(res->tsk)->completed){ | ||
147 | res->reported = 1; | ||
148 | res_reported--; | ||
149 | } | ||
150 | } | ||
151 | list_for_each(pos, &_global_env->depleted_reservations){ | ||
152 | res = list_entry(pos, struct reservation, list); | ||
153 | if (tsk_rt(res->tsk)->completed){ | ||
154 | res->reported = 1; | ||
155 | res_reported--; | ||
156 | } | ||
157 | |||
158 | } | ||
159 | list_for_each(pos, &_global_env->inactive_reservations){ | ||
160 | res = list_entry(pos, struct reservation, list); | ||
161 | if (tsk_rt(res->tsk)->completed){ | ||
162 | res->reported = 1; | ||
163 | res_reported--; | ||
164 | } | ||
165 | } | ||
166 | seen_once = true; | ||
167 | } | ||
168 | if( ready ){ //C is throttled | ||
169 | lt_t new_mode_basetime = get_release(current); | ||
170 | |||
171 | TRACE("Timer canceled\n"); | ||
172 | hrtimer_cancel(&state->timer);//stop listening to old mode timers | ||
173 | mode = requested_mode; | ||
174 | TRACE("Mode has been changed.\n"); | ||
175 | mode_changed = true; | ||
176 | _global_env = &_global_env_modes[mode]; | ||
177 | //set res->reported for new global tasks | ||
178 | list_for_each(pos, &_global_env->active_reservations){ | ||
179 | res = list_entry(pos, struct reservation, list); | ||
180 | release_at(res->tsk, new_mode_basetime); | ||
181 | res->reported = 0; | ||
182 | } | ||
183 | list_for_each(pos, &_global_env->depleted_reservations){ | ||
184 | res = list_entry(pos, struct reservation, list); | ||
185 | release_at(res->tsk, new_mode_basetime); | ||
186 | res->reported = 0; | ||
187 | } | ||
188 | list_for_each(pos, &_global_env->inactive_reservations){ | ||
189 | res = list_entry(pos, struct reservation, list); | ||
190 | release_at(res->tsk, new_mode_basetime); | ||
191 | res->reported = 0; | ||
192 | } | ||
193 | //gmp_update_time(_global_env, now); | ||
194 | raw_spin_lock(&state->lock); | ||
195 | |||
196 | state->sup_env = &state->sup_env_modes[mode]; | ||
197 | list_for_each(pos, &state->sup_env->active_reservations){ | ||
198 | res = list_entry(pos, struct reservation, list); | ||
199 | release_at(res->tsk, new_mode_basetime); | ||
200 | } | ||
201 | list_for_each(pos, &state->sup_env->depleted_reservations){ | ||
202 | res = list_entry(pos, struct reservation, list); | ||
203 | release_at(res->tsk, new_mode_basetime); | ||
204 | } | ||
205 | list_for_each(pos, &state->sup_env->inactive_reservations){ | ||
206 | res = list_entry(pos, struct reservation, list); | ||
207 | release_at(res->tsk, new_mode_basetime); | ||
208 | } | ||
209 | raw_spin_unlock(&state->lock); | ||
210 | |||
211 | sched_trace_enact_mode(current); | ||
212 | } | ||
213 | |||
214 | |||
215 | } | ||
216 | raw_spin_unlock(&mode_lock); | ||
217 | raw_spin_unlock(&global_lock); | ||
218 | preempt_enable(); | ||
219 | //release other CPUs | ||
220 | |||
221 | cpu_0_spin_flag = !cpu_0_spin_flag; | ||
222 | } | ||
223 | else if (cpu_0_task_exist) { | ||
224 | //spin, wait for CPU 0 to stabilize mode decision | ||
225 | //before scheduling next hyperperiod | ||
226 | TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed); | ||
227 | if (state->spin_flag) | ||
228 | while(cpu_0_spin_flag); | ||
229 | else | ||
230 | while(!cpu_0_spin_flag); | ||
231 | TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed); | ||
232 | if (mode_changed) { | ||
233 | lt_t new_mode_basetime = get_release(current); | ||
234 | TRACE("CPU%d mode changed\n",state->cpu); | ||
235 | hrtimer_cancel(&state->timer); //stop listening to old mode timers | ||
236 | //preempt_disable(); | ||
237 | raw_spin_lock(&state->lock); | ||
238 | state->sup_env = &state->sup_env_modes[mode]; | ||
239 | list_for_each(pos, &state->sup_env->active_reservations){ | ||
240 | res = list_entry(pos, struct reservation, list); | ||
241 | release_at(res->tsk, new_mode_basetime); | ||
242 | } | ||
243 | list_for_each(pos, &state->sup_env->depleted_reservations){ | ||
244 | res = list_entry(pos, struct reservation, list); | ||
245 | release_at(res->tsk, new_mode_basetime); | ||
246 | } | ||
247 | list_for_each(pos, &state->sup_env->inactive_reservations){ | ||
248 | res = list_entry(pos, struct reservation, list); | ||
249 | release_at(res->tsk, new_mode_basetime); | ||
250 | } | ||
251 | raw_spin_unlock(&state->lock); | ||
252 | //preempt_enable(); | ||
253 | } | ||
254 | state->spin_flag = !state->spin_flag; | ||
255 | } | ||
256 | else | ||
257 | return 0; | ||
258 | //if mode didn't change this has no effect on what's being scheduled | ||
259 | state->sup_env = &state->sup_env_modes[mode]; | ||
260 | //sup_update_time(state->sup_env, now); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | |||
265 | /* | ||
266 | * Called from non-real time program | ||
267 | * Protected by an exclusive lock to prevent it from occurring while a mode change is being enacted | ||
268 | */ | ||
269 | asmlinkage long sys_request_mode(int new_mode){ | ||
270 | raw_spin_lock(&mode_lock); | ||
271 | if (pending){ | ||
272 | raw_spin_unlock(&mode_lock); | ||
273 | return -EAGAIN; | ||
274 | } | ||
275 | if (mode == new_mode){ | ||
276 | raw_spin_unlock(&mode_lock); | ||
277 | return 0; | ||
278 | } | ||
279 | requested_mode = new_mode; | ||
280 | TRACE("MCR received\n"); | ||
281 | res_reported = mode_sizes[mode]; | ||
282 | seen_once = false; | ||
283 | raw_spin_unlock(&mode_lock); | ||
284 | return 0; | ||
285 | } | ||
286 | |||
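/*
 * Illustrative sketch, not part of this patch, of the intended handshake
 * between the two syscalls above: a non-real-time controller issues a
 * mode-change request (MCR) via sys_request_mode(), and the per-CPU
 * level-A mode-poll tasks call sys_enact_mode() once per A/B hyperperiod
 * until CPU 0 sees all level-C jobs of the current mode reported and flips
 * 'mode' to 'requested_mode'. The user-space wrapper names below are
 * hypothetical stand-ins for the corresponding syscall stubs:
 *
 *	// controller (non-real-time):
 *	while (request_mode(next_mode) == -EAGAIN)
 *		usleep(1000);	// a previous MCR is still being enacted
 *
 *	// mode-poll task, once per A/B hyperperiod:
 *	enact_mode();
 */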
287 | |||
288 | /* get_mc2_state - get the task's state */ | ||
289 | static struct mc2_task_state* get_mc2_state(struct task_struct *tsk) | ||
290 | { | ||
291 | struct mc2_task_state* tinfo; | ||
292 | |||
293 | tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state; | ||
294 | |||
295 | if (tinfo) | ||
296 | return tinfo; | ||
297 | else | ||
298 | return NULL; | ||
299 | } | ||
300 | |||
301 | /* get_task_crit_level - return the criticality level of a task */ | ||
302 | static enum crit_level get_task_crit_level(struct task_struct *tsk) | ||
303 | { | ||
304 | struct mc2_task *mp; | ||
305 | |||
306 | if (!tsk || !is_realtime(tsk)) | ||
307 | return NUM_CRIT_LEVELS; | ||
308 | |||
309 | mp = tsk_rt(tsk)->mc2_data; | ||
310 | |||
311 | if (!mp) | ||
312 | return NUM_CRIT_LEVELS; | ||
313 | else | ||
314 | return mp->crit; | ||
315 | } | ||
316 | |||
317 | static int is_init_finished(struct task_struct *tsk) | ||
318 | { | ||
319 | struct mc2_task *mp; | ||
320 | |||
321 | if (!tsk || !is_realtime(tsk)) | ||
322 | return 0; | ||
323 | |||
324 | mp = tsk_rt(tsk)->mc2_data; | ||
325 | |||
326 | if (!mp) | ||
327 | return 0; | ||
328 | else | ||
329 | return mp->init_finished; | ||
330 | } | ||
331 | |||
332 | /* task_departs - remove a task from its reservation | ||
333 | * If the job has remaining budget, convert it to a ghost job | ||
334 | * and update crit_entries[] | ||
335 | * | ||
336 | * @job_complete indicates whether the job has completed | ||
337 | */ | ||
338 | static void task_departs(struct task_struct *tsk, int job_complete) | ||
339 | { | ||
340 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
341 | //struct mc2_cpu_state* state = local_cpu_state(); | ||
342 | struct reservation* res = NULL; | ||
343 | struct reservation_client *client = NULL; | ||
344 | int i; | ||
345 | BUG_ON(!is_realtime(tsk)); | ||
346 | |||
347 | for(i = 0; i < NR_MODES; i++){ | ||
348 | if (! in_mode(tsk, i) && i != 0) | ||
349 | continue; | ||
350 | res = tinfo->res_info[i].client.reservation; | ||
351 | client = &tinfo->res_info[i].client; | ||
352 | BUG_ON(!res); | ||
353 | BUG_ON(!client); | ||
354 | |||
355 | if (job_complete) | ||
356 | res->cur_budget = 0; | ||
357 | |||
358 | res->ops->client_departs(res, client, job_complete); | ||
359 | } | ||
360 | |||
361 | /* 9/18/2015 fix start - no ghost job handling, empty remaining budget */ | ||
362 | /* | ||
363 | if (job_complete) { | ||
364 | //res->cur_budget = 0; | ||
365 | } | ||
366 | */ | ||
367 | /* fix end */ | ||
368 | |||
369 | tinfo->has_departed = true; | ||
370 | TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock()); | ||
371 | } | ||
372 | |||
373 | /* task_arrives - put a task into its reservation | ||
374 | * If the job was a ghost job, remove it from crit_entries[] | ||
375 | */ | ||
376 | static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) | ||
377 | { | ||
378 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
379 | struct reservation* res; | ||
380 | struct reservation_client *client; | ||
381 | enum crit_level lv = get_task_crit_level(tsk); | ||
382 | int i; | ||
383 | |||
384 | switch(lv) { | ||
385 | case CRIT_LEVEL_A: | ||
386 | case CRIT_LEVEL_B: | ||
387 | TS_RELEASE_START; | ||
388 | break; | ||
389 | case CRIT_LEVEL_C: | ||
390 | TS_RELEASE_C_START; | ||
391 | break; | ||
392 | default: | ||
393 | break; | ||
394 | } | ||
395 | |||
396 | tinfo->has_departed = false; | ||
397 | |||
398 | TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock()); | ||
399 | |||
400 | for(i = 0; i < NR_MODES; i++){ | ||
401 | if (! in_mode(tsk, i) && i != 0) | ||
402 | continue; | ||
403 | res = tinfo->res_info[i].client.reservation; | ||
404 | client = &tinfo->res_info[i].client; | ||
405 | |||
406 | res->ops->client_arrives(res, client); | ||
407 | } | ||
408 | |||
409 | switch(lv) { | ||
410 | case CRIT_LEVEL_A: | ||
411 | case CRIT_LEVEL_B: | ||
412 | TS_RELEASE_END; | ||
413 | break; | ||
414 | case CRIT_LEVEL_C: | ||
415 | TS_RELEASE_C_END; | ||
416 | break; | ||
417 | default: | ||
418 | break; | ||
419 | } | ||
420 | } | ||
421 | |||
422 | /* get_lowest_prio_cpu - return the lowest priority cpu | ||
423 | * This will be used for scheduling level-C tasks. | ||
424 | * If all CPUs are running tasks that have | ||
425 | * higher priority than level C, return NO_CPU. | ||
426 | */ | ||
427 | static int get_lowest_prio_cpu(lt_t priority) | ||
428 | { | ||
429 | struct cpu_entry *ce; | ||
430 | int cpu, ret = NO_CPU; | ||
431 | lt_t latest_deadline = 0; | ||
432 | |||
433 | ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu]; | ||
434 | if (!ce->will_schedule && !ce->scheduled) { | ||
435 | TRACE("CPU %d (local) is the lowest!\n", ce->cpu); | ||
436 | return ce->cpu; | ||
437 | } else { | ||
438 | TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0); | ||
439 | } | ||
440 | |||
441 | for_each_online_cpu(cpu) { | ||
442 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
443 | /* If a CPU will call schedule() in the near future, we don't | ||
444 | return that CPU. */ | ||
445 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, | ||
446 | ce->scheduled ? (ce->scheduled)->comm : "null", | ||
447 | ce->scheduled ? (ce->scheduled)->pid : 0, | ||
448 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); | ||
449 | if (!ce->will_schedule) { | ||
450 | if (!ce->scheduled) { | ||
451 | /* Idle cpu, return this. */ | ||
452 | TRACE("CPU %d is the lowest!\n", ce->cpu); | ||
453 | return ce->cpu; | ||
454 | } else if (ce->lv == CRIT_LEVEL_C && | ||
455 | ce->deadline > latest_deadline) { | ||
456 | latest_deadline = ce->deadline; | ||
457 | ret = ce->cpu; | ||
458 | } | ||
459 | } | ||
460 | } | ||
461 | |||
462 | if (priority >= latest_deadline) | ||
463 | ret = NO_CPU; | ||
464 | |||
465 | TRACE("CPU %d is the lowest!\n", ret); | ||
466 | |||
467 | return ret; | ||
468 | } | ||
469 | |||
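/*
 * Illustrative caller-side sketch, not part of this patch: with global_lock
 * held, a newly released level-C job with priority (absolute deadline)
 * 'prio' is placed by poking the CPU returned above, mirroring the pattern
 * used in mc2_update_timer_and_unlock() and on_scheduling_timer() below:
 *
 *	int cpu = get_lowest_prio_cpu(prio);
 *	if (cpu != NO_CPU && !_lowest_prio_cpu.cpu_entries[cpu].will_schedule) {
 *		_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 *		litmus_reschedule(cpu);
 *	}
 */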
470 | /* NOTE: drops state->lock */ | ||
471 | /* mc2_update_timer_and_unlock - set a timer and g_timer and unlock | ||
472 | * Whenever res_env.current_time is updated, | ||
473 | * we check next_scheduler_update and set | ||
474 | * a timer. | ||
475 | * If there exists a global event that is | ||
476 | * not armed on any CPU and g_timer is not | ||
477 | * active, set a g_timer for that event. | ||
478 | */ | ||
479 | static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | ||
480 | { | ||
481 | int local, cpus; | ||
482 | lt_t update, now; | ||
483 | //enum crit_level lv = get_task_crit_level(state->scheduled); | ||
484 | struct next_timer_event *event, *next; | ||
485 | int reschedule[NR_CPUS]; | ||
486 | |||
487 | for (cpus = 0; cpus<NR_CPUS; cpus++) | ||
488 | reschedule[cpus] = 0; | ||
489 | |||
490 | update = state->sup_env->next_scheduler_update; | ||
491 | now = state->sup_env->env.current_time; | ||
492 | |||
493 | /* Be sure we're actually running on the right core, | ||
494 | * as this function is also called from mc2_task_resume(), | ||
495 | * which might be called on any CPU when a thread resumes. | ||
496 | */ | ||
497 | local = local_cpu_state() == state; | ||
498 | |||
499 | raw_spin_lock(&global_lock); | ||
500 | |||
501 | list_for_each_entry_safe(event, next, &_global_env->next_events, list) { | ||
502 | /* If the event time has already passed, we call schedule() on | ||
503 | the lowest priority cpu */ | ||
504 | if (event->next_update >= update) { | ||
505 | break; | ||
506 | } | ||
507 | |||
508 | if (event->next_update < litmus_clock()) { | ||
509 | if (event->timer_armed_on == NO_CPU) { | ||
510 | struct reservation *res = gmp_find_by_id(_global_env, event->id); | ||
511 | int cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
512 | TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); | ||
513 | list_del(&event->list); | ||
514 | kfree(event); | ||
515 | if (cpu != NO_CPU) { | ||
516 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
517 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
518 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
519 | if (cpu == local_cpu_state()->cpu) | ||
520 | litmus_reschedule_local(); | ||
521 | else | ||
522 | reschedule[cpu] = 1; | ||
523 | } | ||
524 | } | ||
525 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { | ||
526 | event->timer_armed_on = state->cpu; | ||
527 | update = event->next_update; | ||
528 | break; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | /* Must drop state lock before calling into hrtimer_start(), which | ||
533 | * may raise a softirq, which in turn may wake ksoftirqd. */ | ||
534 | raw_spin_unlock(&global_lock); | ||
535 | raw_spin_unlock(&state->lock); | ||
536 | |||
537 | if (update <= now || reschedule[state->cpu]) { | ||
538 | reschedule[state->cpu] = 0; | ||
539 | litmus_reschedule(state->cpu); | ||
540 | /* | ||
541 | raw_spin_lock(&state->lock); | ||
542 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
543 | raw_spin_unlock(&state->lock); | ||
544 | */ | ||
545 | } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
546 | /* Reprogram only if not already set correctly. */ | ||
547 | if (!hrtimer_active(&state->timer) || | ||
548 | ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) { | ||
549 | TRACE("canceling timer...at %llu\n", | ||
550 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
551 | hrtimer_cancel(&state->timer); | ||
552 | TRACE("setting scheduler timer for %llu\n", update); | ||
553 | /* We cannot use hrtimer_start() here because the | ||
554 | * wakeup flag must be set to zero. */ | ||
555 | __hrtimer_start_range_ns(&state->timer, | ||
556 | ns_to_ktime(update), | ||
557 | 0 /* timer coalescing slack */, | ||
558 | HRTIMER_MODE_ABS_PINNED, | ||
559 | 0 /* wakeup */); | ||
560 | if (update < litmus_clock()) { | ||
561 | /* uh oh, timer expired while trying to set it */ | ||
562 | TRACE("timer expired during setting " | ||
563 | "update:%llu now:%llu actual:%llu\n", | ||
564 | update, now, litmus_clock()); | ||
565 | /* The timer HW may not have been reprogrammed | ||
566 | * correctly; force rescheduling now. */ | ||
567 | litmus_reschedule(state->cpu); | ||
568 | } | ||
569 | } | ||
570 | } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
571 | /* Poke remote core only if timer needs to be set earlier than | ||
572 | * it is currently set. | ||
573 | */ | ||
574 | TRACE("mc2_update_timer for remote CPU %d (update=%llu, " | ||
575 | "active:%d, set:%llu)\n", | ||
576 | state->cpu, | ||
577 | update, | ||
578 | hrtimer_active(&state->timer), | ||
579 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
580 | if (!hrtimer_active(&state->timer) || | ||
581 | ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) { | ||
582 | TRACE("poking CPU %d so that it can update its " | ||
583 | "scheduling timer (active:%d, set:%llu)\n", | ||
584 | state->cpu, | ||
585 | hrtimer_active(&state->timer), | ||
586 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
587 | //litmus_reschedule(state->cpu); | ||
588 | /* | ||
589 | raw_spin_lock(&state->lock); | ||
590 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
591 | raw_spin_unlock(&state->lock); | ||
592 | reschedule[state->cpu] = 0; | ||
593 | */ | ||
594 | } | ||
595 | } | ||
596 | /* | ||
597 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
598 | if (reschedule[cpus]) { | ||
599 | litmus_reschedule(cpus); | ||
600 | } | ||
601 | } | ||
602 | */ | ||
603 | } | ||
604 | |||
605 | /* update_cpu_prio - Update cpu's priority | ||
606 | * When a cpu picks a new task, call this function | ||
607 | * to update cpu priorities. | ||
608 | */ | ||
609 | static void update_cpu_prio(struct mc2_cpu_state *state) | ||
610 | { | ||
611 | struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu]; | ||
612 | enum crit_level lv = get_task_crit_level(state->scheduled); | ||
613 | |||
614 | if (!state->scheduled) { | ||
615 | /* cpu is idle. */ | ||
616 | ce->scheduled = NULL; | ||
617 | ce->deadline = ULLONG_MAX; | ||
618 | ce->lv = NUM_CRIT_LEVELS; | ||
619 | } else if (lv == CRIT_LEVEL_C) { | ||
620 | ce->scheduled = state->scheduled; | ||
621 | ce->deadline = get_deadline(state->scheduled); | ||
622 | ce->lv = lv; | ||
623 | } else if (lv < CRIT_LEVEL_C) { | ||
624 | /* If cpu is running level A or B tasks, it is not eligible | ||
625 | to run level-C tasks */ | ||
626 | ce->scheduled = state->scheduled; | ||
627 | ce->deadline = 0; | ||
628 | ce->lv = lv; | ||
629 | } | ||
630 | }; | ||
631 | |||
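/*
 * Worked example with assumed values, not from this patch: the ce->deadline
 * encoding above makes get_lowest_prio_cpu() prefer idle CPUs (deadline
 * ULLONG_MAX), then the level-C task with the latest deadline, and never a
 * CPU running level A/B (deadline 0). E.g., with CPU0 running level B,
 * CPU1 idle, and CPU2 running a level-C job with deadline 80, a level-C
 * job with deadline 50 goes to CPU1; if CPU1 were also running level A/B,
 * it would preempt CPU2 (since 50 < 80); a job with deadline 90 would get
 * NO_CPU.
 */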
632 | /* on_scheduling_timer - timer event for partitioned tasks | ||
633 | */ | ||
634 | static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | ||
635 | { | ||
636 | unsigned long flags; | ||
637 | enum hrtimer_restart restart = HRTIMER_NORESTART; | ||
638 | struct mc2_cpu_state *state; | ||
639 | lt_t update, now; | ||
640 | int global_schedule_now; | ||
641 | //lt_t remain_budget; // no ghost jobs | ||
642 | int reschedule[NR_CPUS]; | ||
643 | int cpus; | ||
644 | |||
645 | for (cpus = 0; cpus<NR_CPUS; cpus++) | ||
646 | reschedule[cpus] = 0; | ||
647 | |||
648 | state = container_of(timer, struct mc2_cpu_state, timer); | ||
649 | |||
650 | /* The scheduling timer should only fire on the local CPU, because | ||
651 | * otherwise deadlocks via timer_cancel() are possible. | ||
652 | * Note: this does not interfere with dedicated interrupt handling, as | ||
653 | * even under dedicated interrupt handling scheduling timers for | ||
654 | * budget enforcement must occur locally on each CPU. | ||
655 | */ | ||
656 | BUG_ON(state->cpu != raw_smp_processor_id()); | ||
657 | |||
658 | TS_ISR_START; | ||
659 | |||
660 | TRACE("Timer fired at %llu\n", litmus_clock()); | ||
661 | //raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
662 | raw_spin_lock_irqsave(&state->lock, flags); | ||
663 | now = litmus_clock(); | ||
664 | sup_update_time(state->sup_env, now); | ||
665 | |||
666 | /* 9/20/2015 fix - no ghost job | ||
667 | remain_budget = mc2_update_ghost_state(state); | ||
668 | */ | ||
669 | update = state->sup_env->next_scheduler_update; | ||
670 | now = state->sup_env->env.current_time; | ||
671 | |||
672 | |||
673 | if (update <= now) { | ||
674 | litmus_reschedule_local(); | ||
675 | } else if (update != SUP_NO_SCHEDULER_UPDATE) { | ||
676 | hrtimer_set_expires(timer, ns_to_ktime(update)); | ||
677 | restart = HRTIMER_RESTART; | ||
678 | } | ||
679 | |||
680 | raw_spin_lock(&global_lock); | ||
681 | global_schedule_now = gmp_update_time(_global_env, now); | ||
682 | BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); | ||
683 | |||
684 | /* Find the lowest cpu, and call reschedule */ | ||
685 | while (global_schedule_now--) { | ||
686 | int cpu = get_lowest_prio_cpu(0); | ||
687 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
688 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
689 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
690 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
691 | TRACE("LOWEST CPU = P%d\n", cpu); | ||
692 | if (cpu == state->cpu && update > now) | ||
693 | litmus_reschedule_local(); | ||
694 | else | ||
695 | reschedule[cpu] = 1; | ||
696 | } | ||
697 | } | ||
698 | raw_spin_unlock(&global_lock); | ||
699 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
700 | //raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
701 | |||
702 | TS_ISR_END; | ||
703 | |||
704 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
705 | if (reschedule[cpus]) { | ||
706 | litmus_reschedule(cpus); | ||
707 | /* | ||
708 | struct mc2_cpu_state *remote_state; | ||
709 | |||
710 | remote_state = cpu_state_for(cpus); | ||
711 | raw_spin_lock(&remote_state->lock); | ||
712 | preempt_if_preemptable(remote_state->scheduled, remote_state->cpu); | ||
713 | raw_spin_unlock(&remote_state->lock); | ||
714 | */ | ||
715 | } | ||
716 | } | ||
717 | |||
718 | |||
719 | return restart; | ||
720 | } | ||
721 | |||
722 | /* mc2_complete_job - syscall backend for job completions | ||
723 | */ | ||
724 | static long mc2_complete_job(void) | ||
725 | { | ||
726 | ktime_t next_release; | ||
727 | long err; | ||
728 | |||
729 | enum crit_level lv; | ||
730 | |||
731 | raw_spin_lock(&mode_lock); | ||
732 | tsk_rt(current)->completed = 1; | ||
733 | raw_spin_unlock(&mode_lock); | ||
734 | |||
735 | lv = get_task_crit_level(current); | ||
736 | |||
737 | /* If this is the first job instance, we need to reset the | ||
738 | replenishment time to the next release time */ | ||
739 | if (tsk_rt(current)->sporadic_release) { | ||
740 | struct mc2_cpu_state *state; | ||
741 | struct reservation_environment *env; | ||
742 | struct mc2_task_state *tinfo; | ||
743 | struct reservation *res = NULL; | ||
744 | unsigned long flags; | ||
745 | |||
746 | preempt_disable(); | ||
747 | local_irq_save(flags); | ||
748 | |||
749 | tinfo = get_mc2_state(current); | ||
750 | |||
751 | if (lv < CRIT_LEVEL_C) { | ||
752 | state = cpu_state_for(tinfo->cpu); | ||
753 | raw_spin_lock(&state->lock); | ||
754 | env = &(state->sup_env->env); | ||
755 | res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id); | ||
756 | env->time_zero = tsk_rt(current)->sporadic_release_time; | ||
757 | } | ||
758 | else if (lv == CRIT_LEVEL_C) { | ||
759 | state = local_cpu_state(); | ||
760 | raw_spin_lock(&state->lock); | ||
761 | raw_spin_lock(&global_lock); | ||
762 | res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id); | ||
763 | _global_env->env.time_zero = tsk_rt(current)->sporadic_release_time; | ||
764 | } | ||
765 | else | ||
766 | BUG(); | ||
767 | |||
768 | /* set next_replenishment to the synchronous release time */ | ||
769 | BUG_ON(!res); | ||
770 | res->next_replenishment = tsk_rt(current)->sporadic_release_time; | ||
771 | /* | ||
772 | if (get_task_crit_level(current) == CRIT_LEVEL_A) { | ||
773 | struct table_driven_reservation *tdres; | ||
774 | tdres = container_of(res, struct table_driven_reservation, res); | ||
775 | tdres->next_interval = 0; | ||
776 | tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time; | ||
777 | res->next_replenishment += tdres->intervals[0].start; | ||
778 | } | ||
779 | */ | ||
780 | res->cur_budget = 0; | ||
781 | res->env->change_state(res->env, res, RESERVATION_DEPLETED); | ||
782 | |||
783 | TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update); | ||
784 | |||
785 | //if (lv < CRIT_LEVEL_C) | ||
786 | // raw_spin_unlock(&state->lock); | ||
787 | //else | ||
788 | if (lv == CRIT_LEVEL_C) | ||
789 | raw_spin_unlock(&global_lock); | ||
790 | |||
791 | raw_spin_unlock(&state->lock); | ||
792 | local_irq_restore(flags); | ||
793 | preempt_enable(); | ||
794 | } | ||
795 | |||
796 | sched_trace_task_completion(current, 0); | ||
797 | /* update the next release time and deadline */ | ||
798 | prepare_for_next_period(current); | ||
799 | sched_trace_task_release(current); | ||
800 | next_release = ns_to_ktime(get_release(current)); | ||
801 | preempt_disable(); | ||
802 | TRACE_CUR("next_release=%llu\n", get_release(current)); | ||
803 | |||
804 | /* | ||
805 | * Changed logic for mode switch case | ||
806 | * In case of a mode switch, we do not want to release a | ||
807 | * new job even if its release time has passed | ||
808 | */ | ||
809 | |||
810 | raw_spin_lock(&mode_lock); | ||
811 | if (lv == CRIT_LEVEL_C && pending){ | ||
812 | struct reservation *res = NULL; | ||
813 | res = gmp_find_by_id(_global_env, tsk_mc2_data(current)->res_id); | ||
814 | if (res && !res->reported){ | ||
815 | res_reported--; | ||
816 | |||
817 | res->reported = 1; | ||
818 | //Current task doesn't exist in new mode | ||
819 | if ( !in_mode(current, requested_mode) ){ | ||
820 | raw_spin_unlock(&mode_lock); | ||
821 | litmus_reschedule_local(); | ||
822 | } | ||
823 | //Otherwise schedule normally | ||
824 | else | ||
825 | raw_spin_unlock(&mode_lock); | ||
826 | } | ||
827 | else | ||
828 | raw_spin_unlock(&mode_lock); | ||
829 | |||
830 | } | ||
831 | else | ||
832 | raw_spin_unlock(&mode_lock); | ||
833 | |||
834 | if (get_release(current) > litmus_clock()) { | ||
835 | /* sleep until next_release */ | ||
836 | set_current_state(TASK_INTERRUPTIBLE); | ||
837 | preempt_enable_no_resched(); | ||
838 | err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS); | ||
839 | } else { | ||
840 | /* release the next job immediately */ | ||
841 | err = 0; | ||
842 | TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock()); | ||
843 | preempt_enable(); | ||
844 | } | ||
845 | |||
846 | TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock()); | ||
847 | |||
848 | raw_spin_lock(&mode_lock); | ||
849 | tsk_rt(current)->completed = 0; | ||
850 | raw_spin_unlock(&mode_lock); | ||
851 | return err; | ||
852 | } | ||
853 | |||
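/*
 * Illustrative sketch, not part of this patch: mc2_complete_job() is the
 * plugin backend reached when a task ends its current job, so a user-space
 * MC2 task follows the usual LITMUS^RT job loop. The wrapper name below is
 * assumed to be whatever liblitmus call signals job completion (e.g.
 * sleep_next_period()):
 *
 *	while (keep_running) {
 *		do_one_job();
 *		sleep_next_period();	// ends up in mc2_complete_job()
 *	}
 */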
854 | /* mc2_dispatch - Select the next task to schedule. | ||
855 | */ | ||
856 | struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state) | ||
857 | { | ||
858 | struct reservation *res, *next; | ||
859 | struct task_struct *tsk = NULL; | ||
860 | //struct crit_entry *ce; | ||
861 | enum crit_level lv; | ||
862 | lt_t time_slice; | ||
863 | |||
864 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
865 | if (res->state == RESERVATION_ACTIVE) { | ||
866 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
867 | if (likely(tsk)) { | ||
868 | lv = get_task_crit_level(tsk); | ||
869 | if (lv == NUM_CRIT_LEVELS) { | ||
870 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
871 | return tsk; | ||
872 | } else { | ||
873 | TRACE_TASK(tsk, "@@@@@DISPATCH@@@@@@@ init_finished? %s\n", is_init_finished(tsk)?"true":"false"); | ||
874 | if (!is_init_finished(tsk)) { | ||
875 | //ce = &state->crit_entries[lv]; | ||
876 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
877 | res->blocked_by_ghost = 0; | ||
878 | res->is_ghost = NO_CPU; | ||
879 | return tsk; | ||
880 | } else if (res->mode == mode) { | ||
881 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
882 | res->blocked_by_ghost = 0; | ||
883 | res->is_ghost = NO_CPU; | ||
884 | return tsk; | ||
885 | } | ||
886 | } | ||
887 | } | ||
888 | } | ||
889 | } | ||
890 | |||
891 | return NULL; | ||
892 | } | ||
893 | |||
894 | struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | ||
895 | { | ||
896 | struct reservation *res, *next; | ||
897 | struct task_struct *tsk = NULL; | ||
898 | //struct crit_entry *ce; | ||
899 | enum crit_level lv; | ||
900 | lt_t time_slice; | ||
901 | |||
902 | raw_spin_lock(&mode_lock); | ||
903 | list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { | ||
904 | BUG_ON(!res); | ||
905 | if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { | ||
906 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
907 | if (pending && res->reported && !in_mode(tsk, requested_mode)){ | ||
908 | continue; | ||
909 | } | ||
910 | if (likely(tsk)) { | ||
911 | lv = get_task_crit_level(tsk); | ||
912 | if (lv == NUM_CRIT_LEVELS) { | ||
913 | #if BUDGET_ENFORCEMENT_AT_C | ||
914 | gmp_add_event_after(_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
915 | #endif | ||
916 | res->event_added = 1; | ||
917 | res->blocked_by_ghost = 0; | ||
918 | res->is_ghost = NO_CPU; | ||
919 | res->scheduled_on = state->cpu; | ||
920 | raw_spin_unlock(&mode_lock); | ||
921 | return tsk; | ||
922 | } else if (lv == CRIT_LEVEL_C) { | ||
923 | //ce = &state->crit_entries[lv]; | ||
924 | //if (likely(!ce->running)) { | ||
925 | #if BUDGET_ENFORCEMENT_AT_C | ||
926 | gmp_add_event_after(_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
927 | #endif | ||
928 | res->event_added = 1; | ||
929 | res->blocked_by_ghost = 0; | ||
930 | res->is_ghost = NO_CPU; | ||
931 | res->scheduled_on = state->cpu; | ||
932 | raw_spin_unlock(&mode_lock); | ||
933 | return tsk; | ||
934 | //} else { | ||
935 | // res->blocked_by_ghost = 1; | ||
936 | // TRACE_TASK(ce->running, " is GHOST\n"); | ||
937 | // return NULL; | ||
938 | //} | ||
939 | } else { | ||
940 | BUG(); | ||
941 | } | ||
942 | } | ||
943 | } | ||
944 | } | ||
945 | raw_spin_unlock(&mode_lock); | ||
946 | return NULL; | ||
947 | } | ||
948 | |||
949 | static inline void pre_schedule(struct task_struct *prev, int cpu) | ||
950 | { | ||
951 | TS_SCHED_A_START; | ||
952 | TS_SCHED_C_START; | ||
953 | |||
954 | if (!prev || !is_realtime(prev)) | ||
955 | return; | ||
956 | |||
957 | do_partition(CRIT_LEVEL_C, cpu); | ||
958 | } | ||
959 | |||
960 | static inline void post_schedule(struct task_struct *next, int cpu) | ||
961 | { | ||
962 | enum crit_level lev; | ||
963 | if ((!next) || !is_realtime(next)) | ||
964 | return; | ||
965 | |||
966 | lev = get_task_crit_level(next); | ||
967 | if (is_mode_poll_task(next)) { | ||
968 | lev = MODE_POLL_TASK; | ||
969 | } | ||
970 | |||
971 | do_partition(lev, cpu); | ||
972 | |||
973 | switch(lev) { | ||
974 | case CRIT_LEVEL_A: | ||
975 | case CRIT_LEVEL_B: | ||
976 | case MODE_POLL_TASK: | ||
977 | TS_SCHED_A_END(next); | ||
978 | break; | ||
979 | case CRIT_LEVEL_C: | ||
980 | TS_SCHED_C_END(next); | ||
981 | break; | ||
982 | default: | ||
983 | break; | ||
984 | } | ||
985 | |||
986 | } | ||
987 | |||
988 | /* mc2_schedule - main scheduler function. pick the next task to run | ||
989 | */ | ||
990 | static struct task_struct* mc2_schedule(struct task_struct * prev) | ||
991 | { | ||
992 | int np, blocks, exists, preempt, to_schedule = 0; | ||
993 | /* next == NULL means "schedule background work". */ | ||
994 | lt_t now = litmus_clock(); | ||
995 | struct mc2_cpu_state *state = local_cpu_state(); | ||
996 | |||
997 | raw_spin_lock(&state->lock); | ||
998 | |||
999 | raw_spin_lock(&global_lock); | ||
1000 | preempt = resched_cpu[state->cpu]; | ||
1001 | resched_cpu[state->cpu] = 0; | ||
1002 | raw_spin_unlock(&global_lock); | ||
1003 | |||
1004 | pre_schedule(prev, state->cpu); | ||
1005 | |||
1006 | BUG_ON(state->scheduled && state->scheduled != prev); | ||
1007 | BUG_ON(state->scheduled && !is_realtime(prev)); | ||
1008 | |||
1009 | //if (state->scheduled && state->scheduled != prev) | ||
1010 | // printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); | ||
1011 | //if (state->scheduled && !is_realtime(prev)) | ||
1012 | // printk(KERN_ALERT "BUG2!!!!!!!! \n"); | ||
1013 | |||
1014 | /* (0) Determine state */ | ||
1015 | exists = state->scheduled != NULL; | ||
1016 | blocks = exists && !is_current_running(); | ||
1017 | np = exists && is_np(state->scheduled); | ||
1018 | |||
1019 | /* update time */ | ||
1020 | state->sup_env->will_schedule = true; | ||
1021 | sup_update_time(state->sup_env, now); | ||
1022 | /* 9/20/2015 fix */ | ||
1023 | //raw_spin_lock(&_global_env.lock); | ||
1024 | //to_schedule = gmp_update_time(&_global_env, now); | ||
1025 | //raw_spin_unlock(&_global_env.lock); | ||
1026 | |||
1027 | /* 9/20/2015 fix | ||
1028 | mc2_update_ghost_state(state); | ||
1029 | */ | ||
1030 | |||
1031 | /* remove task from reservation if it blocks */ | ||
1032 | /* | ||
1033 | if (is_realtime(prev) && !is_running(prev)) { | ||
1034 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
1035 | raw_spin_lock(&_global_env.lock); | ||
1036 | task_departs(prev, is_completed(prev)); | ||
1037 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
1038 | raw_spin_unlock(&_global_env.lock); | ||
1039 | }*/ | ||
1040 | if (is_realtime(current) && blocks) { | ||
1041 | if (get_task_crit_level(current) == CRIT_LEVEL_C) | ||
1042 | raw_spin_lock(&global_lock); | ||
1043 | task_departs(current, is_completed(current)); | ||
1044 | if (get_task_crit_level(current) == CRIT_LEVEL_C) | ||
1045 | raw_spin_unlock(&global_lock); | ||
1046 | } | ||
1047 | |||
1048 | /* figure out what to schedule next */ | ||
1049 | if (!np) | ||
1050 | state->scheduled = mc2_dispatch(state->sup_env, state); | ||
1051 | |||
1052 | if (!state->scheduled) { | ||
1053 | raw_spin_lock(&global_lock); | ||
1054 | if (is_realtime(prev)) | ||
1055 | to_schedule = gmp_update_time(_global_env, now); | ||
1056 | state->scheduled = mc2_global_dispatch(state); | ||
1057 | raw_spin_unlock(&global_lock); | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | if (!state->scheduled) { | ||
1062 | raw_spin_lock(&global_lock); | ||
1063 | //to_schedule = gmp_update_time(_global_env, now); | ||
1064 | state->scheduled = mc2_global_dispatch(state); | ||
1065 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1066 | update_cpu_prio(state); | ||
1067 | raw_spin_unlock(&global_lock); | ||
1068 | } else { | ||
1069 | raw_spin_lock(&global_lock); | ||
1070 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1071 | update_cpu_prio(state); | ||
1072 | raw_spin_unlock(&global_lock); | ||
1073 | } | ||
1074 | */ | ||
1075 | |||
1076 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
1077 | //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1078 | //update_cpu_prio(state); | ||
1079 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
1080 | |||
1081 | /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ | ||
1082 | sched_state_task_picked(); | ||
1083 | |||
1084 | /* program scheduler timer */ | ||
1085 | state->sup_env->will_schedule = false; | ||
1086 | |||
1087 | /* NOTE: drops state->lock */ | ||
1088 | mc2_update_timer_and_unlock(state); | ||
1089 | |||
1090 | raw_spin_lock(&state->lock); | ||
1091 | if (prev != state->scheduled && is_realtime(prev)) { | ||
1092 | struct mc2_task_state* tinfo = get_mc2_state(prev); | ||
1093 | struct reservation* res = tinfo->res_info[mode].client.reservation; | ||
1094 | TRACE_TASK(prev, "PREEMPT_COUNT %d\n", preempt_count()); | ||
1095 | if (res) { | ||
1096 | TRACE_TASK(prev, "PREV JOB was scheduled_on = P%d\n", res->scheduled_on); | ||
1097 | res->scheduled_on = NO_CPU; | ||
1098 | } | ||
1099 | TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock()); | ||
1100 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ | ||
1101 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { | ||
1102 | int cpu; | ||
1103 | raw_spin_lock(&global_lock); | ||
1104 | cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
1105 | TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); | ||
1106 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
1107 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
1108 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
1109 | resched_cpu[cpu] = 1; | ||
1110 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
1111 | } | ||
1112 | raw_spin_unlock(&global_lock); | ||
1113 | } | ||
1114 | } | ||
1115 | |||
1116 | |||
1117 | if (to_schedule != 0) { | ||
1118 | raw_spin_lock(&global_lock); | ||
1119 | while (to_schedule--) { | ||
1120 | int cpu = get_lowest_prio_cpu(0); | ||
1121 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
1122 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
1123 | resched_cpu[cpu] = 1; | ||
1124 | } | ||
1125 | } | ||
1126 | raw_spin_unlock(&global_lock); | ||
1127 | } | ||
1128 | |||
1129 | post_schedule(state->scheduled, state->cpu); | ||
1130 | |||
1131 | raw_spin_lock(&global_lock); | ||
1132 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1133 | update_cpu_prio(state); | ||
1134 | raw_spin_unlock(&global_lock); | ||
1135 | |||
1136 | raw_spin_unlock(&state->lock); | ||
1137 | /* if (state->scheduled) { | ||
1138 | TRACE_TASK(state->scheduled, "scheduled.\n"); | ||
1139 | } | ||
1140 | */ | ||
1141 | |||
1142 | return state->scheduled; | ||
1143 | } | ||
1144 | |||
1145 | static void resume_legacy_task_model_updates(struct task_struct *tsk) | ||
1146 | { | ||
1147 | lt_t now; | ||
1148 | if (is_sporadic(tsk)) { | ||
1149 | /* If this sporadic task was gone for a "long" time and woke up past | ||
1150 | * its deadline, then give it a new budget by triggering a job | ||
1151 | * release. This is purely cosmetic and has no effect on the | ||
1152 | * MC2 scheduler. */ | ||
1153 | |||
1154 | now = litmus_clock(); | ||
1155 | if (is_tardy(tsk, now)) { | ||
1156 | release_at(tsk, now); | ||
1157 | //sched_trace_task_release(tsk); | ||
1158 | } | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | /* mc2_task_resume - Called when the state of tsk changes back to | ||
1163 | * TASK_RUNNING. We need to requeue the task. | ||
1164 | */ | ||
1165 | static void mc2_task_resume(struct task_struct *tsk) | ||
1166 | { | ||
1167 | unsigned long flags; | ||
1168 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1169 | struct mc2_cpu_state *state; | ||
1170 | |||
1171 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | ||
1172 | |||
1173 | local_irq_save(flags); | ||
1174 | if (tinfo->cpu != -1) | ||
1175 | state = cpu_state_for(tinfo->cpu); | ||
1176 | else | ||
1177 | state = local_cpu_state(); | ||
1178 | |||
1179 | /* 9/20/2015 fix | ||
1180 | raw_spin_lock(&_global_env.lock); | ||
1181 | */ | ||
1182 | /* Requeue only if self-suspension was already processed. */ | ||
1183 | if (tinfo->has_departed) | ||
1184 | { | ||
1185 | /* We don't want to consider jobs before synchronous releases */ | ||
1186 | if (tsk_rt(tsk)->job_params.job_no > 3) { | ||
1187 | switch(get_task_crit_level(tsk)) { | ||
1188 | case CRIT_LEVEL_A: | ||
1189 | TS_RELEASE_LATENCY_A(get_release(tsk)); | ||
1190 | break; | ||
1191 | case CRIT_LEVEL_B: | ||
1192 | TS_RELEASE_LATENCY_B(get_release(tsk)); | ||
1193 | break; | ||
1194 | case CRIT_LEVEL_C: | ||
1195 | TS_RELEASE_LATENCY_C(get_release(tsk)); | ||
1196 | break; | ||
1197 | default: | ||
1198 | break; | ||
1199 | } | ||
1200 | TRACE_CUR("INIT_FINISHED is SET\n"); | ||
1201 | tsk_mc2_data(tsk)->init_finished = 1; | ||
1202 | } | ||
1203 | |||
1204 | raw_spin_lock(&state->lock); | ||
1205 | /* Assumption: litmus_clock() is synchronized across cores, | ||
1206 | * since we might not actually be executing on tinfo->cpu | ||
1207 | * at the moment. */ | ||
1208 | if (tinfo->cpu != -1) { | ||
1209 | sup_update_time(state->sup_env, litmus_clock()); | ||
1210 | task_arrives(state, tsk); | ||
1211 | } else { | ||
1212 | raw_spin_lock(&global_lock); | ||
1213 | gmp_update_time(_global_env, litmus_clock()); | ||
1214 | task_arrives(state, tsk); | ||
1215 | raw_spin_unlock(&global_lock); | ||
1216 | } | ||
1217 | |||
1218 | /* 9/20/2015 fix | ||
1219 | mc2_update_ghost_state(state); | ||
1220 | */ | ||
1221 | //task_arrives(state, tsk); | ||
1222 | /* NOTE: drops state->lock */ | ||
1223 | TRACE_TASK(tsk, "mc2_resume()\n"); | ||
1224 | mc2_update_timer_and_unlock(state); | ||
1225 | } else { | ||
1226 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); | ||
1227 | //raw_spin_unlock(&_global_env.lock); | ||
1228 | } | ||
1229 | |||
1230 | local_irq_restore(flags); | ||
1231 | |||
1232 | //gmp_free_passed_event(); | ||
1233 | resume_legacy_task_model_updates(tsk); | ||
1234 | } | ||
1235 | |||
1236 | |||
1237 | /* mc2_admit_task - Setup mc2 task parameters | ||
1238 | */ | ||
1239 | static long mc2_admit_task(struct task_struct *tsk) | ||
1240 | { | ||
1241 | long err = 0; | ||
1242 | unsigned long flags; | ||
1243 | struct reservation *res; | ||
1244 | struct mc2_cpu_state *state; | ||
1245 | struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC); | ||
1246 | struct mc2_task *mp = tsk_rt(tsk)->mc2_data; | ||
1247 | enum crit_level lv; | ||
1248 | int i; | ||
1249 | |||
1250 | TRACE_TASK(tsk, "MC2 admitting task\n"); | ||
1251 | if (!tinfo) | ||
1252 | return -ENOMEM; | ||
1253 | |||
1254 | if (!mp) { | ||
1255 | printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n"); | ||
1256 | TRACE("mc2_admit_task: criticality level has not been set\n"); | ||
1257 | return -ESRCH; | ||
1258 | } | ||
1259 | |||
1260 | lv = mp->crit; | ||
1261 | preempt_disable(); | ||
1262 | |||
1263 | |||
1264 | if (lv < CRIT_LEVEL_C) { | ||
1265 | state = cpu_state_for(task_cpu(tsk)); | ||
1266 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1267 | |||
1268 | tinfo->mc2_param.crit = mp->crit; | ||
1269 | tinfo->cpu = task_cpu(tsk); | ||
1270 | tinfo->has_departed = true; | ||
1271 | tinfo->mc2_param.res_id = mp->res_id; | ||
1272 | tinfo->mc2_param.mode_mask = mp->mode_mask; | ||
1273 | tinfo->mc2_param.init_finished = 0; | ||
1274 | TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); | ||
1275 | |||
1276 | TRACE_TASK(tsk, "Mode 0\n"); | ||
1277 | res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id); | ||
1278 | |||
1279 | /* found the appropriate reservation */ | ||
1280 | if (res) { | ||
1281 | TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n"); | ||
1282 | |||
1283 | /* initial values */ | ||
1284 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); | ||
1285 | } | ||
1286 | else { | ||
1287 | //failed to find an expected reservation | ||
1288 | err = -ESRCH; | ||
1289 | } | ||
1290 | |||
1291 | for(i = 1; i < NR_MODES; i++){ | ||
1292 | if (!in_mode(tsk, i)){ | ||
1293 | //task not present in mode | ||
1294 | continue; | ||
1295 | } | ||
1296 | TRACE_TASK(tsk, "Mode %d\n",i); | ||
1297 | res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id); | ||
1298 | |||
1299 | /* found the appropriate reservation */ | ||
1300 | if (res) { | ||
1301 | TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i); | ||
1302 | |||
1303 | /* initial values */ | ||
1304 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); | ||
1305 | } | ||
1306 | else{ | ||
1307 | //failed to find an expected reservation | ||
1308 | err = -ESRCH; | ||
1309 | } | ||
1310 | } | ||
1311 | |||
1312 | if (!err){ | ||
1313 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
1314 | tsk_rt(tsk)->plugin_state = tinfo; | ||
1315 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
1316 | } | ||
1317 | TRACE_CUR("ctrl_page mode_poll_task %d, cpu = %d, tsk_rt->ctrl_page = %x\n", tsk_rt(tsk)->ctrl_page->mode_poll_task, tinfo->cpu, tsk_rt(tsk)->ctrl_page); | ||
1318 | if (is_mode_poll_task(tsk) && tinfo->cpu == 0) { | ||
1319 | TRACE_CUR("CPU0_TASK_EXIST set\n"); | ||
1320 | cpu_0_task_exist = true; | ||
1321 | } | ||
1322 | |||
1323 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1324 | } else if (lv == CRIT_LEVEL_C) { | ||
1325 | TRACE_TASK(tsk, "Task being admitted is Level C\n"); | ||
1326 | state = local_cpu_state(); | ||
1327 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1328 | raw_spin_lock(&global_lock); | ||
1329 | //state = local_cpu_state(); | ||
1330 | |||
1331 | //raw_spin_lock(&state->lock); | ||
1332 | |||
1333 | tinfo->mc2_param.crit = mp->crit; | ||
1334 | tinfo->cpu = -1; | ||
1335 | tinfo->has_departed = true; | ||
1336 | tinfo->mc2_param.res_id = mp->res_id; | ||
1337 | tinfo->mc2_param.mode_mask = mp->mode_mask; | ||
1338 | tinfo->mc2_param.init_finished = 0; | ||
1339 | |||
1340 | TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); | ||
1341 | |||
1342 | TRACE_TASK(tsk, "Mode 0\n"); | ||
1343 | res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id); | ||
1344 | |||
1345 | /* found the appropriate reservation */ | ||
1346 | if (res) { | ||
1347 | TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n"); | ||
1348 | |||
1349 | /* initial values */ | ||
1350 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); | ||
1351 | } | ||
1352 | else { | ||
1353 | //failed to find an expected reservation | ||
1354 | err = -ESRCH; | ||
1355 | } | ||
1356 | |||
1357 | for(i = 1; i < NR_MODES; i++){ | ||
1358 | if (!in_mode(tsk, i)) | ||
1359 | continue; | ||
1360 | res = gmp_find_by_id(&(_global_env_modes[i]), mp->res_id); | ||
1361 | |||
1362 | /* found the appropriate reservation (or vCPU) */ | ||
1363 | if (res) { | ||
1364 | TRACE_TASK(tsk, "GMP FOUND RES ID in mode %d\n", i); | ||
1365 | |||
1366 | /* initial values */ | ||
1367 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); | ||
1368 | |||
1369 | } | ||
1370 | } | ||
1371 | |||
1372 | if (!err){ | ||
1373 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
1374 | tsk_rt(tsk)->plugin_state = tinfo; | ||
1375 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
1376 | raw_spin_lock(&mode_lock); | ||
1377 | for(i = 0; i < NR_MODES; i++){ | ||
1378 | if (in_mode(tsk, i)){ | ||
1379 | mode_sizes[i]++; | ||
1380 | } | ||
1381 | } | ||
1382 | raw_spin_unlock(&mode_lock); | ||
1383 | |||
1384 | } | ||
1385 | |||
1386 | raw_spin_unlock(&global_lock); | ||
1387 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1388 | } | ||
1389 | |||
1390 | preempt_enable(); | ||
1391 | |||
1392 | if (err) | ||
1393 | kfree(tinfo); | ||
1394 | |||
1395 | TRACE_TASK(tsk, "MC2 task admitted %d\n", err); | ||
1396 | return err; | ||
1397 | } | ||
1398 | |||
1399 | /* mc2_task_new - A new real-time task has arrived. Release the next job | ||
1400 | * at the next reservation replenishment time | ||
1401 | */ | ||
1402 | static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | ||
1403 | int is_running) | ||
1404 | { | ||
1405 | unsigned long flags; | ||
1406 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1407 | struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); | ||
1408 | struct reservation *res; | ||
1409 | enum crit_level lv = get_task_crit_level(tsk); | ||
1410 | lt_t release = 0; | ||
1411 | |||
1412 | BUG_ON(lv < CRIT_LEVEL_A || lv > CRIT_LEVEL_C); | ||
1413 | |||
1414 | TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", | ||
1415 | litmus_clock(), on_runqueue, is_running); | ||
1416 | |||
1417 | if (tinfo->cpu == -1) | ||
1418 | state = local_cpu_state(); | ||
1419 | else | ||
1420 | state = cpu_state_for(tinfo->cpu); | ||
1421 | |||
1422 | local_irq_save(flags); | ||
1423 | |||
1424 | /* acquire the lock protecting the state and disable interrupts */ | ||
1425 | //raw_spin_lock(&_global_env.lock); | ||
1426 | //raw_spin_lock(&state->lock); | ||
1427 | if (is_running) { | ||
1428 | state->scheduled = tsk; | ||
1429 | /* make sure this task should actually be running */ | ||
1430 | litmus_reschedule_local(); | ||
1431 | } | ||
1432 | |||
1433 | raw_spin_lock(&state->lock); | ||
1434 | |||
1435 | if (lv == CRIT_LEVEL_C) { | ||
1436 | raw_spin_lock(&global_lock); | ||
1437 | res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id); | ||
1438 | } | ||
1439 | else { | ||
1440 | res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id); | ||
1441 | } | ||
1442 | |||
1443 | //BUG_ON(!res); | ||
1444 | // the current mode doesn't have this task. | ||
1445 | // do not update timer and set the next release time. | ||
1446 | |||
1447 | //res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
1448 | BUG_ON(!res); | ||
1449 | |||
1450 | release = res->next_replenishment; | ||
1451 | |||
1452 | if (on_runqueue || is_running) { | ||
1453 | /* Assumption: litmus_clock() is synchronized across cores | ||
1454 | * [see comment in pres_task_resume()] */ | ||
1455 | if (lv == CRIT_LEVEL_C) { | ||
1456 | gmp_update_time(_global_env, litmus_clock()); | ||
1457 | //raw_spin_unlock(&_global_env.lock); | ||
1458 | } | ||
1459 | else | ||
1460 | sup_update_time(state->sup_env, litmus_clock()); | ||
1461 | //mc2_update_time(lv, state, litmus_clock()); | ||
1462 | /* 9/20/2015 fix | ||
1463 | mc2_update_ghost_state(state); | ||
1464 | */ | ||
1465 | task_arrives(state, tsk); | ||
1466 | if (lv == CRIT_LEVEL_C) | ||
1467 | raw_spin_unlock(&global_lock); | ||
1468 | /* NOTE: drops state->lock */ | ||
1469 | TRACE("mc2_new()\n"); | ||
1470 | |||
1471 | mc2_update_timer_and_unlock(state); | ||
1472 | } else { | ||
1473 | if (lv == CRIT_LEVEL_C) | ||
1474 | raw_spin_unlock(&global_lock); | ||
1475 | raw_spin_unlock(&state->lock); | ||
1476 | //raw_spin_unlock(&_global_env.lock); | ||
1477 | } | ||
1478 | local_irq_restore(flags); | ||
1479 | |||
1480 | if (release) { | ||
1481 | TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release); | ||
1482 | release_at(tsk, release); | ||
1483 | } | ||
1484 | else | ||
1485 | TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); | ||
1486 | } | ||
1487 | |||
1488 | /* mc2_reservation_destroy - reservation_destroy system call backend | ||
1489 | */ | ||
1490 | static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | ||
1491 | { | ||
1492 | long ret = -EINVAL; | ||
1493 | struct mc2_cpu_state *state; | ||
1494 | struct reservation *res = NULL, *next; | ||
1495 | struct sup_reservation_environment *sup_env; | ||
1496 | int found = 0; | ||
1497 | //enum crit_level lv = get_task_crit_level(current); | ||
1498 | unsigned long flags; | ||
1499 | int i; | ||
1500 | |||
1501 | if (cpu == -1) { | ||
1502 | struct next_timer_event *event, *e_next; | ||
1503 | local_irq_save(flags); | ||
1504 | raw_spin_lock(&global_lock); | ||
1505 | |||
1506 | /* if the reservation is global reservation */ | ||
1507 | //state = local_cpu_state(); | ||
1508 | //delete reservation id in all modes | ||
1509 | for(i = 0; i < NR_MODES; i++) { | ||
1510 | //raw_spin_lock(&state->lock); | ||
1511 | |||
1512 | list_for_each_entry_safe(res, next, &_global_env_modes[i].depleted_reservations, list) { | ||
1513 | if (res->id == reservation_id) { | ||
1514 | list_del(&res->list); | ||
1515 | kfree(res); | ||
1516 | found = 1; | ||
1517 | ret = 0; | ||
1518 | } | ||
1519 | } | ||
1520 | if (!found) { | ||
1521 | list_for_each_entry_safe(res, next, &_global_env_modes[i].inactive_reservations, list) { | ||
1522 | if (res->id == reservation_id) { | ||
1523 | list_del(&res->list); | ||
1524 | kfree(res); | ||
1525 | found = 1; | ||
1526 | ret = 0; | ||
1527 | } | ||
1528 | } | ||
1529 | } | ||
1530 | if (!found) { | ||
1531 | list_for_each_entry_safe(res, next, &_global_env_modes[i].active_reservations, list) { | ||
1532 | if (res->id == reservation_id) { | ||
1533 | list_del(&res->list); | ||
1534 | kfree(res); | ||
1535 | found = 1; | ||
1536 | ret = 0; | ||
1537 | } | ||
1538 | } | ||
1539 | } | ||
1540 | |||
1541 | //raw_spin_unlock(&state->lock); | ||
1542 | list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) { | ||
1543 | if (event->id == reservation_id) { | ||
1544 | list_del(&event->list); | ||
1545 | TRACE("EVENT id %d deleted\n", event->id); | ||
1546 | kfree(event); | ||
1547 | } | ||
1548 | } | ||
1549 | } | ||
1550 | |||
1551 | raw_spin_unlock(&global_lock); | ||
1552 | local_irq_restore(flags); | ||
1553 | } else { | ||
1554 | /* if the reservation is a partitioned reservation */ | ||
1555 | state = cpu_state_for(cpu); | ||
1556 | for (i = 0; i < NR_MODES; i++){ | ||
1557 | local_irq_save(flags); | ||
1558 | raw_spin_lock(&state->lock); | ||
1559 | |||
1560 | // res = sup_find_by_id(state->sup_env, reservation_id); | ||
1561 | sup_env = &(state->sup_env_modes[i]); | ||
1562 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | ||
1563 | if (res->id == reservation_id) { | ||
1564 | /* | ||
1565 | if (lv == CRIT_LEVEL_A) { | ||
1566 | struct table_driven_reservation *tdres; | ||
1567 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1568 | kfree(tdres->intervals); | ||
1569 | } | ||
1570 | */ | ||
1571 | list_del(&res->list); | ||
1572 | kfree(res); | ||
1573 | found = 1; | ||
1574 | ret = 0; | ||
1575 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1576 | } | ||
1577 | } | ||
1578 | if (!found) { | ||
1579 | list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { | ||
1580 | if (res->id == reservation_id) { | ||
1581 | /* if (lv == CRIT_LEVEL_A) { | ||
1582 | struct table_driven_reservation *tdres; | ||
1583 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1584 | kfree(tdres->intervals); | ||
1585 | } | ||
1586 | */ | ||
1587 | list_del(&res->list); | ||
1588 | kfree(res); | ||
1589 | found = 1; | ||
1590 | ret = 0; | ||
1591 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1592 | } | ||
1593 | } | ||
1594 | } | ||
1595 | if (!found) { | ||
1596 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
1597 | if (res->id == reservation_id) { | ||
1598 | /* if (lv == CRIT_LEVEL_A) { | ||
1599 | struct table_driven_reservation *tdres; | ||
1600 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1601 | kfree(tdres->intervals); | ||
1602 | } | ||
1603 | */ | ||
1604 | list_del(&res->list); | ||
1605 | kfree(res); | ||
1606 | found = 1; | ||
1607 | ret = 0; | ||
1608 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1609 | } | ||
1610 | } | ||
1611 | } | ||
1612 | |||
1613 | raw_spin_unlock(&state->lock); | ||
1614 | local_irq_restore(flags); | ||
1615 | } | ||
1616 | } | ||
1617 | |||
1618 | TRACE("Reservation destroyed, ret = %d\n", ret); | ||
1619 | return ret; | ||
1620 | } | ||
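/* Editorial note: the three per-mode searches above repeat one pattern
 * (scan a reservation list, unlink the matching id, free it). A minimal
 * sketch of a helper that could factor this out is shown below; the helper
 * name is hypothetical and not part of the plugin.
 */
#if 0
static int destroy_reservation_in_list(struct list_head *head,
	unsigned int reservation_id)
{
	struct reservation *res, *next;
	int found = 0;

	list_for_each_entry_safe(res, next, head, list) {
		if (res->id == reservation_id) {
			list_del(&res->list);
			kfree(res);
			found = 1;
		}
	}
	return found;
}
#endif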
1621 | |||
1622 | /* mc2_task_exit - the task becomes a normal (non-real-time) task | ||
1623 | */ | ||
1624 | static void mc2_task_exit(struct task_struct *tsk) | ||
1625 | { | ||
1626 | unsigned long flags; | ||
1627 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1628 | struct mc2_cpu_state *state; | ||
1629 | enum crit_level lv = tinfo->mc2_param.crit; | ||
1630 | //struct crit_entry* ce; | ||
1631 | int cpu; | ||
1632 | int i; | ||
1633 | |||
1634 | local_irq_save(flags); | ||
1635 | if (tinfo->cpu != -1) | ||
1636 | state = cpu_state_for(tinfo->cpu); | ||
1637 | else | ||
1638 | state = local_cpu_state(); | ||
1639 | |||
1640 | raw_spin_lock(&state->lock); | ||
1641 | |||
1642 | if (state->scheduled == tsk) | ||
1643 | state->scheduled = NULL; | ||
1644 | |||
1645 | //ce = &state->crit_entries[lv]; | ||
1646 | //if (ce->running == tsk) | ||
1647 | // ce->running = NULL; | ||
1648 | |||
1649 | /* remove from queues */ | ||
1650 | if (is_running(tsk)) { | ||
1651 | /* Assumption: litmus_clock() is synchronized across cores | ||
1652 | * [see comment in pres_task_resume()] */ | ||
1653 | |||
1654 | /* update both global and partitioned */ | ||
1655 | if (lv < CRIT_LEVEL_C) { | ||
1656 | sup_update_time(state->sup_env, litmus_clock()); | ||
1657 | raw_spin_lock(&global_lock); | ||
1658 | gmp_update_time(_global_env, litmus_clock()); | ||
1659 | raw_spin_unlock(&global_lock); | ||
1660 | } | ||
1661 | else if (lv == CRIT_LEVEL_C) { | ||
1662 | raw_spin_lock(&global_lock); | ||
1663 | gmp_update_time(_global_env, litmus_clock()); | ||
1664 | //raw_spin_unlock(&_global_env.lock); | ||
1665 | } | ||
1666 | /* 9/20/2015 fix | ||
1667 | mc2_update_ghost_state(state); | ||
1668 | */ | ||
1669 | task_departs(tsk, 0); | ||
1670 | if (lv == CRIT_LEVEL_C) | ||
1671 | raw_spin_unlock(&global_lock); | ||
1672 | |||
1673 | /* NOTE: drops state->lock */ | ||
1674 | TRACE("mc2_exit()\n"); | ||
1675 | |||
1676 | mc2_update_timer_and_unlock(state); | ||
1677 | } else { | ||
1678 | raw_spin_unlock(&state->lock); | ||
1679 | |||
1680 | } | ||
1681 | |||
1682 | if (lv == CRIT_LEVEL_C) { | ||
1683 | |||
1684 | raw_spin_lock(&mode_lock); | ||
1685 | for(i = 0; i < NR_MODES; i++){ | ||
1686 | if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) ) | ||
1687 | continue; | ||
1688 | mode_sizes[i]--; | ||
1689 | } | ||
1690 | raw_spin_unlock(&mode_lock); | ||
1691 | |||
1692 | for_each_online_cpu(cpu) { | ||
1693 | state = cpu_state_for(cpu); | ||
1694 | if (state == local_cpu_state()) | ||
1695 | continue; | ||
1696 | raw_spin_lock(&state->lock); | ||
1697 | |||
1698 | if (state->scheduled == tsk) | ||
1699 | state->scheduled = NULL; | ||
1700 | |||
1701 | //ce = &state->crit_entries[lv]; | ||
1702 | //if (ce->running == tsk) | ||
1703 | // ce->running = NULL; | ||
1704 | |||
1705 | raw_spin_unlock(&state->lock); | ||
1706 | } | ||
1707 | } | ||
1708 | |||
1709 | local_irq_restore(flags); | ||
1710 | |||
1711 | if (is_mode_poll_task(tsk) && (tinfo->cpu == 0)) { | ||
1712 | cpu_0_spin_flag = !cpu_0_spin_flag; // release the other CPUs before exiting. | ||
1713 | cpu_0_task_exist = false; | ||
1714 | } | ||
1715 | |||
1716 | kfree(tsk_rt(tsk)->plugin_state); | ||
1717 | tsk_rt(tsk)->plugin_state = NULL; | ||
1718 | kfree(tsk_rt(tsk)->mc2_data); | ||
1719 | tsk_rt(tsk)->mc2_data = NULL; | ||
1720 | } | ||
1721 | |||
1722 | /* create_polling_reservation - create a new polling reservation | ||
1723 | */ | ||
1724 | static long create_polling_reservation( | ||
1725 | int res_type, | ||
1726 | struct reservation_config *config) | ||
1727 | { | ||
1728 | struct mc2_cpu_state *state = NULL; | ||
1729 | //struct reservation* res = NULL; | ||
1730 | struct polling_reservation *pres; | ||
1731 | unsigned long flags; | ||
1732 | int use_edf = config->priority == LITMUS_NO_PRIORITY; | ||
1733 | int periodic = res_type == PERIODIC_POLLING; | ||
1734 | long err = -EINVAL; | ||
1735 | bool resExist = false; | ||
1736 | |||
1737 | /* sanity checks */ | ||
1738 | if (config->polling_params.budget > | ||
1739 | config->polling_params.period) { | ||
1740 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1741 | "budget > period\n", config->id); | ||
1742 | return -EINVAL; | ||
1743 | } | ||
1744 | if (config->polling_params.budget > | ||
1745 | config->polling_params.relative_deadline | ||
1746 | && config->polling_params.relative_deadline) { | ||
1747 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1748 | "budget > deadline\n", config->id); | ||
1749 | return -EINVAL; | ||
1750 | } | ||
1751 | if (config->polling_params.offset > | ||
1752 | config->polling_params.period) { | ||
1753 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1754 | "offset > period\n", config->id); | ||
1755 | return -EINVAL; | ||
1756 | } | ||
1757 | /* sanity check: mode must be within range */ | ||
1758 | if (config->mode < 0 || config->mode >= NR_MODES){ | ||
1759 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1760 | "Mode outside range\n", config->id); | ||
1761 | return -EINVAL; | ||
1762 | } | ||
1763 | |||
1764 | /* Allocate before we grab a spin lock. | ||
1765 | * Todo: would be nice to use a core-local allocation. | ||
1766 | */ | ||
1767 | pres = kzalloc(sizeof(*pres), GFP_KERNEL); | ||
1768 | if (!pres) | ||
1769 | return -ENOMEM; | ||
1770 | |||
1771 | TRACE("CREATE_POLLING_RESERVATION id %d mode %d\n", config->id, config->mode); | ||
1772 | if (config->cpu != -1) { | ||
1773 | int i, is_exist = 0; | ||
1774 | //raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
1775 | state = cpu_state_for(config->cpu); | ||
1776 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1777 | |||
1778 | /* check if this is the first creation of the reservation */ | ||
1779 | for (i = 0; i < NR_MODES; i++) { | ||
1780 | if( sup_find_by_id(&(state->sup_env_modes[i]), config->id) ) | ||
1781 | is_exist = 1; | ||
1782 | } | ||
1783 | if (!is_exist && config->mode != 0) { | ||
1784 | /* create mode 0 reservation first */ | ||
1785 | struct polling_reservation *pres_0 = kzalloc(sizeof(*pres_0), GFP_ATOMIC); | ||
1786 | |||
1787 | TRACE_CUR("The first mode_num = %d\n",config->mode); | ||
1788 | |||
1789 | if (!pres_0) { | ||
1790 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1791 | kfree(pres); | ||
1792 | return -ENOMEM; | ||
1793 | } | ||
1794 | polling_reservation_init(pres_0, use_edf, periodic, | ||
1795 | config->polling_params.budget, | ||
1796 | config->polling_params.period, | ||
1797 | config->polling_params.relative_deadline, | ||
1798 | config->polling_params.offset); | ||
1799 | pres_0->res.id = config->id; | ||
1800 | pres_0->res.blocked_by_ghost = 0; | ||
1801 | pres_0->res.is_ghost = NO_CPU; | ||
1802 | pres_0->res.mode = config->mode; | ||
1803 | |||
1804 | if (!use_edf) | ||
1805 | pres_0->res.priority = config->priority; | ||
1806 | sup_add_new_reservation(&(state->sup_env_modes[0]), &pres_0->res); | ||
1807 | TRACE_CUR("SUP reservation created R%d for mode 0 priority : %llu\n", config->id, pres_0->res.priority); | ||
1808 | pres_0->res.reported = 0; | ||
1809 | pres_0->res.tsk = current; | ||
1810 | } | ||
1811 | |||
1812 | /* force the reservation id to be unique within config->mode */ | ||
1813 | if( sup_find_by_id(&(state->sup_env_modes[config->mode]), config->id) ){ | ||
1814 | resExist = true; | ||
1815 | } | ||
1816 | if (!resExist) { | ||
1817 | polling_reservation_init(pres, use_edf, periodic, | ||
1818 | config->polling_params.budget, | ||
1819 | config->polling_params.period, | ||
1820 | config->polling_params.relative_deadline, | ||
1821 | config->polling_params.offset); | ||
1822 | pres->res.id = config->id; | ||
1823 | pres->res.blocked_by_ghost = 0; | ||
1824 | pres->res.is_ghost = NO_CPU; | ||
1825 | pres->res.mode = config->mode; | ||
1826 | /*if (config->priority == LITMUS_MAX_PRIORITY) { | ||
1827 | level_a_priorities[config->cpu]++; | ||
1828 | pres->res.priority = level_a_priorities[config->cpu]; | ||
1829 | }*/ | ||
1830 | if (!use_edf) | ||
1831 | pres->res.priority = config->priority; | ||
1832 | sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res); | ||
1833 | err = config->id; | ||
1834 | TRACE_CUR("SUP reservation created R%d for mode %d priority : %llu\n", config->id, config->mode, pres->res.priority); | ||
1835 | } else { | ||
1836 | err = -EEXIST; | ||
1837 | } | ||
1838 | |||
1839 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1840 | //raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
1841 | |||
1842 | } else { | ||
1843 | int i, is_exist = 0; | ||
1844 | raw_spin_lock_irqsave(&global_lock, flags); | ||
1845 | |||
1846 | /* check if this is the first creation of the reservation */ | ||
1847 | for (i = 0; i < NR_MODES; i++) { | ||
1848 | if(gmp_find_by_id(&(_global_env_modes[i]), config->id)) | ||
1849 | is_exist = 1; | ||
1850 | } | ||
1851 | if (!is_exist && config->mode != 0) { | ||
1852 | /* create mode 0 reservation first */ | ||
1853 | struct polling_reservation *pres_0 = kzalloc(sizeof(*pres_0), GFP_ATOMIC); | ||
1854 | |||
1855 | TRACE_CUR("The first mode_num = %d\n",config->mode); | ||
1856 | |||
1857 | if (!pres_0) { | ||
1858 | raw_spin_unlock_irqrestore(&global_lock, flags); | ||
1859 | kfree(pres); | ||
1860 | return -ENOMEM; | ||
1861 | } | ||
1862 | polling_reservation_init(pres_0, use_edf, periodic, | ||
1863 | config->polling_params.budget, | ||
1864 | config->polling_params.period, | ||
1865 | config->polling_params.relative_deadline, | ||
1866 | config->polling_params.offset); | ||
1867 | pres_0->res.id = config->id; | ||
1868 | pres_0->res.blocked_by_ghost = 0; | ||
1869 | pres_0->res.scheduled_on = NO_CPU; | ||
1870 | pres_0->res.is_ghost = NO_CPU; | ||
1871 | pres_0->res.mode = config->mode; | ||
1872 | |||
1873 | if (!use_edf) | ||
1874 | pres_0->res.priority = config->priority; | ||
1875 | gmp_add_new_reservation(&(_global_env_modes[0]), &pres_0->res); | ||
1876 | TRACE_CUR("GMP reservation created R%d for mode 0 priority : %llu\n", config->id, pres_0->res.priority); | ||
1877 | pres_0->res.reported = 0; | ||
1878 | pres_0->res.tsk = current; | ||
1879 | } | ||
1880 | |||
1881 | |||
1882 | /* force ids to be unique within the desired mode */ | ||
1883 | if (gmp_find_by_id(&(_global_env_modes[config->mode]), config->id)){ | ||
1884 | resExist = true; | ||
1885 | } | ||
1886 | if (!resExist) { | ||
1887 | polling_reservation_init(pres, use_edf, periodic, | ||
1888 | config->polling_params.budget, | ||
1889 | config->polling_params.period, | ||
1890 | config->polling_params.relative_deadline, | ||
1891 | config->polling_params.offset); | ||
1892 | pres->res.id = config->id; | ||
1893 | pres->res.blocked_by_ghost = 0; | ||
1894 | pres->res.scheduled_on = NO_CPU; | ||
1895 | pres->res.is_ghost = NO_CPU; | ||
1896 | pres->res.mode = config->mode; | ||
1897 | if (!use_edf) | ||
1898 | pres->res.priority = config->priority; | ||
1899 | gmp_add_new_reservation(&(_global_env_modes[config->mode]), &pres->res); | ||
1900 | TRACE_CUR("GMP reservation created R%d for mode %d priority : %llu\n", config->id, config->mode, pres->res.priority); | ||
1901 | err = config->id; | ||
1902 | } else { | ||
1903 | err = -EEXIST; | ||
1904 | } | ||
1905 | raw_spin_unlock_irqrestore(&global_lock, flags); | ||
1906 | } | ||
1907 | |||
1908 | |||
1909 | pres->res.reported = 0; | ||
1910 | pres->res.tsk = current; | ||
1911 | |||
1912 | if (err < 0) | ||
1913 | kfree(pres); | ||
1914 | |||
1915 | return err; | ||
1916 | } | ||
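/* Editorial note: a minimal sketch of how a caller might fill in
 * struct reservation_config for the polling path above. Only fields that
 * create_polling_reservation() actually inspects are set; the userspace
 * wrapper reservation_create() is assumed (e.g. from liblitmus) and the
 * numeric values are illustrative.
 */
#if 0
	struct reservation_config config = {
		.id       = 1001,
		.cpu      = 0,                  /* partitioned (level A/B); -1 means global (level C) */
		.priority = LITMUS_NO_PRIORITY, /* use EDF inside the reservation environment */
		.mode     = 0,                  /* must satisfy 0 <= mode < NR_MODES */
		.polling_params = {
			.budget            =  2000000ULL,  /*  2 ms */
			.period            = 10000000ULL,  /* 10 ms */
			.relative_deadline = 10000000ULL,
			.offset            = 0,
		},
	};
	/* budget <= period, budget <= relative_deadline, and offset <= period,
	 * otherwise the sanity checks above reject the request with -EINVAL. */
	long ret = reservation_create(PERIODIC_POLLING, &config);
#endif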
1917 | |||
1918 | #define MAX_INTERVALS 1024 | ||
1919 | |||
1920 | /* create_table_driven_reservation - create a table_driven reservation | ||
1921 | */ | ||
1922 | static long create_table_driven_reservation( | ||
1923 | struct reservation_config *config) | ||
1924 | { | ||
1925 | struct mc2_cpu_state *state; | ||
1926 | //struct reservation* res = NULL; | ||
1927 | struct table_driven_reservation *td_res = NULL; | ||
1928 | struct lt_interval *slots = NULL; | ||
1929 | size_t slots_size; | ||
1930 | unsigned int i, num_slots; | ||
1931 | unsigned long flags; | ||
1932 | long err = -EINVAL; | ||
1933 | bool resExist = false; | ||
1934 | |||
1935 | if (!config->table_driven_params.num_intervals) { | ||
1936 | printk(KERN_ERR "invalid table-driven reservation (%u): " | ||
1937 | "no intervals\n", config->id); | ||
1938 | return -EINVAL; | ||
1939 | } | ||
1940 | |||
1941 | if (config->table_driven_params.num_intervals > MAX_INTERVALS) { | ||
1942 | printk(KERN_ERR "invalid table-driven reservation (%u): " | ||
1943 | "too many intervals (max: %d)\n", config->id, MAX_INTERVALS); | ||
1944 | return -EINVAL; | ||
1945 | } | ||
1946 | |||
1947 | if (config->mode >= NR_MODES || config->mode < 0){ | ||
1948 | printk(KERN_ERR "invalid table-driven reservation (%u): " | ||
1949 | "mode outside of range\n", config->id); | ||
1950 | return -EINVAL; | ||
1951 | } | ||
1952 | |||
1953 | num_slots = config->table_driven_params.num_intervals; | ||
1954 | slots_size = sizeof(slots[0]) * num_slots; | ||
1955 | slots = kzalloc(slots_size, GFP_KERNEL); | ||
1956 | if (!slots) | ||
1957 | return -ENOMEM; | ||
1958 | |||
1959 | td_res = kzalloc(sizeof(*td_res), GFP_KERNEL); | ||
1960 | if (!td_res) | ||
1961 | err = -ENOMEM; | ||
1962 | else | ||
1963 | err = copy_from_user(slots, | ||
1964 | config->table_driven_params.intervals, slots_size) ? -EFAULT : 0; | ||
1965 | |||
1966 | if (!err) { | ||
1967 | /* sanity checks */ | ||
1968 | for (i = 0; !err && i < num_slots; i++) | ||
1969 | if (slots[i].end <= slots[i].start) { | ||
1970 | printk(KERN_ERR | ||
1971 | "invalid table-driven reservation (%u): " | ||
1972 | "invalid interval %u => [%llu, %llu]\n", | ||
1973 | config->id, i, | ||
1974 | slots[i].start, slots[i].end); | ||
1975 | err = -EINVAL; | ||
1976 | } | ||
1977 | |||
1978 | for (i = 0; !err && i + 1 < num_slots; i++) | ||
1979 | if (slots[i + 1].start <= slots[i].end) { | ||
1980 | printk(KERN_ERR | ||
1981 | "invalid table-driven reservation (%u): " | ||
1982 | "overlapping intervals %u, %u\n", | ||
1983 | config->id, i, i + 1); | ||
1984 | err = -EINVAL; | ||
1985 | } | ||
1986 | |||
1987 | if (slots[num_slots - 1].end > | ||
1988 | config->table_driven_params.major_cycle_length) { | ||
1989 | printk(KERN_ERR | ||
1990 | "invalid table-driven reservation (%u): last " | ||
1991 | "interval ends past major cycle %llu > %llu\n", | ||
1992 | config->id, | ||
1993 | slots[num_slots - 1].end, | ||
1994 | config->table_driven_params.major_cycle_length); | ||
1995 | err = -EINVAL; | ||
1996 | } | ||
1997 | } | ||
1998 | |||
1999 | if (!err) { | ||
2000 | state = cpu_state_for(config->cpu); | ||
2001 | raw_spin_lock_irqsave(&state->lock, flags); | ||
2002 | |||
2003 | /* force unique ids across all modes */ | ||
2004 | for(i = 0; i < NR_MODES; i++){ | ||
2005 | if (sup_find_by_id(&(state->sup_env_modes[i]), config->id)){ | ||
2006 | resExist = true; | ||
2007 | break; | ||
2008 | } | ||
2009 | } | ||
2010 | if (!resExist) { | ||
2011 | table_driven_reservation_init(td_res, | ||
2012 | config->table_driven_params.major_cycle_length, | ||
2013 | slots, num_slots); | ||
2014 | td_res->res.id = config->id; | ||
2015 | td_res->res.priority = config->priority; | ||
2016 | td_res->res.blocked_by_ghost = 0; | ||
2017 | td_res->res.mode = config->mode; | ||
2018 | sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &td_res->res); | ||
2019 | err = config->id; | ||
2020 | } else { | ||
2021 | err = -EEXIST; | ||
2022 | } | ||
2023 | |||
2024 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
2025 | } | ||
2026 | |||
2027 | if (err < 0) { | ||
2028 | kfree(slots); | ||
2029 | kfree(td_res); | ||
2030 | } else { | ||
2031 | td_res->res.reported = 0; | ||
2032 | td_res->res.tsk = current; | ||
2033 | } | ||
2034 | |||
2035 | return err; | ||
2036 | } | ||
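/* Editorial note: a minimal sketch of a table that satisfies the sanity
 * checks above: every interval has end > start, consecutive intervals do
 * not overlap, and the last interval ends within the major cycle. The
 * userspace wrapper reservation_create() is assumed and all values are
 * illustrative.
 */
#if 0
	struct lt_interval table[] = {
		{ .start = 0,       .end = 2000000 },  /* [0 ms, 2 ms) */
		{ .start = 5000000, .end = 7000000 },  /* [5 ms, 7 ms) */
	};
	struct reservation_config config = {
		.id   = 2001,
		.cpu  = 1,       /* table-driven reservations are per-CPU */
		.mode = 0,
		.table_driven_params = {
			.num_intervals      = 2,
			.intervals          = table,
			.major_cycle_length = 10000000,  /* 10 ms */
		},
	};
	long ret = reservation_create(TABLE_DRIVEN, &config);
#endif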
2037 | |||
2038 | /* mc2_reservation_create - reservation_create system call backend | ||
2039 | */ | ||
2040 | static long mc2_reservation_create(int res_type, void* __user _config) | ||
2041 | { | ||
2042 | long ret = -EINVAL; | ||
2043 | struct reservation_config config; | ||
2044 | |||
2045 | TRACE("Attempt to create reservation (%d)\n", res_type); | ||
2046 | |||
2047 | if (copy_from_user(&config, _config, sizeof(config))) | ||
2048 | return -EFAULT; | ||
2049 | |||
2050 | TRACE("Attempt to create reservation id %d mode %d\n", config.id, config.mode); | ||
2051 | |||
2052 | if (config.cpu != -1) { | ||
2053 | if (config.cpu < 0 || !cpu_online(config.cpu)) { | ||
2054 | printk(KERN_ERR "invalid reservation (%u): " | ||
2055 | "CPU %d offline\n", config.id, config.cpu); | ||
2056 | return -EINVAL; | ||
2057 | } | ||
2058 | } | ||
2059 | |||
2060 | switch (res_type) { | ||
2061 | case PERIODIC_POLLING: | ||
2062 | case SPORADIC_POLLING: | ||
2063 | ret = create_polling_reservation(res_type, &config); | ||
2064 | break; | ||
2065 | |||
2066 | case TABLE_DRIVEN: | ||
2067 | ret = create_table_driven_reservation(&config); | ||
2068 | break; | ||
2069 | |||
2070 | default: | ||
2071 | return -EINVAL; | ||
2072 | } | ||
2073 | |||
2074 | return ret; | ||
2075 | } | ||
2076 | |||
2077 | static struct domain_proc_info mc2_domain_proc_info; | ||
2078 | |||
2079 | static long mc2_get_domain_proc_info(struct domain_proc_info **ret) | ||
2080 | { | ||
2081 | *ret = &mc2_domain_proc_info; | ||
2082 | return 0; | ||
2083 | } | ||
2084 | |||
2085 | static void mc2_setup_domain_proc(void) | ||
2086 | { | ||
2087 | int i, cpu; | ||
2088 | int num_rt_cpus = num_online_cpus(); | ||
2089 | |||
2090 | struct cd_mapping *cpu_map, *domain_map; | ||
2091 | |||
2092 | memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info)); | ||
2093 | init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus); | ||
2094 | mc2_domain_proc_info.num_cpus = num_rt_cpus; | ||
2095 | mc2_domain_proc_info.num_domains = num_rt_cpus; | ||
2096 | |||
2097 | i = 0; | ||
2098 | for_each_online_cpu(cpu) { | ||
2099 | cpu_map = &mc2_domain_proc_info.cpu_to_domains[i]; | ||
2100 | domain_map = &mc2_domain_proc_info.domain_to_cpus[i]; | ||
2101 | |||
2102 | cpu_map->id = cpu; | ||
2103 | domain_map->id = i; | ||
2104 | cpumask_set_cpu(i, cpu_map->mask); | ||
2105 | cpumask_set_cpu(cpu, domain_map->mask); | ||
2106 | ++i; | ||
2107 | } | ||
2108 | } | ||
2109 | |||
2110 | static long mc2_activate_plugin(void) | ||
2111 | { | ||
2112 | int cpu;//, lv; | ||
2113 | struct mc2_cpu_state *state; | ||
2114 | struct cpu_entry *ce; | ||
2115 | int i; | ||
2116 | |||
2117 | for(i = 0; i < NR_MODES; i++){ | ||
2118 | gmp_init(&(_global_env_modes[i])); | ||
2119 | } | ||
2120 | _global_env = &_global_env_modes[0]; | ||
2121 | |||
2122 | raw_spin_lock_init(&_lowest_prio_cpu.lock); | ||
2123 | raw_spin_lock_init(&mode_lock); | ||
2124 | raw_spin_lock_init(&global_lock); | ||
2125 | |||
2126 | seen_once = false; | ||
2127 | |||
2128 | for_each_online_cpu(cpu) { | ||
2129 | TRACE("Initializing CPU%d...\n", cpu); | ||
2130 | |||
2131 | resched_cpu[cpu] = 0; | ||
2132 | //level_a_priorities[cpu] = 0; | ||
2133 | state = cpu_state_for(cpu); | ||
2134 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
2135 | |||
2136 | ce->cpu = cpu; | ||
2137 | ce->scheduled = NULL; | ||
2138 | ce->deadline = ULLONG_MAX; | ||
2139 | ce->lv = NUM_CRIT_LEVELS; | ||
2140 | ce->will_schedule = false; | ||
2141 | |||
2142 | raw_spin_lock_init(&state->lock); | ||
2143 | state->cpu = cpu; | ||
2144 | state->scheduled = NULL; | ||
2145 | //for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | ||
2146 | // struct crit_entry *cr_entry = &state->crit_entries[lv]; | ||
2147 | // cr_entry->level = lv; | ||
2148 | // cr_entry->running = NULL; | ||
2149 | //} | ||
2150 | |||
2151 | for(i = 0; i < NR_MODES; i++){ | ||
2152 | sup_init(&(state->sup_env_modes[i])); | ||
2153 | } | ||
2154 | state->sup_env = &(state->sup_env_modes[0]); | ||
2155 | |||
2156 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
2157 | state->timer.function = on_scheduling_timer; | ||
2158 | state->spin_flag = false; | ||
2159 | } | ||
2160 | |||
2161 | mc2_setup_domain_proc(); | ||
2162 | |||
2163 | mode = 0; | ||
2164 | requested_mode = 0; | ||
2165 | |||
2166 | for(i = 0; i < NR_MODES; i++){ | ||
2167 | mode_sizes[i] = 0; | ||
2168 | } | ||
2169 | res_reported = 0; | ||
2170 | cpu_0_spin_flag = false; | ||
2171 | cpu_0_task_exist = false; | ||
2172 | |||
2173 | return 0; | ||
2174 | } | ||
2175 | |||
2176 | static void mc2_finish_switch(struct task_struct *prev) | ||
2177 | { | ||
2178 | int cpus; | ||
2179 | enum crit_level lv = get_task_crit_level(prev); | ||
2180 | struct mc2_cpu_state *state = local_cpu_state(); | ||
2181 | |||
2182 | state->scheduled = is_realtime(current) ? current : NULL; | ||
2183 | if (lv == CRIT_LEVEL_C) { | ||
2184 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
2185 | if (resched_cpu[cpus]) { | ||
2186 | litmus_reschedule(cpus); | ||
2187 | } | ||
2188 | } | ||
2189 | } | ||
2190 | } | ||
2191 | |||
2192 | static long mc2_deactivate_plugin(void) | ||
2193 | { | ||
2194 | int cpu; | ||
2195 | struct mc2_cpu_state *state; | ||
2196 | struct reservation *res; | ||
2197 | struct next_timer_event *event; | ||
2198 | struct cpu_entry *ce; | ||
2199 | int i; | ||
2200 | |||
2201 | for_each_online_cpu(cpu) { | ||
2202 | state = cpu_state_for(cpu); | ||
2203 | raw_spin_lock(&state->lock); | ||
2204 | |||
2205 | hrtimer_cancel(&state->timer); | ||
2206 | |||
2207 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
2208 | |||
2209 | ce->cpu = cpu; | ||
2210 | ce->scheduled = NULL; | ||
2211 | ce->deadline = ULLONG_MAX; | ||
2212 | ce->lv = NUM_CRIT_LEVELS; | ||
2213 | ce->will_schedule = false; | ||
2214 | |||
2215 | |||
2216 | for(i = 0; i < NR_MODES; i++){ | ||
2217 | /* Delete all reservations --- assumes struct reservation | ||
2218 | * is prefix of containing struct. */ | ||
2219 | state->sup_env = &(state->sup_env_modes[i]); | ||
2220 | while (!list_empty(&state->sup_env->active_reservations)) { | ||
2221 | res = list_first_entry( | ||
2222 | &state->sup_env->active_reservations, | ||
2223 | struct reservation, list); | ||
2224 | list_del(&res->list); | ||
2225 | kfree(res); | ||
2226 | } | ||
2227 | |||
2228 | while (!list_empty(&state->sup_env->inactive_reservations)) { | ||
2229 | res = list_first_entry( | ||
2230 | &state->sup_env->inactive_reservations, | ||
2231 | struct reservation, list); | ||
2232 | list_del(&res->list); | ||
2233 | kfree(res); | ||
2234 | } | ||
2235 | |||
2236 | while (!list_empty(&state->sup_env->depleted_reservations)) { | ||
2237 | res = list_first_entry( | ||
2238 | &state->sup_env->depleted_reservations, | ||
2239 | struct reservation, list); | ||
2240 | list_del(&res->list); | ||
2241 | kfree(res); | ||
2242 | } | ||
2243 | } | ||
2244 | |||
2245 | raw_spin_unlock(&state->lock); | ||
2246 | } | ||
2247 | |||
2248 | raw_spin_lock(&global_lock); | ||
2249 | for(i = 0; i < NR_MODES; i++){ | ||
2250 | _global_env = &_global_env_modes[i]; | ||
2251 | while (!list_empty(&_global_env->active_reservations)) { | ||
2252 | res = list_first_entry( | ||
2253 | &_global_env->active_reservations, | ||
2254 | struct reservation, list); | ||
2255 | list_del(&res->list); | ||
2256 | kfree(res); | ||
2257 | } | ||
2258 | |||
2259 | while (!list_empty(&_global_env->inactive_reservations)) { | ||
2260 | res = list_first_entry( | ||
2261 | &_global_env->inactive_reservations, | ||
2262 | struct reservation, list); | ||
2263 | list_del(&res->list); | ||
2264 | kfree(res); | ||
2265 | } | ||
2266 | |||
2267 | while (!list_empty(&_global_env->depleted_reservations)) { | ||
2268 | res = list_first_entry( | ||
2269 | &_global_env->depleted_reservations, | ||
2270 | struct reservation, list); | ||
2271 | list_del(&res->list); | ||
2272 | kfree(res); | ||
2273 | } | ||
2274 | |||
2275 | while (!list_empty(&_global_env->next_events)) { | ||
2276 | event = list_first_entry( | ||
2277 | &_global_env->next_events, | ||
2278 | struct next_timer_event, list); | ||
2279 | list_del(&event->list); | ||
2280 | kfree(event); | ||
2281 | } | ||
2282 | |||
2283 | } | ||
2284 | |||
2285 | raw_spin_unlock(&global_lock); | ||
2286 | destroy_domain_proc_info(&mc2_domain_proc_info); | ||
2287 | return 0; | ||
2288 | } | ||
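/* Editorial note: the teardown above kfree()s each entry through its
 * embedded struct reservation, which (as the comment in the per-mode loop
 * says) is only safe if struct reservation is the first member of every
 * containing type. A compile-time guard could make that assumption
 * explicit; the sketch below assumes the containing types embed the
 * reservation in a member named 'res'.
 */
#if 0
static void mc2_check_reservation_layout(void)
{
	BUILD_BUG_ON(offsetof(struct polling_reservation, res) != 0);
	BUILD_BUG_ON(offsetof(struct table_driven_reservation, res) != 0);
}
#endif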
2289 | |||
2290 | static struct sched_plugin mc2_plugin = { | ||
2291 | .plugin_name = "MC2", | ||
2292 | .schedule = mc2_schedule, | ||
2293 | .finish_switch = mc2_finish_switch, | ||
2294 | .task_wake_up = mc2_task_resume, | ||
2295 | .admit_task = mc2_admit_task, | ||
2296 | .task_new = mc2_task_new, | ||
2297 | .task_exit = mc2_task_exit, | ||
2298 | .complete_job = mc2_complete_job, | ||
2299 | .get_domain_proc_info = mc2_get_domain_proc_info, | ||
2300 | .activate_plugin = mc2_activate_plugin, | ||
2301 | .deactivate_plugin = mc2_deactivate_plugin, | ||
2302 | .reservation_create = mc2_reservation_create, | ||
2303 | .reservation_destroy = mc2_reservation_destroy, | ||
2304 | }; | ||
2305 | |||
2306 | static int __init init_mc2(void) | ||
2307 | { | ||
2308 | return register_sched_plugin(&mc2_plugin); | ||
2309 | } | ||
2310 | |||
2311 | module_init(init_mc2); | ||