author		Namhoon Kim <namhoonk@cs.unc.edu>	2014-11-03 21:54:53 -0500
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2014-11-03 21:54:53 -0500
commit		709a4f8279a10ad85f5688808d11ffabff9ef25c (patch)
tree		13e97ae70abf72738d47e77b7cd02b8d2fe224ce
parent		d0cc5b0897b74201fe1ca363ce1d980b5dbefff5 (diff)
Add MC2 plugin
-rw-r--r--	include/litmus/mc2_common.h	 39
-rw-r--r--	litmus/mc2_common.c		 30
-rw-r--r--	litmus/sched_mc2.c		842
3 files changed, 911 insertions, 0 deletions
diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
new file mode 100644
index 000000000000..a1d571f0280f
--- /dev/null
+++ b/include/litmus/mc2_common.h
@@ -0,0 +1,39 @@
/*
 * MC^2 common data structures
 */

#ifndef __UNC_MC2_COMMON_H__
#define __UNC_MC2_COMMON_H__

enum crit_level {
	CRIT_LEVEL_A = 0,
	CRIT_LEVEL_B = 1,
	CRIT_LEVEL_C = 2,
	NUM_CRIT_LEVELS = 3,
};

struct mc2_task {
	enum crit_level crit;
	pid_t pid;
	lt_t hyperperiod;
};

#ifdef __KERNEL__

#include <litmus/reservation.h>

struct mc2_param {
	struct mc2_task mc2_task;
};

struct mc2_task_client {
	struct task_client tc;
	struct mc2_param mc2;
};

long mc2_task_client_init(struct mc2_task_client *mtc, struct task_struct *tsk,
			  struct reservation *res);

#endif /* __KERNEL__ */

#endif
\ No newline at end of file
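
For orientation, here is a minimal userspace sketch of how these structures are meant to be filled in before a thread is handed to the MC2 plugin. The struct mc2_task layout and the crit_level values come from the header above, and mc2_admit_task() in litmus/sched_mc2.c (below) looks the reservation up via the pid field; the set_mc2_task_param() wrapper name is an assumption for illustration only.

/* Sketch only: attach MC2 parameters to a thread before admission.
 * set_mc2_task_param() stands in for a hypothetical liblitmus-style wrapper
 * around the sys_set_mc2_task_param() system call added in litmus/sched_mc2.c. */
#include <litmus/mc2_common.h>

static long attach_mc2_params(pid_t tid, unsigned int reservation_id)
{
	struct mc2_task mp = {
		.crit = CRIT_LEVEL_B,
		.pid = reservation_id,	/* mc2_admit_task() uses this as the reservation ID */
		.hyperperiod = 0,
	};

	return set_mc2_task_param(tid, &mp);	/* hypothetical wrapper */
}
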
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
new file mode 100644
index 000000000000..56ef6b506a86
--- /dev/null
+++ b/litmus/mc2_common.c
@@ -0,0 +1,30 @@
/*
 * litmus/mc2_common.c
 *
 * Common functions for the MC2 plugin.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>

#include <litmus/mc2_common.h>

long mc2_task_client_init(
	struct mc2_task_client *mtc,
	struct task_struct *tsk,
	struct reservation *res
)
{
	task_client_init(&mtc->tc, tsk, res);
	if ((mtc->mc2.mc2_task.crit < CRIT_LEVEL_A) ||
	    (mtc->mc2.mc2_task.crit > CRIT_LEVEL_C))
		return -EINVAL;

	TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n",
		   mtc->mc2.mc2_task.crit);
	return 0;
}
\ No newline at end of file
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
new file mode 100644
index 000000000000..ab35008668d6
--- /dev/null
+++ b/litmus/sched_mc2.c
@@ -0,0 +1,842 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include <litmus/sched_plugin.h>
#include <litmus/preempt.h>
#include <litmus/debug_trace.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/budget.h>
#include <litmus/litmus_proc.h>

#include <litmus/mc2_common.h>
#include <litmus/reservation.h>
#include <litmus/polling_reservations.h>

struct mc2_task_state {
	struct mc2_task_client res_info;
	int cpu;
	bool has_departed;
};

struct mc2_cpu_state {
	raw_spinlock_t lock;

	struct sup_reservation_environment sup_env;
	struct hrtimer timer;

	int cpu;
	struct task_struct* scheduled;

#ifdef CONFIG_RELEASE_MASTER
	int release_master;
	/* used to delegate releases */
	struct hrtimer_start_on_info info;
#endif
};

static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);

#define cpu_state_for(cpu_id)	(&per_cpu(mc2_cpu_state, cpu_id))
#define local_cpu_state()	(&__get_cpu_var(mc2_cpu_state))

static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
{
	return (struct mc2_task_state*) tsk_rt(tsk)->plugin_state;
}

static void task_departs(struct task_struct *tsk, int job_complete)
{
	struct mc2_task_state* state = get_mc2_state(tsk);
	struct reservation* res;
	struct reservation_client *client;

	res = state->res_info.tc.client.reservation;
	client = &state->res_info.tc.client;

	res->ops->client_departs(res, client, job_complete);
	state->has_departed = true;
}

static void task_arrives(struct task_struct *tsk)
{
	struct mc2_task_state* state = get_mc2_state(tsk);
	struct reservation* res;
	struct reservation_client *client;

	res = state->res_info.tc.client.reservation;
	client = &state->res_info.tc.client;

	state->has_departed = false;
	res->ops->client_arrives(res, client);
}

/* NOTE: drops state->lock */
static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
{
	int local;
	lt_t update, now;

	update = state->sup_env.next_scheduler_update;
	now = state->sup_env.env.current_time;

	/* Be sure we're actually running on the right core,
	 * as mc2_update_timer() is also called from mc2_task_resume(),
	 * which might be called on any CPU when a thread resumes.
	 */
	local = local_cpu_state() == state;

	/* Must drop state lock before calling into hrtimer_start(), which
	 * may raise a softirq, which in turn may wake ksoftirqd. */
	raw_spin_unlock(&state->lock);

	if (update <= now) {
		litmus_reschedule(state->cpu);
	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
		/* Reprogram only if not already set correctly. */
		if (!hrtimer_active(&state->timer) ||
		    ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
			TRACE("canceling timer...\n");
			hrtimer_cancel(&state->timer);
			TRACE("setting scheduler timer for %llu\n", update);
			/* We cannot use hrtimer_start() here because the
			 * wakeup flag must be set to zero. */
			__hrtimer_start_range_ns(&state->timer,
				ns_to_ktime(update),
				0 /* timer coalescing slack */,
				HRTIMER_MODE_ABS_PINNED,
				0 /* wakeup */);
		}
	} else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
		/* Poke remote core only if timer needs to be set earlier than
		 * it is currently set.
		 */
		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
		      "active:%d, set:%llu)\n",
		      state->cpu,
		      update,
		      hrtimer_active(&state->timer),
		      ktime_to_ns(hrtimer_get_expires(&state->timer)));
		if (!hrtimer_active(&state->timer) ||
		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
			TRACE("poking CPU %d so that it can update its "
			      "scheduling timer (active:%d, set:%llu)\n",
			      state->cpu,
			      hrtimer_active(&state->timer),
			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
			litmus_reschedule(state->cpu);
		}
	}
}

static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
{
	unsigned long flags;
	enum hrtimer_restart restart = HRTIMER_NORESTART;
	struct mc2_cpu_state *state;
	lt_t update, now;

	state = container_of(timer, struct mc2_cpu_state, timer);

	/* The scheduling timer should only fire on the local CPU, because
	 * otherwise deadlocks via timer_cancel() are possible.
	 * Note: this does not interfere with dedicated interrupt handling, as
	 * even under dedicated interrupt handling scheduling timers for
	 * budget enforcement must occur locally on each CPU.
	 */
	BUG_ON(state->cpu != raw_smp_processor_id());

	raw_spin_lock_irqsave(&state->lock, flags);
	sup_update_time(&state->sup_env, litmus_clock());

	update = state->sup_env.next_scheduler_update;
	now = state->sup_env.env.current_time;

	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n",
		  now, update, state->cpu);

	if (update <= now) {
		litmus_reschedule_local();
	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
		hrtimer_set_expires(timer, ns_to_ktime(update));
		restart = HRTIMER_RESTART;
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	return restart;
}

static long mc2_complete_job(void)
{
	ktime_t next_release;
	long err;

	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
		  get_deadline(current));

	tsk_rt(current)->completed = 1;
	prepare_for_next_period(current);
	next_release = ns_to_ktime(get_release(current));
	preempt_disable();
	TRACE_CUR("next_release=%llu\n", get_release(current));
	if (get_release(current) > litmus_clock()) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_enable_no_resched();
		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
	} else {
		err = 0;
		TRACE_CUR("TARDY: release=%llu now=%llu\n",
			  get_release(current), litmus_clock());
		preempt_enable();
	}

	TRACE_CUR("mc2_complete_job returns [%d] at %llu\n", err, litmus_clock());
	return err;
}

static struct task_struct* mc2_schedule(struct task_struct * prev)
{
	/* next == NULL means "schedule background work". */
	struct mc2_cpu_state *state = local_cpu_state();

	raw_spin_lock(&state->lock);

	BUG_ON(state->scheduled && state->scheduled != prev);
	BUG_ON(state->scheduled && !is_realtime(prev));

	/* update time */
	state->sup_env.will_schedule = true;
	sup_update_time(&state->sup_env, litmus_clock());

	/* check if prev task complete */
	if (is_realtime(prev)) {
		TRACE_TASK(prev, "EXEC_TIME = %llu, EXEC_COST = %llu, REMAINED = %llu\n",
			   get_exec_time(prev), get_exec_cost(prev),
			   get_exec_cost(prev) - get_exec_time(prev));
	}
	if (is_realtime(prev) && (get_exec_time(prev) >= get_exec_cost(prev))) {
		TRACE_TASK(prev, "JOB COMPLETED! but is_completed = %d\n",
			   is_completed(prev));
		/* mc2_complete_job(prev); */
	}

	/* remove task from reservation if it blocks */
	if (is_realtime(prev) && !is_running(prev))
		task_departs(prev, is_completed(prev));

	/* figure out what to schedule next */
	state->scheduled = sup_dispatch(&state->sup_env);

	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
	sched_state_task_picked();

	/* program scheduler timer */
	state->sup_env.will_schedule = false;
	/* NOTE: drops state->lock */
	mc2_update_timer_and_unlock(state);

	if (prev != state->scheduled && is_realtime(prev))
		TRACE_TASK(prev, "descheduled.\n");
	if (state->scheduled)
		TRACE_TASK(state->scheduled, "scheduled.\n");

	return state->scheduled;
}

static void resume_legacy_task_model_updates(struct task_struct *tsk)
{
	lt_t now;
	if (is_sporadic(tsk)) {
		/* If this sporadic task was gone for a "long" time and woke up past
		 * its deadline, then give it a new budget by triggering a job
		 * release. This is purely cosmetic and has no effect on the
		 * MC2 scheduler itself. */

		now = litmus_clock();
		if (is_tardy(tsk, now))
			release_at(tsk, now);
	}
}

/* Called when the state of tsk changes back to TASK_RUNNING.
 * We need to requeue the task.
 */
static void mc2_task_resume(struct task_struct *tsk)
{
	unsigned long flags;
	struct mc2_task_state* tinfo = get_mc2_state(tsk);
	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);

	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&state->lock, flags);
	/* Requeue only if self-suspension was already processed. */
	if (tinfo->has_departed)
	{
		/* Assumption: litmus_clock() is synchronized across cores,
		 * since we might not actually be executing on tinfo->cpu
		 * at the moment. */
		sup_update_time(&state->sup_env, litmus_clock());
		task_arrives(tsk);
		/* NOTE: drops state->lock */
		mc2_update_timer_and_unlock(state);
		local_irq_restore(flags);
	} else {
		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
		raw_spin_unlock_irqrestore(&state->lock, flags);
	}

	resume_legacy_task_model_updates(tsk);
}

static long mc2_admit_task(struct task_struct *tsk)
{
	long err = -ESRCH;
	unsigned long flags;
	struct reservation *res;
	struct mc2_cpu_state *state;
	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
	struct mc2_task *mp = tsk_rt(tsk)->plugin_state;

	if (!tinfo)
		return -ENOMEM;

	if (!mp) {
		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
		kfree(tinfo);
		return err;
	}

	preempt_disable();

	state = cpu_state_for(task_cpu(tsk));
	raw_spin_lock_irqsave(&state->lock, flags);

	res = sup_find_by_id(&state->sup_env, mp->pid);

	/* found the appropriate reservation (or vCPU) */
	if (res) {
		TRACE_TASK(tsk, "FOUND RES\n");
		tinfo->res_info.mc2.mc2_task.crit = mp->crit;

		kfree(tsk_rt(tsk)->plugin_state);
		tsk_rt(tsk)->plugin_state = NULL;

		err = mc2_task_client_init(&tinfo->res_info, tsk, res);
		tinfo->cpu = task_cpu(tsk);
		tinfo->has_departed = true;
		tsk_rt(tsk)->plugin_state = tinfo;

		/* disable LITMUS^RT's per-thread budget enforcement */
		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	preempt_enable();

	if (err)
		kfree(tinfo);

	return err;
}

static void task_new_legacy_task_model_updates(struct task_struct *tsk)
{
	lt_t now = litmus_clock();

	/* the first job exists starting as of right now */
	release_at(tsk, now);
}

static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
			 int is_running)
{
	unsigned long flags;
	struct mc2_task_state* tinfo = get_mc2_state(tsk);
	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);

	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
		   litmus_clock(), on_runqueue, is_running);

	/* acquire the lock protecting the state and disable interrupts */
	raw_spin_lock_irqsave(&state->lock, flags);

	if (is_running) {
		state->scheduled = tsk;
		/* make sure this task should actually be running */
		litmus_reschedule_local();
	}

	if (on_runqueue || is_running) {
		/* Assumption: litmus_clock() is synchronized across cores
		 * [see comment in mc2_task_resume()] */
		sup_update_time(&state->sup_env, litmus_clock());
		task_arrives(tsk);
		/* NOTE: drops state->lock */
		mc2_update_timer_and_unlock(state);
		local_irq_restore(flags);
	} else
		raw_spin_unlock_irqrestore(&state->lock, flags);

	task_new_legacy_task_model_updates(tsk);
}

static void mc2_task_exit(struct task_struct *tsk)
{
	unsigned long flags;
	struct mc2_task_state* tinfo = get_mc2_state(tsk);
	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);

	raw_spin_lock_irqsave(&state->lock, flags);

	if (state->scheduled == tsk)
		state->scheduled = NULL;

	/* remove from queues */
	if (is_running(tsk)) {
		/* Assumption: litmus_clock() is synchronized across cores
		 * [see comment in mc2_task_resume()] */
		sup_update_time(&state->sup_env, litmus_clock());
		task_departs(tsk, 0);
		/* NOTE: drops state->lock */
		mc2_update_timer_and_unlock(state);
		local_irq_restore(flags);
	} else
		raw_spin_unlock_irqrestore(&state->lock, flags);

	kfree(tsk_rt(tsk)->plugin_state);
	tsk_rt(tsk)->plugin_state = NULL;
}

asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
{
	struct task_struct *target;
	int retval = -EINVAL;
	struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);

	if (!mp)
		return -ENOMEM;

	printk("Setting up mc^2 task parameters for process %d.\n", pid);

	if (pid < 0 || !param) {
		goto out;
	}
	if (copy_from_user(mp, param, sizeof(*mp))) {
		retval = -EFAULT;
		goto out;
	}

	/* Task search and manipulation must be protected */
	read_lock_irq(&tasklist_lock);
	if (!(target = find_task_by_vpid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}

	if (is_realtime(target)) {
		/* The task is already a real-time task.
		 * We do not allow parameter changes at this point.
		 */
		retval = -EBUSY;
		goto out_unlock;
	}
	if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because of invalid criticality level\n", pid);
		goto out_unlock;
	}

	target->rt_param.plugin_state = mp;

	retval = 0;
out_unlock:
	read_unlock_irq(&tasklist_lock);
out:
	if (retval)
		kfree(mp);
	return retval;
}

static long create_polling_reservation(
	int res_type,
	struct reservation_config *config)
{
	struct mc2_cpu_state *state;
	struct reservation* res;
	struct polling_reservation *pres;
	unsigned long flags;
	int use_edf = config->priority == LITMUS_NO_PRIORITY;
	int periodic = res_type == PERIODIC_POLLING;
	long err = -EINVAL;

	if (config->polling_params.budget >
	    config->polling_params.period) {
		printk(KERN_ERR "invalid polling reservation (%u): "
		       "budget > period\n", config->id);
		return -EINVAL;
	}
	if (config->polling_params.budget >
	    config->polling_params.relative_deadline
	    && config->polling_params.relative_deadline) {
		printk(KERN_ERR "invalid polling reservation (%u): "
		       "budget > deadline\n", config->id);
		return -EINVAL;
	}
	if (config->polling_params.offset >
	    config->polling_params.period) {
		printk(KERN_ERR "invalid polling reservation (%u): "
		       "offset > period\n", config->id);
		return -EINVAL;
	}

	/* Allocate before we grab a spin lock.
	 * Todo: would be nice to use a core-local allocation.
	 */
	pres = kzalloc(sizeof(*pres), GFP_KERNEL);
	if (!pres)
		return -ENOMEM;

	state = cpu_state_for(config->cpu);
	raw_spin_lock_irqsave(&state->lock, flags);

	res = sup_find_by_id(&state->sup_env, config->id);
	if (!res) {
		polling_reservation_init(pres, use_edf, periodic,
			config->polling_params.budget,
			config->polling_params.period,
			config->polling_params.relative_deadline,
			config->polling_params.offset);
		pres->res.id = config->id;
		if (!use_edf)
			pres->res.priority = config->priority;
		sup_add_new_reservation(&state->sup_env, &pres->res);
		err = config->id;
	} else {
		err = -EEXIST;
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	if (err < 0)
		kfree(pres);

	return err;
}

#define MAX_INTERVALS 1024

static long create_table_driven_reservation(
	struct reservation_config *config)
{
	struct mc2_cpu_state *state;
	struct reservation* res;
	struct table_driven_reservation *td_res = NULL;
	struct lt_interval *slots = NULL;
	size_t slots_size;
	unsigned int i, num_slots;
	unsigned long flags;
	long err = -EINVAL;

	if (!config->table_driven_params.num_intervals) {
		printk(KERN_ERR "invalid table-driven reservation (%u): "
		       "no intervals\n", config->id);
		return -EINVAL;
	}

	if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
		printk(KERN_ERR "invalid table-driven reservation (%u): "
		       "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
		return -EINVAL;
	}

	num_slots = config->table_driven_params.num_intervals;
	slots_size = sizeof(slots[0]) * num_slots;
	slots = kzalloc(slots_size, GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
	if (!td_res)
		err = -ENOMEM;
	else if (copy_from_user(slots,
			config->table_driven_params.intervals, slots_size))
		err = -EFAULT;
	else
		err = 0;

	for (i = 0; i < num_slots; i++) {
		TRACE("###### [%llu, %llu]\n", slots[i].start, slots[i].end);
	}

	if (!err) {
		/* sanity checks */
		for (i = 0; !err && i < num_slots; i++)
			if (slots[i].end <= slots[i].start) {
				printk(KERN_ERR
				       "invalid table-driven reservation (%u): "
				       "invalid interval %u => [%llu, %llu]\n",
				       config->id, i,
				       slots[i].start, slots[i].end);
				err = -EINVAL;
			}

		for (i = 0; !err && i + 1 < num_slots; i++)
			if (slots[i + 1].start <= slots[i].end) {
				printk(KERN_ERR
				       "invalid table-driven reservation (%u): "
				       "overlapping intervals %u, %u\n",
				       config->id, i, i + 1);
				err = -EINVAL;
			}

		if (slots[num_slots - 1].end >
		    config->table_driven_params.major_cycle_length) {
			printk(KERN_ERR
			       "invalid table-driven reservation (%u): last "
			       "interval ends past major cycle %llu > %llu\n",
			       config->id,
			       slots[num_slots - 1].end,
			       config->table_driven_params.major_cycle_length);
			err = -EINVAL;
		}
	}

	if (!err) {
		state = cpu_state_for(config->cpu);
		raw_spin_lock_irqsave(&state->lock, flags);

		res = sup_find_by_id(&state->sup_env, config->id);
		if (!res) {
			table_driven_reservation_init(td_res,
				config->table_driven_params.major_cycle_length,
				slots, num_slots);
			td_res->res.id = config->id;
			td_res->res.priority = config->priority;
			sup_add_new_reservation(&state->sup_env, &td_res->res);
			err = config->id;
		} else {
			err = -EEXIST;
		}

		raw_spin_unlock_irqrestore(&state->lock, flags);
	}

	if (err < 0) {
		kfree(slots);
		kfree(td_res);
	}

	TRACE("CREATE_TABLE_DRIVEN_RES = %d\n", err);
	return err;
}

static long mc2_reservation_create(int res_type, void* __user _config)
{
	long ret = -EINVAL;
	struct reservation_config config;

	TRACE("Attempt to create reservation (%d)\n", res_type);

	if (copy_from_user(&config, _config, sizeof(config)))
		return -EFAULT;

	if (config.cpu < 0 || !cpu_online(config.cpu)) {
		printk(KERN_ERR "invalid polling reservation (%u): "
		       "CPU %d offline\n", config.id, config.cpu);
		return -EINVAL;
	}

	switch (res_type) {
	case PERIODIC_POLLING:
	case SPORADIC_POLLING:
		ret = create_polling_reservation(res_type, &config);
		break;

	case TABLE_DRIVEN:
		ret = create_table_driven_reservation(&config);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
{
	long ret = -EINVAL;
	struct mc2_cpu_state *state;
	struct reservation *res, *next;
	struct sup_reservation_environment *sup_env;
	unsigned long flags;
	int found = 0;

	state = cpu_state_for(cpu);
	raw_spin_lock_irqsave(&state->lock, flags);

	sup_env = &state->sup_env;
	list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
		if (res->id == reservation_id) {
			list_del(&res->list);
			kfree(res);
			found = 1;
			ret = 0;
			break;
		}
	}
	if (!found) {
		list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
			if (res->id == reservation_id) {
				list_del(&res->list);
				kfree(res);
				found = 1;
				ret = 0;
				break;
			}
		}
	}
	if (!found) {
		list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
			if (res->id == reservation_id) {
				list_del(&res->list);
				kfree(res);
				found = 1;
				ret = 0;
				break;
			}
		}
	}

	raw_spin_unlock_irqrestore(&state->lock, flags);

	TRACE("RESERVATION_DESTROY ret = %d\n", ret);
	return ret;
}

static struct domain_proc_info mc2_domain_proc_info;

static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
{
	*ret = &mc2_domain_proc_info;
	return 0;
}

static void mc2_setup_domain_proc(void)
{
	int i, cpu;
	int num_rt_cpus = num_online_cpus();

	struct cd_mapping *cpu_map, *domain_map;

	memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
	init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
	mc2_domain_proc_info.num_cpus = num_rt_cpus;
	mc2_domain_proc_info.num_domains = num_rt_cpus;

	i = 0;
	for_each_online_cpu(cpu) {
		cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
		domain_map = &mc2_domain_proc_info.domain_to_cpus[i];

		cpu_map->id = cpu;
		domain_map->id = i;
		cpumask_set_cpu(i, cpu_map->mask);
		cpumask_set_cpu(cpu, domain_map->mask);
		++i;
	}
}

static long mc2_activate_plugin(void)
{
	int cpu;
	struct mc2_cpu_state *state;

	for_each_online_cpu(cpu) {
		TRACE("Initializing CPU%d...\n", cpu);

		state = cpu_state_for(cpu);

#ifdef CONFIG_RELEASE_MASTER
		state->release_master = atomic_read(&release_master_cpu);
		hrtimer_start_on_info_init(&state->info);
#endif

		raw_spin_lock_init(&state->lock);
		state->cpu = cpu;
		state->scheduled = NULL;

		sup_init(&state->sup_env);

		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
		state->timer.function = on_scheduling_timer;
	}

	mc2_setup_domain_proc();

	return 0;
}

static long mc2_deactivate_plugin(void)
{
	int cpu;
	struct mc2_cpu_state *state;
	struct reservation *res;

	for_each_online_cpu(cpu) {
		state = cpu_state_for(cpu);
		raw_spin_lock(&state->lock);

		hrtimer_cancel(&state->timer);

		/* Delete all reservations --- assumes struct reservation
		 * is prefix of containing struct. */

		while (!list_empty(&state->sup_env.active_reservations)) {
			res = list_first_entry(
				&state->sup_env.active_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		while (!list_empty(&state->sup_env.inactive_reservations)) {
			res = list_first_entry(
				&state->sup_env.inactive_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		while (!list_empty(&state->sup_env.depleted_reservations)) {
			res = list_first_entry(
				&state->sup_env.depleted_reservations,
				struct reservation, list);
			list_del(&res->list);
			kfree(res);
		}

		raw_spin_unlock(&state->lock);
	}

	destroy_domain_proc_info(&mc2_domain_proc_info);
	return 0;
}

static struct sched_plugin mc2_plugin = {
	.plugin_name = "MC2",
	.schedule = mc2_schedule,
	.task_wake_up = mc2_task_resume,
	.admit_task = mc2_admit_task,
	.task_new = mc2_task_new,
	.task_exit = mc2_task_exit,
	.complete_job = mc2_complete_job,
	.get_domain_proc_info = mc2_get_domain_proc_info,
	.activate_plugin = mc2_activate_plugin,
	.deactivate_plugin = mc2_deactivate_plugin,
	.reservation_create = mc2_reservation_create,
	.reservation_destroy = mc2_reservation_destroy,
};

static int __init init_mc2(void)
{
	return register_sched_plugin(&mc2_plugin);
}

module_init(init_mc2);
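
As a usage reference, the following is a hedged sketch of configuring a per-CPU periodic polling reservation that MC2 tasks can then be attached to. The reservation_config fields and the PERIODIC_POLLING / LITMUS_NO_PRIORITY constants are the ones consumed by create_polling_reservation() above; the reservation_create() wrapper name and the nanosecond values are illustrative assumptions, not part of this patch.

#include <litmus/reservation.h>

/* Sketch: a 10 ms budget every 50 ms on CPU 0, reservation ID 1001,
 * scheduled by EDF within the reservation (LITMUS_NO_PRIORITY).
 * create_polling_reservation() above rejects budgets larger than the
 * period or the (non-zero) relative deadline. */
static long create_example_polling_reservation(void)
{
	struct reservation_config config = {
		.id = 1001,
		.cpu = 0,
		.priority = LITMUS_NO_PRIORITY,
	};

	config.polling_params.budget = 10000000ULL;		/* 10 ms in ns */
	config.polling_params.period = 50000000ULL;		/* 50 ms in ns */
	config.polling_params.relative_deadline = 0;		/* implicit deadline */
	config.polling_params.offset = 0;

	/* hypothetical userspace wrapper that ends up in
	 * mc2_reservation_create(PERIODIC_POLLING, &config) */
	return reservation_create(PERIODIC_POLLING, &config);
}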