about summary refs log tree commit diff stats
path: root/litmus/sched_mc_ce.c
diff options
context:
space:
mode:
author    Christopher Kenna <cjk@cs.unc.edu>    2011-09-24 19:23:25 -0400
committer Christopher Kenna <cjk@cs.unc.edu>   2011-09-24 19:23:25 -0400
commit  be37a04426a806099b4084452fe4275db5254fea (patch)
tree    fae6fc6e48b408ab108988bd82cabbc040f9307a /litmus/sched_mc_ce.c
parent  d1e50b511a6586da696ef5a61ed18818b8139b67 (diff)
Check point commit for merging CE with MC plugin.
Also started to fix typos in Jonathan's code, but gave up because there were too many, after realizing that event_group.o was not in the LITMUS Makefile.
Diffstat (limited to 'litmus/sched_mc_ce.c')
-rw-r--r--  litmus/sched_mc_ce.c  110
1 file changed, 54 insertions(+), 56 deletions(-)
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index eeee00ab3047..dcb74f4ca67b 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -19,52 +19,32 @@
19#include <litmus/sched_plugin.h> 19#include <litmus/sched_plugin.h>
20#include <litmus/rt_domain.h> 20#include <litmus/rt_domain.h>
21#include <litmus/rt_param.h> 21#include <litmus/rt_param.h>
22#include <litmus/sched_mc.h>
23#include <litmus/litmus_proc.h> 22#include <litmus/litmus_proc.h>
24#include <litmus/sched_trace.h> 23#include <litmus/sched_trace.h>
25#include <litmus/jobs.h> 24#include <litmus/jobs.h>
25#include <litmus/sched_mc.h>
26 26
27static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp; 27static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
28 28
29#define tsk_mc_data(t) (tsk_rt(t)->mc_data)
30#define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit)
31#define is_active_plugin() (litmus == &mc_ce_plugin) 29#define is_active_plugin() (litmus == &mc_ce_plugin)
32 30
33static atomic_t start_time_set = ATOMIC_INIT(-1); 31static atomic_t start_time_set = ATOMIC_INIT(-1);
34static atomic64_t start_time = ATOMIC64_INIT(0); 32static atomic64_t start_time = ATOMIC64_INIT(0);
35static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL; 33static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
36 34
37/*
38 * Cache the budget along with the struct PID for a task so that we don't need
39 * to fetch its task_struct every time we check to see what should be
40 * scheduled.
41 */
42struct ce_dom_pid_entry {
43 struct pid *pid;
44 lt_t budget;
45 /* accumulated (summed) budgets, including this one */
46 lt_t acc_time;
47 int expected_job;
48};
49
50struct ce_dom_data {
51 int cpu;
52 struct task_struct *scheduled, *should_schedule;
53 /*
54 * Each CPU needs a mapping of level A ID (integer) to struct pid so
55 * that we can get its task struct.
56 */
57 struct ce_dom_pid_entry pid_entries[CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS];
58 int num_pid_entries;
59 lt_t cycle_time;
60 struct hrtimer_start_on_info timer_info;
61 struct hrtimer timer;
62};
63 35
64DEFINE_PER_CPU(domain_t, mc_ce_doms); 36DEFINE_PER_CPU(domain_t, mc_ce_doms);
65DEFINE_PER_CPU(rt_domain_t, mc_ce_rts); 37DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
66DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data); 38DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
67 39
40/* Return the address of the domain_t for this CPU, used by the
41 * mixed-criticality plugin. */
42domain_t *ce_domain_for(int cpu)
43{
44 return &per_cpu(mc_ce_doms, cpu);
45}
46
47
68/* 48/*
69 * Get the offset into the cycle taking the start time into account. 49 * Get the offset into the cycle taking the start time into account.
70 */ 50 */
@@ -237,14 +217,13 @@ static void mc_ce_tick(struct task_struct *ts)
237 * Here we look up the task's PID structure and save it in the proper slot on 217 * Here we look up the task's PID structure and save it in the proper slot on
238 * the CPU this task will run on. 218 * the CPU this task will run on.
239 */ 219 */
240static long mc_ce_admit_task(struct task_struct *ts) 220static long __mc_ce_admit_task(struct task_struct *ts)
241{ 221{
242 domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts)); 222 domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
243 struct ce_dom_data *ce_data = dom->data; 223 struct ce_dom_data *ce_data = dom->data;
244 struct mc_data *mcd = tsk_mc_data(ts); 224 struct mc_data *mcd = tsk_mc_data(ts);
245 struct pid *pid = NULL; 225 struct pid *pid = NULL;
246 long retval = -EINVAL; 226 long retval = -EINVAL;
247 unsigned long flags;
248 const int lvl_a_id = mcd->mc_task.lvl_a_id; 227 const int lvl_a_id = mcd->mc_task.lvl_a_id;
249 228
250 /* check the task has migrated to the right CPU (like in sched_cedf) */ 229 /* check the task has migrated to the right CPU (like in sched_cedf) */
@@ -269,39 +248,46 @@ static long mc_ce_admit_task(struct task_struct *ts)
269 goto out; 248 goto out;
270 } 249 }
271 250
272 raw_spin_lock_irqsave(dom->lock, flags);
273 if (lvl_a_id >= ce_data->num_pid_entries) { 251 if (lvl_a_id >= ce_data->num_pid_entries) {
274 printk(KERN_INFO "litmus: level A id greater than expected " 252 printk(KERN_INFO "litmus: level A id greater than expected "
275 "number of tasks %d for %d cpu %d\n", 253 "number of tasks %d for %d cpu %d\n",
276 ce_data->num_pid_entries, ts->pid, 254 ce_data->num_pid_entries, ts->pid,
277 get_partition(ts)); 255 get_partition(ts));
278 goto out_put_pid_unlock; 256 goto out_put_pid;
279 } 257 }
280 if (ce_data->pid_entries[lvl_a_id].pid) { 258 if (ce_data->pid_entries[lvl_a_id].pid) {
281 printk(KERN_INFO "litmus: have saved pid info id: %d cpu: %d\n", 259 printk(KERN_INFO "litmus: have saved pid info id: %d cpu: %d\n",
282 lvl_a_id, get_partition(ts)); 260 lvl_a_id, get_partition(ts));
283 goto out_put_pid_unlock; 261 goto out_put_pid;
284 } 262 }
285 if (get_exec_cost(ts) >= ce_data->pid_entries[lvl_a_id].budget) { 263 if (get_exec_cost(ts) >= ce_data->pid_entries[lvl_a_id].budget) {
286 printk(KERN_INFO "litmus: execution cost %llu is larger than " 264 printk(KERN_INFO "litmus: execution cost %llu is larger than "
287 "the budget %llu\n", 265 "the budget %llu\n",
288 get_exec_cost(ts), 266 get_exec_cost(ts),
289 ce_data->pid_entries[lvl_a_id].budget); 267 ce_data->pid_entries[lvl_a_id].budget);
290 goto out_put_pid_unlock; 268 goto out_put_pid;
291 } 269 }
292 ce_data->pid_entries[lvl_a_id].pid = pid; 270 ce_data->pid_entries[lvl_a_id].pid = pid;
293 retval = 0; 271 retval = 0;
294 /* don't call put_pid if we are successful */ 272 /* don't call put_pid if we are successful */
295 goto out_unlock; 273 goto out;
296 274
297out_put_pid_unlock: 275out_put_pid:
298 put_pid(pid); 276 put_pid(pid);
299out_unlock:
300 raw_spin_unlock_irqrestore(dom->lock, flags);
301out: 277out:
302 return retval; 278 return retval;
303} 279}
304 280
281static long mc_ce_admit_task(struct task_struct *ts)
282{
283 domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
284 unsigned long flags, retval;
285 raw_spin_lock_irqsave(dom->lock, flags);
286 retval = __mc_ce_admit_task(ts);
287 raw_spin_unlock_irqrestore(dom->lock, flags);
288 return retval;
289}
290
305/* 291/*
306 * Called to set up a new real-time task (after the admit_task callback). 292 * Called to set up a new real-time task (after the admit_task callback).
307 * At this point the task's struct PID is already hooked up on the destination 293 * At this point the task's struct PID is already hooked up on the destination
@@ -386,7 +372,7 @@ static void mc_ce_task_exit(struct task_struct *ts)
386 struct ce_dom_data *ce_data = dom->data; 372 struct ce_dom_data *ce_data = dom->data;
387 unsigned long flags; 373 unsigned long flags;
388 struct pid *pid; 374 struct pid *pid;
389 const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;; 375 const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
390 376
391 TRACE_TASK(ts, "exited\n"); 377 TRACE_TASK(ts, "exited\n");
392 378
@@ -410,31 +396,23 @@ static void mc_ce_task_exit(struct task_struct *ts)
410 * Timer stuff 396 * Timer stuff
411 **********************************************************/ 397 **********************************************************/
412 398
413/* 399void __mc_ce_timer_callback(struct hrtimer *timer)
414 * What to do when a timer fires. The timer should only be armed if the number
415 * of PID entries is positive.
416 */
417static enum hrtimer_restart timer_callback(struct hrtimer *timer)
418{ 400{
419 /* relative and absolute times for cycles */ 401 /* relative and absolute times for cycles */
420 lt_t now, offset_rel, cycle_start_abs, next_timer_abs; 402 lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
421 struct task_struct *should_schedule; 403 struct task_struct *should_schedule;
422 struct ce_dom_pid_entry *pid_entry; 404 struct ce_dom_pid_entry *pid_entry;
423 struct ce_dom_data *ce_data; 405 struct ce_dom_data *ce_data;
424 unsigned long flags;
425 domain_t *dom; 406 domain_t *dom;
426 int idx, budget_overrun; 407 int idx, budget_overrun;
427 408
428 ce_data = container_of(timer, struct ce_dom_data, timer); 409 ce_data = container_of(timer, struct ce_dom_data, timer);
429 dom = &per_cpu(mc_ce_doms, ce_data->cpu); 410 dom = container_of(((void*)ce_data), domain_t, data);
430 411
431 raw_spin_lock_irqsave(dom->lock, flags);
432
433 now = litmus_clock();
434 TRACE("timer callback at %llu on CPU %d\n", now, ce_data->cpu);
435 /* Based off of the current time, figure out the offset into the cycle 412 /* Based off of the current time, figure out the offset into the cycle
436 * and the cycle's start time, and determine what should be scheduled. 413 * and the cycle's start time, and determine what should be scheduled.
437 */ 414 */
415 now = litmus_clock();
438 offset_rel = get_cycle_offset(now, ce_data->cycle_time); 416 offset_rel = get_cycle_offset(now, ce_data->cycle_time);
439 cycle_start_abs = now - offset_rel; 417 cycle_start_abs = now - offset_rel;
440 idx = mc_ce_schedule_at(dom, offset_rel); 418 idx = mc_ce_schedule_at(dom, offset_rel);
@@ -443,7 +421,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
443 next_timer_abs = cycle_start_abs + pid_entry->acc_time; 421 next_timer_abs = cycle_start_abs + pid_entry->acc_time;
444 hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs)); 422 hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
445 423
446 TRACE("now: %llu offset_rel: %llu cycle_start_abs: %llu " 424 TRACE("timer: now: %llu offset_rel: %llu cycle_start_abs: %llu "
447 "next_timer_abs: %llu\n", now, offset_rel, 425 "next_timer_abs: %llu\n", now, offset_rel,
448 cycle_start_abs, next_timer_abs); 426 cycle_start_abs, next_timer_abs);
449 427
@@ -478,6 +456,26 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
478 sched_trace_task_release(should_schedule); 456 sched_trace_task_release(should_schedule);
479 set_rt_flags(ce_data->should_schedule, RT_F_RUNNING); 457 set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
480 } 458 }
459}
460
461/*
462 * What to do when a timer fires. The timer should only be armed if the number
463 * of PID entries is positive.
464 */
465static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
466{
467 struct ce_dom_data *ce_data;
468 unsigned long flags;
469 domain_t *dom;
470
471 ce_data = container_of(timer, struct ce_dom_data, timer);
472 dom = container_of(((void*)ce_data), domain_t, data);
473
474 TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
475
476 raw_spin_lock_irqsave(dom->lock, flags);
477 __mc_ce_timer_callback(timer);
478
481 if (ce_data->scheduled != ce_data->should_schedule) 479 if (ce_data->scheduled != ce_data->should_schedule)
482 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu); 480 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
483 481
@@ -542,7 +540,7 @@ static void arm_all_timers(void)
542 * call this. We can re-set our notion of the CE period start to make 540 * call this. We can re-set our notion of the CE period start to make
543 * the schedule look pretty. 541 * the schedule look pretty.
544 */ 542 */
545static void mc_ce_release_at(struct task_struct *ts, lt_t start) 543void mc_ce_release_at(struct task_struct *ts, lt_t start)
546{ 544{
547 TRACE_TASK(ts, "release at\n"); 545 TRACE_TASK(ts, "release at\n");
548 if (atomic_inc_and_test(&start_time_set)) { 546 if (atomic_inc_and_test(&start_time_set)) {
@@ -554,7 +552,7 @@ static void mc_ce_release_at(struct task_struct *ts, lt_t start)
554 atomic_dec(&start_time_set); 552 atomic_dec(&start_time_set);
555} 553}
556 554
557static long mc_ce_activate_plugin(void) 555long mc_ce_activate_plugin(void)
558{ 556{
559 struct ce_dom_data *ce_data; 557 struct ce_dom_data *ce_data;
560 domain_t *dom; 558 domain_t *dom;
@@ -598,7 +596,7 @@ static void clear_pid_entries(void)
598 } 596 }
599} 597}
600 598
601static long mc_ce_deactivate_plugin(void) 599long mc_ce_deactivate_plugin(void)
602{ 600{
603 cancel_all_timers(); 601 cancel_all_timers();
604 return 0; 602 return 0;
@@ -638,7 +636,7 @@ static int __init init_sched_mc_ce(void)
638 hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 636 hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
639 hrtimer_start_on_info_init(&ce_data->timer_info); 637 hrtimer_start_on_info_init(&ce_data->timer_info);
640 ce_data->cpu = cpu; 638 ce_data->cpu = cpu;
641 ce_data->timer.function = timer_callback; 639 ce_data->timer.function = mc_ce_timer_callback;
642 } 640 }
643 clear_pid_entries(); 641 clear_pid_entries();
644 err = setup_proc(); 642 err = setup_proc();