aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/litmus/sched_mc.h2
-rw-r--r--litmus/litmus.c2
-rw-r--r--litmus/sched_mc_ce.c198
3 files changed, 138 insertions, 64 deletions
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index d29796298701..069198ab7e28 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -20,6 +20,8 @@ struct mc_task {
20struct mc_job { 20struct mc_job {
21 int is_ghost:1; 21 int is_ghost:1;
22 lt_t ghost_budget; 22 lt_t ghost_budget;
23 /* which invocation of the CE is this */
24 unsigned int expected_job;
23}; 25};
24 26
25#ifdef __KERNEL__ 27#ifdef __KERNEL__
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 437cdfa215ce..89fb4e9aff8c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -288,7 +288,7 @@ asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param
288 struct task_struct *target; 288 struct task_struct *target;
289 int retval = -EINVAL; 289 int retval = -EINVAL;
290 290
291 printk("Setting up mixed-criicality task parameters for process %d.\n", 291 printk("Setting up mixed-criticality task parameters for process %d.\n",
292 pid); 292 pid);
293 293
294 if (pid < 0 || param == 0) { 294 if (pid < 0 || param == 0) {
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 0a5a18dc54ff..bda95d9fae41 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -21,6 +21,7 @@
21#include <litmus/rt_param.h> 21#include <litmus/rt_param.h>
22#include <litmus/sched_mc.h> 22#include <litmus/sched_mc.h>
23#include <litmus/litmus_proc.h> 23#include <litmus/litmus_proc.h>
24#include <litmus/sched_trace.h>
24 25
25static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp; 26static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
26 27
@@ -31,7 +32,6 @@ static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
31static atomic_t start_time_set = ATOMIC_INIT(0); 32static atomic_t start_time_set = ATOMIC_INIT(0);
32static atomic64_t start_time = ATOMIC64_INIT(0); 33static atomic64_t start_time = ATOMIC64_INIT(0);
33static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL; 34static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
34static DEFINE_RAW_SPINLOCK(activate_lock);
35 35
36/* 36/*
37 * Cache the budget along with the struct PID for a task so that we don't need 37 * Cache the budget along with the struct PID for a task so that we don't need
@@ -40,9 +40,8 @@ static DEFINE_RAW_SPINLOCK(activate_lock);
40 */ 40 */
41struct ce_dom_pid_entry { 41struct ce_dom_pid_entry {
42 struct pid *pid; 42 struct pid *pid;
43 /* execution cost (sometimes called budget) */ 43 lt_t budget;
44 lt_t exec_cost; 44 /* accumulated (summed) budgets, including this one */
45 /* accumulated (summed) exec costs, including this one */
46 lt_t acc_time; 45 lt_t acc_time;
47}; 46};
48 47
@@ -65,22 +64,29 @@ DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
65DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data); 64DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
66 65
67/* 66/*
67 * Get the offset into the cycle taking the start time into account.
68 */
69static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
70{
71 return (when - atomic64_read(&start_time)) % cycle_time;
72}
73
74/*
68 * Return the index into the PID entries table of what to schedule next. 75 * Return the index into the PID entries table of what to schedule next.
69 * Don't call if the table is empty. Assumes the caller has the domain lock. 76 * Don't call if the table is empty. Assumes the caller has the domain lock.
77 * The offset parameter is the offset into the cycle.
70 * 78 *
71 * TODO Currently O(n) in the number of tasks on the CPU. Binary search? 79 * TODO Currently O(n) in the number of tasks on the CPU. Binary search?
72 */ 80 */
73static int mc_ce_schedule_at(const lt_t when, const domain_t *dom) 81static int mc_ce_schedule_at(const domain_t *dom, lt_t offset)
74{ 82{
75 const struct ce_dom_data *ce_data = dom->data; 83 const struct ce_dom_data *ce_data = dom->data;
76 const struct ce_dom_pid_entry *pid_entry = NULL; 84 const struct ce_dom_pid_entry *pid_entry = NULL;
77 lt_t offset;
78 int i; 85 int i;
79 86
80 BUG_ON(ce_data->cycle_time < 1); 87 BUG_ON(ce_data->cycle_time < 1);
81 BUG_ON(ce_data->num_pid_entries < 1); 88 BUG_ON(ce_data->num_pid_entries < 1);
82 89
83 offset = (when - atomic64_read(&start_time)) % ce_data->cycle_time;
84 for (i = 0; i < ce_data->num_pid_entries; ++i) { 90 for (i = 0; i < ce_data->num_pid_entries; ++i) {
85 pid_entry = &ce_data->pid_entries[i]; 91 pid_entry = &ce_data->pid_entries[i];
86 if (offset < pid_entry->acc_time) { 92 if (offset < pid_entry->acc_time) {
@@ -90,6 +96,7 @@ static int mc_ce_schedule_at(const lt_t when, const domain_t *dom)
90 } 96 }
91 /* can only happen if cycle_time is not right */ 97 /* can only happen if cycle_time is not right */
92 BUG_ON(pid_entry->acc_time > ce_data->cycle_time); 98 BUG_ON(pid_entry->acc_time > ce_data->cycle_time);
99 TRACE("MC-CE schedule at returned task %d for CPU %d\n", i, ce_data->cpu);
93 return i; 100 return i;
94} 101}
95 102
@@ -118,11 +125,14 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
118 request_exit_np(ce_data->scheduled); 125 request_exit_np(ce_data->scheduled);
119 next = prev; 126 next = prev;
120 } else if (ce_data->should_schedule && 127 } else if (ce_data->should_schedule &&
121 is_running(ce_data->should_schedule)) { 128 is_running(ce_data->should_schedule) &&
122 /* schedule the task for this period if it's not blocked */ 129 RT_F_SLEEP != get_rt_flags(ce_data->should_schedule)) {
123 next = ce_data->should_schedule; 130 /*
131 * schedule the task that should be executing in the cyclic
132 * schedule if it is not blocked and not sleeping
133 */
134 next = ce_data->should_schedule;
124 } 135 }
125
126 sched_state_task_picked(); 136 sched_state_task_picked();
127 raw_spin_unlock(dom->lock); 137 raw_spin_unlock(dom->lock);
128 return next; 138 return next;
@@ -212,11 +222,11 @@ static long mc_ce_admit_task(struct task_struct *ts)
212 lvl_a_id, get_partition(ts)); 222 lvl_a_id, get_partition(ts));
213 goto out_put_pid_unlock; 223 goto out_put_pid_unlock;
214 } 224 }
215 if (get_exec_cost(ts) != ce_data->pid_entries[lvl_a_id].exec_cost) { 225 if (get_exec_cost(ts) >= ce_data->pid_entries[lvl_a_id].budget) {
216 printk(KERN_INFO "litmus: saved exec cost %llu and task exec " 226 printk(KERN_INFO "litmus: execution cost %llu is larger than "
217 "cost %llu differ\n", 227 "the budget %llu\n",
218 ce_data->pid_entries[lvl_a_id].exec_cost, 228 get_exec_cost(ts),
219 get_exec_cost(ts)); 229 ce_data->pid_entries[lvl_a_id].budget);
220 goto out_put_pid_unlock; 230 goto out_put_pid_unlock;
221 } 231 }
222 ce_data->pid_entries[lvl_a_id].pid = pid; 232 ce_data->pid_entries[lvl_a_id].pid = pid;
@@ -244,12 +254,19 @@ static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
244 struct pid *pid_should_be_running; 254 struct pid *pid_should_be_running;
245 unsigned long flags; 255 unsigned long flags;
246 int idx, should_be_running; 256 int idx, should_be_running;
257 lt_t offset;
247 258
248 /* have to call mc_ce_schedule_at because the task only gets a PID 259 /* have to call mc_ce_schedule_at because the task only gets a PID
249 * entry after calling admit_task */ 260 * entry after calling admit_task */
250 261
251 raw_spin_lock_irqsave(dom->lock, flags); 262 raw_spin_lock_irqsave(dom->lock, flags);
252 idx = mc_ce_schedule_at(litmus_clock(), dom); 263 /* initialize some task state */
264 set_rt_flags(ts, RT_F_RUNNING);
265 tsk_rt(ts)->job_params.job_no = 0;
266 tsk_mc_data(ts)->mc_job.expected_job = 0;
267
268 offset = get_cycle_offset(litmus_clock(), ce_data->cycle_time);
269 idx = mc_ce_schedule_at(dom, offset);
253 pid_should_be_running = ce_data->pid_entries[idx].pid; 270 pid_should_be_running = ce_data->pid_entries[idx].pid;
254 rcu_read_lock(); 271 rcu_read_lock();
255 should_be_running = (ts == pid_task(pid_should_be_running, PIDTYPE_PID)); 272 should_be_running = (ts == pid_task(pid_should_be_running, PIDTYPE_PID));
@@ -302,9 +319,13 @@ static void mc_ce_task_block(struct task_struct *ts)
302 */ 319 */
303static long mc_ce_complete_job(void) 320static long mc_ce_complete_job(void)
304{ 321{
305 /* TODO */ 322 /* mark that the task wishes to sleep */
306 printk(KERN_EMERG "complete job called TODO\n"); 323 set_rt_flags(current, RT_F_SLEEP);
307 BUG(); 324 tsk_rt(current)->job_params.job_no++;
325 /* call schedule, this will return when a new job arrives
326 * it also takes care of preparing for the next release
327 */
328 schedule();
308 return 0; 329 return 0;
309} 330}
310 331
@@ -328,6 +349,10 @@ static void mc_ce_task_exit(struct task_struct *ts)
328 BUG_ON(!pid); 349 BUG_ON(!pid);
329 put_pid(pid); 350 put_pid(pid);
330 ce_data->pid_entries[lvl_a_id].pid = NULL; 351 ce_data->pid_entries[lvl_a_id].pid = NULL;
352 if (ce_data->scheduled == ts)
353 ce_data->scheduled = NULL;
354 if (ce_data->should_schedule == ts)
355 ce_data->should_schedule = NULL;
331 raw_spin_unlock_irqrestore(dom->lock, flags); 356 raw_spin_unlock_irqrestore(dom->lock, flags);
332} 357}
333 358
@@ -341,28 +366,64 @@ static void mc_ce_task_exit(struct task_struct *ts)
341 */ 366 */
342static enum hrtimer_restart timer_callback(struct hrtimer *timer) 367static enum hrtimer_restart timer_callback(struct hrtimer *timer)
343{ 368{
344 struct ce_dom_data *ce_data = container_of(timer,
345 struct ce_dom_data, timer);
346 domain_t *dom = &per_cpu(mc_ce_doms, ce_data->cpu);
347 /* relative and absolute times for cycles */ 369 /* relative and absolute times for cycles */
348 lt_t now, offset_rel, cycle_start_abs, next_timer_abs; 370 lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
371 struct task_struct *should_schedule;
349 struct ce_dom_pid_entry *pid_entry; 372 struct ce_dom_pid_entry *pid_entry;
373 struct ce_dom_data *ce_data;
350 unsigned long flags; 374 unsigned long flags;
375 domain_t *dom;
351 int idx; 376 int idx;
352 377
378 ce_data = container_of(timer, struct ce_dom_data, timer);
379 dom = &per_cpu(mc_ce_doms, ce_data->cpu);
380
353 raw_spin_lock_irqsave(dom->lock, flags); 381 raw_spin_lock_irqsave(dom->lock, flags);
354 382
355 now = litmus_clock(); 383 now = litmus_clock();
356 offset_rel = (now - atomic64_read(&start_time)) % ce_data->cycle_time; 384 TRACE("timer callback at %llu on CPU %d\n", now, ce_data->cpu);
385 /* Based off of the current time, figure out the offset into the cycle
386 * and the cycle's start time, and determine what should be scheduled.
387 */
388 offset_rel = get_cycle_offset(now, ce_data->cycle_time);
357 cycle_start_abs = now - offset_rel; 389 cycle_start_abs = now - offset_rel;
358 idx = mc_ce_schedule_at(now, dom); 390 idx = mc_ce_schedule_at(dom, offset_rel);
359 pid_entry = &ce_data->pid_entries[idx]; 391 pid_entry = &ce_data->pid_entries[idx];
392 /* set the timer to fire at the next cycle start */
360 next_timer_abs = cycle_start_abs + pid_entry->acc_time; 393 next_timer_abs = cycle_start_abs + pid_entry->acc_time;
361 hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs)); 394 hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
395
362 /* get the task_struct (pid_task can accept a NULL) */ 396 /* get the task_struct (pid_task can accept a NULL) */
363 rcu_read_lock(); 397 rcu_read_lock();
364 ce_data->should_schedule = pid_task(pid_entry->pid, PIDTYPE_PID); 398 should_schedule = pid_task(pid_entry->pid, PIDTYPE_PID);
365 rcu_read_unlock(); 399 rcu_read_unlock();
400 ce_data->should_schedule = should_schedule;
401
402 if (should_schedule && atomic_read(&start_time_set)) {
403 /* we only start tracking this after the job release syscall */
404 tsk_mc_data(should_schedule)->mc_job.expected_job++;
405 /*
406 * If jobs are not overrunning their budgets, then this
407 * should not happen.
408 */
409 WARN(tsk_mc_data(should_schedule)->mc_job.expected_job !=
410 tsk_rt(should_schedule)->job_params.job_no,
411 "LITMUS MC-CE level A timer fired, "
412 "but the task %d seems to be "
413 "overrunning its budget\n",
414 should_schedule->pid);
415 }
416
417 if (ce_data->should_schedule) {
418 tsk_rt(should_schedule)->job_params.deadline =
419 cycle_start_abs + pid_entry->acc_time;
420 tsk_rt(should_schedule)->job_params.release =
421 tsk_rt(should_schedule)->job_params.deadline -
422 pid_entry->budget;
423 tsk_rt(should_schedule)->job_params.exec_time = 0;
424 sched_trace_task_release(should_schedule);
425 set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
426 }
366 if (ce_data->scheduled != ce_data->should_schedule) 427 if (ce_data->scheduled != ce_data->should_schedule)
367 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu); 428 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
368 429
@@ -378,12 +439,13 @@ static int cancel_all_timers(void)
378{ 439{
379 struct ce_dom_data *ce_data; 440 struct ce_dom_data *ce_data;
380 domain_t *dom; 441 domain_t *dom;
381 int cpu, ret = 0; 442 int cpu, ret = 0, cancel_res;
382 443
383 for_each_online_cpu(cpu) { 444 for_each_online_cpu(cpu) {
384 dom = &per_cpu(mc_ce_doms, cpu); 445 dom = &per_cpu(mc_ce_doms, cpu);
385 ce_data = dom->data; 446 ce_data = dom->data;
386 ret = ret || hrtimer_cancel(&ce_data->timer); 447 cancel_res = hrtimer_cancel(&ce_data->timer);
448 ret = ret || cancel_res;
387 } 449 }
388 return ret; 450 return ret;
389} 451}
@@ -411,9 +473,9 @@ static void arm_all_timers(void)
411} 473}
412 474
413/* 475/*
414 * There are no real releases in the CE, but the task releasing code will 476 * There are no real releases in the CE, but the task release syscall will
415 * call this. We can re-set our notion of the CE period start to make 477 * call this. We can re-set our notion of the CE period start to make
416 * the schedule line up. 478 * the schedule look pretty.
417 */ 479 */
418static void mc_ce_release_at(struct task_struct *ts, lt_t start) 480static void mc_ce_release_at(struct task_struct *ts, lt_t start)
419{ 481{
@@ -432,8 +494,6 @@ static long mc_ce_activate_plugin(void)
432 domain_t *dom; 494 domain_t *dom;
433 int cpu; 495 int cpu;
434 496
435 raw_spin_lock(&activate_lock);
436
437 for_each_online_cpu(cpu) { 497 for_each_online_cpu(cpu) {
438 dom = &per_cpu(mc_ce_doms, cpu); 498 dom = &per_cpu(mc_ce_doms, cpu);
439 ce_data = dom->data; 499 ce_data = dom->data;
@@ -444,7 +504,6 @@ static long mc_ce_activate_plugin(void)
444 atomic_set(&start_time_set, 0); 504 atomic_set(&start_time_set, 0);
445 atomic64_set(&start_time, litmus_clock()); 505 atomic64_set(&start_time, litmus_clock());
446 arm_all_timers(); 506 arm_all_timers();
447 raw_spin_unlock(&activate_lock);
448 return 0; 507 return 0;
449} 508}
450 509
@@ -465,13 +524,12 @@ static void clear_pid_entries(void)
465 put_pid(ce_data->pid_entries[entry].pid); 524 put_pid(ce_data->pid_entries[entry].pid);
466 ce_data->pid_entries[entry].pid = NULL; 525 ce_data->pid_entries[entry].pid = NULL;
467 } 526 }
468 ce_data->pid_entries[entry].exec_cost = 0; 527 ce_data->pid_entries[entry].budget = 0;
469 ce_data->pid_entries[entry].acc_time = 0; 528 ce_data->pid_entries[entry].acc_time = 0;
470 } 529 }
471 } 530 }
472} 531}
473 532
474static void tear_down_proc(void);
475static long mc_ce_deactivate_plugin(void) 533static long mc_ce_deactivate_plugin(void)
476{ 534{
477 domain_t *dom; 535 domain_t *dom;
@@ -485,8 +543,6 @@ static long mc_ce_deactivate_plugin(void)
485 atomic_set(&ce_data->timer_info.state, 543 atomic_set(&ce_data->timer_info.state,
486 HRTIMER_START_ON_INACTIVE); 544 HRTIMER_START_ON_INACTIVE);
487 } 545 }
488 clear_pid_entries();
489 tear_down_proc();
490 return 0; 546 return 0;
491} 547}
492 548
@@ -515,7 +571,6 @@ static int __init init_sched_mc_ce(void)
515 rt_domain_t *rt; 571 rt_domain_t *rt;
516 int cpu, err; 572 int cpu, err;
517 573
518 clear_pid_entries();
519 for_each_online_cpu(cpu) { 574 for_each_online_cpu(cpu) {
520 dom = &per_cpu(mc_ce_doms, cpu); 575 dom = &per_cpu(mc_ce_doms, cpu);
521 rt = &per_cpu(mc_ce_rts, cpu); 576 rt = &per_cpu(mc_ce_rts, cpu);
@@ -527,6 +582,7 @@ static int __init init_sched_mc_ce(void)
527 ce_data->cpu = cpu; 582 ce_data->cpu = cpu;
528 ce_data->timer.function = timer_callback; 583 ce_data->timer.function = timer_callback;
529 } 584 }
585 clear_pid_entries();
530 err = setup_proc(); 586 err = setup_proc();
531 if (!err) 587 if (!err)
532 err = register_sched_plugin(&mc_ce_plugin); 588 err = register_sched_plugin(&mc_ce_plugin);
@@ -540,15 +596,18 @@ static int write_into_proc(char *proc_buf, const int proc_size, char *fmt, ...)
540 int n; 596 int n;
541 va_list args; 597 va_list args;
542 598
599 /* When writing to procfs, we don't care about the trailing null that
600 * is not included in the count returned by vscnprintf.
601 */
543 va_start(args, fmt); 602 va_start(args, fmt);
544 n = vscnprintf(buf, BUF_SIZE, fmt, args); 603 n = vsnprintf(buf, BUF_SIZE, fmt, args);
545 va_end(args); 604 va_end(args);
546 if (BUF_SIZE <= n || proc_size <= n - 1) { 605 if (BUF_SIZE <= n || proc_size <= n) {
547 /* too big for formatting buffer or proc (less null byte) */ 606 /* too big for formatting buffer or proc (less null byte) */
548 n = -EINVAL; 607 n = -EINVAL;
549 goto out; 608 goto out;
550 } 609 }
551 memcpy(proc_buf, buf, n - 1); 610 memcpy(proc_buf, buf, n);
552out: 611out:
553 return n; 612 return n;
554} 613}
@@ -556,6 +615,9 @@ out:
556 615
557/* 616/*
558 * Writes a PID entry to the procfs. 617 * Writes a PID entry to the procfs.
618 *
619 * @page buffer to write into.
620 * @count bytes available in the buffer
559 */ 621 */
560#define PID_SPACE 15 622#define PID_SPACE 15
561#define TASK_INFO_BUF (PID_SPACE + TASK_COMM_LEN) 623#define TASK_INFO_BUF (PID_SPACE + TASK_COMM_LEN)
@@ -582,28 +644,28 @@ static int write_pid_entry(char *page, const int count, const int cpu,
582 *ti_b = ' '; /* nuke the null byte */ 644 *ti_b = ' '; /* nuke the null byte */
583 ti_b++; 645 ti_b++;
584 get_task_comm(ti_b, ts); 646 get_task_comm(ti_b, ts);
585 task_info[TASK_INFO_BUF - 1] = '\0';
586 } else { 647 } else {
587 const char *msg = "pid_task() failed :("; 648 strncpy(task_info, "pid_task() failed :(",
588 strncpy(task_info, msg, sizeof(msg)); 649 TASK_INFO_BUF);
589 } 650 }
590 651
591 } else 652 } else
592 strncpy(task_info, "no", 3); 653 strncpy(task_info, "no", TASK_INFO_BUF);
654 task_info[TASK_INFO_BUF - 1] = '\0'; /* just to be sure */
593 655
594 err = write_into_proc(page, count - n, "# task: %s\n", task_info); 656 err = write_into_proc(page + n, count - n, "# task: %s\n", task_info);
595 if (err < 0) { 657 if (err < 0) {
596 n = -ENOSPC; 658 n = -ENOSPC;
597 goto out; 659 goto out;
598 } 660 }
599 n += err; 661 n += err;
600 err = write_into_proc(page, count - n, "%d, %d, %llu\n", 662 err = write_into_proc(page + n, count - n, "%d, %d, %llu\n",
601 cpu, task, pid_entry->exec_cost); 663 cpu, task, pid_entry->budget);
602 if (err < 0) { 664 if (err < 0) {
603 n = -ENOSPC; 665 n = -ENOSPC;
604 goto out; 666 goto out;
605 } 667 }
606 n =+ err; 668 n += err;
607out: 669out:
608 return n; 670 return n;
609} 671}
@@ -621,15 +683,15 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
621 domain_t *dom; 683 domain_t *dom;
622 684
623 if (off > 0) { 685 if (off > 0) {
624 *eof = 1; 686 printk(KERN_INFO "litmus: MC-CE called read with off > 0\n");
625 return 0; 687 goto out;
626 } 688 }
627 689
628 for_each_online_cpu(cpu) { 690 for_each_online_cpu(cpu) {
629 dom = &per_cpu(mc_ce_doms, cpu); 691 dom = &per_cpu(mc_ce_doms, cpu);
630 ce_data = dom->data; 692 ce_data = dom->data;
631 for (t = 0; t < ce_data->num_pid_entries; ++t) { 693 for (t = 0; t < ce_data->num_pid_entries; ++t) {
632 err = write_pid_entry(page, count - n, 694 err = write_pid_entry(page + n, count - n,
633 cpu, t, &ce_data->pid_entries[t]); 695 cpu, t, &ce_data->pid_entries[t]);
634 if (err < 0) { 696 if (err < 0) {
635 n = -ENOSPC; 697 n = -ENOSPC;
@@ -638,8 +700,8 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
638 n += err; 700 n += err;
639 } 701 }
640 } 702 }
641 *eof = 1;
642out: 703out:
704 *eof = 1;
643 return n; 705 return n;
644} 706}
645 707
@@ -663,8 +725,8 @@ static int skip_comment(const char *buf, const unsigned long max)
663 return i; 725 return i;
664} 726}
665 727
666/* a budget of 1 millisecond is probably reasonable */ 728/* a budget of 5 milliseconds is probably reasonable */
667#define BUDGET_THRESHOLD 1000000ULL 729#define BUDGET_THRESHOLD 5000000ULL
668static int setup_pid_entry(const int cpu, const int task, const lt_t budget) 730static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
669{ 731{
670 domain_t *dom = &per_cpu(mc_ce_doms, cpu); 732 domain_t *dom = &per_cpu(mc_ce_doms, cpu);
@@ -673,9 +735,10 @@ static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
673 int err = 0; 735 int err = 0;
674 736
675 /* check the inputs */ 737 /* check the inputs */
676 if (cpu < 0 || cpu >= NR_CPUS || task < 1 || task > PID_MAX_DEFAULT || 738 if (cpu < 0 || NR_CPUS <= cpu || task < 0 ||
739 CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= task ||
677 budget < 1) { 740 budget < 1) {
678 printk(KERN_INFO "litmus: bad cpu or task ID sent to " 741 printk(KERN_INFO "litmus: bad cpu, task ID, or budget sent to "
679 "MC-CE proc\n"); 742 "MC-CE proc\n");
680 err = -EINVAL; 743 err = -EINVAL;
681 goto out; 744 goto out;
@@ -695,7 +758,7 @@ static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
695 /* add the new entry */ 758 /* add the new entry */
696 new_entry = &ce_data->pid_entries[ce_data->num_pid_entries]; 759 new_entry = &ce_data->pid_entries[ce_data->num_pid_entries];
697 BUG_ON(NULL != new_entry->pid); 760 BUG_ON(NULL != new_entry->pid);
698 new_entry->exec_cost = budget; 761 new_entry->budget = budget;
699 new_entry->acc_time = ce_data->cycle_time + budget; 762 new_entry->acc_time = ce_data->cycle_time + budget;
700 /* update the domain entry */ 763 /* update the domain entry */
701 ce_data->cycle_time += budget; 764 ce_data->cycle_time += budget;
@@ -720,8 +783,6 @@ static int proc_write_ce_file(struct file *file, const char __user *buffer,
720 int cpu, task, cnt = 0, chars_read, converted, err; 783 int cpu, task, cnt = 0, chars_read, converted, err;
721 lt_t budget; 784 lt_t budget;
722 785
723 raw_spin_lock(&activate_lock);
724
725 if (is_active_plugin()) { 786 if (is_active_plugin()) {
726 printk(KERN_INFO "litmus: can't edit MC-CE proc when plugin " 787 printk(KERN_INFO "litmus: can't edit MC-CE proc when plugin "
727 "active\n"); 788 "active\n");
@@ -730,11 +791,15 @@ static int proc_write_ce_file(struct file *file, const char __user *buffer,
730 } 791 }
731 792
732 if (count > PROCFS_MAX_SIZE) { 793 if (count > PROCFS_MAX_SIZE) {
794 printk(KERN_INFO "litmus: MC-CE procfs got too many bytes "
795 "from user-space.\n");
733 cnt = -EINVAL; 796 cnt = -EINVAL;
734 goto out; 797 goto out;
735 } 798 }
736 799
737 if (copy_from_user(kbuf, buffer, count)) { 800 if (copy_from_user(kbuf, buffer, count)) {
801 printk(KERN_INFO "litmus: couldn't copy from user %s\n",
802 __FUNCTION__);
738 cnt = -EFAULT; 803 cnt = -EFAULT;
739 goto out; 804 goto out;
740 } 805 }
@@ -753,9 +818,11 @@ static int proc_write_ce_file(struct file *file, const char __user *buffer,
753 c += chars_read; 818 c += chars_read;
754 continue; 819 continue;
755 } 820 }
756 chars_read = sscanf(c, "%d, %d, %llu%n", &cpu, &task, &budget, 821 converted = sscanf(c, "%d, %d, %llu%n", &cpu, &task, &budget,
757 &converted); 822 &chars_read);
758 if (3 != converted) { 823 if (3 != converted) {
824 printk(KERN_INFO "litmus: MC-CE procfs expected three "
825 "arguments, but got %d.\n", converted);
759 cnt = -EINVAL; 826 cnt = -EINVAL;
760 goto out; 827 goto out;
761 } 828 }
@@ -768,7 +835,6 @@ static int proc_write_ce_file(struct file *file, const char __user *buffer,
768 } 835 }
769 } 836 }
770out: 837out:
771 raw_spin_unlock(&activate_lock);
772 return cnt; 838 return cnt;
773} 839}
774#undef PROCFS_MAX_SIZE 840#undef PROCFS_MAX_SIZE
@@ -806,4 +872,10 @@ out:
806} 872}
807#undef CE_FILE_PROC_NAME 873#undef CE_FILE_PROC_NAME
808 874
875static void clean_sched_mc_ce(void)
876{
877 tear_down_proc();
878}
879
809module_init(init_sched_mc_ce); 880module_init(init_sched_mc_ce);
881module_exit(clean_sched_mc_ce);