author	Namhoon Kim <namhoonk@cs.unc.edu>	2015-01-14 02:31:12 -0500
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2015-01-14 02:31:12 -0500
commit	6583dcfbda43e420921e3adf7f2e46dc719e8d26 (patch)
tree	21b0a664bcaeb4c82e9e0b3d168af86b8d0e5401
parent	07d5680c4c476a4b68bd3cff134d99ca996b2481 (diff)
MC2 levels A, B
-rw-r--r--	include/litmus/mc2_common.h	14
-rw-r--r--	litmus/litmus.c	63
-rw-r--r--	litmus/mc2_common.c	65
-rw-r--r--	litmus/polling_reservations.c	9
-rw-r--r--	litmus/sched_mc2.c	357
-rw-r--r--	litmus/sched_pres.c	10
6 files changed, 288 insertions(+), 230 deletions(-)
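In summary, this patch reworks the MC^2 plugin around reservation IDs: struct mc2_task loses its pid and hyperperiod fields in favor of a res_id, mc2_task_client_init() now takes a plain struct task_client plus a struct mc2_task, and the criticality level is cached per task in struct mc2_task_state. The sys_set_mc2_task_param() syscall moves from sched_mc2.c into mc2_common.c. mc2_complete_job() becomes the syscall backend for job completions and, on a sporadic release of a level-A task, rewinds the backing table-driven reservation's major cycle. mc2_reservation_destroy() is now defined ahead of mc2_task_exit() and walks the depleted, inactive, and active reservation lists. In litmus.c, sys_set_page_color() starts distinguishing privately mapped pages from shared ones, and in polling_reservations.c the td_drain_budget() BUG_ON() sanity checks are relaxed to TRACE() diagnostics.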
diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
index a1d571f0280f..bdc3a6d82a56 100644
--- a/include/litmus/mc2_common.h
+++ b/include/litmus/mc2_common.h
@@ -14,24 +14,14 @@ enum crit_level {
 
 struct mc2_task {
 	enum crit_level crit;
-	pid_t pid;
-	lt_t hyperperiod;
+	unsigned int res_id;
 };
 
 #ifdef __KERNEL__
 
 #include <litmus/reservation.h>
 
-struct mc2_param{
-	struct mc2_task mc2_task;
-};
-
-struct mc2_task_client {
-	struct task_client tc;
-	struct mc2_param mc2;
-};
-
-long mc2_task_client_init(struct mc2_task_client *mtc, struct task_struct *tsk,
+long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
 	struct reservation *res);
 
 #endif /* __KERNEL__ */
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 88cc3e043b0b..6034ff8731af 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -402,10 +402,12 @@ asmlinkage long sys_set_page_color(int cpu)
 	long ret = 0;
 	//struct task_page *task_page_itr = NULL;
 	//struct task_page *task_page_itr_next = NULL;
+	struct page *page_itr = NULL;
 	struct vm_area_struct *vma_itr = NULL;
 	//struct task_page *entry = NULL;
-	int nr_pages = 0;
+	int nr_pages = 0, nr_shared_pages = 0;
 	LIST_HEAD(pagelist);
+	LIST_HEAD(shared_pagelist);
 
 	down_read(&current->mm->mmap_sem);
 	printk(KERN_INFO "SYSCALL set_page_color\n");
@@ -423,8 +425,8 @@ asmlinkage long sys_set_page_color(int cpu)
 		*/
 		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
 		// print vma flags
-		//printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
-		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
+		printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
+		printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
 
 		for (i = 0; i < num_pages; i++) {
 /*
@@ -440,20 +442,32 @@ asmlinkage long sys_set_page_color(int cpu)
 				continue;
 			if (!old_page)
 				continue;
-			if (PageReserved(old_page))
-				goto put_and_next;
-
-			ret = isolate_lru_page(old_page);
-			//if (pfn_valid(__page_to_pfn(old_page)))
-			if (!ret) {
-				//printk(KERN_INFO "page_mapcount = %d\n", page_mapcount(old_page));
-				printk(KERN_INFO "addr: %lu, pfn: %lu mapcount: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page));
-				list_add_tail(&old_page->lru, &pagelist);
-				inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
-				nr_pages++;
+			if (PageReserved(old_page)) {
+				put_page(old_page);
+				continue;
+			}
+
+			printk(KERN_INFO "addr: %lu, pfn: %lu, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
+
+			if (page_mapcount(old_page) == 1) {
+				ret = isolate_lru_page(old_page);
+				//if (pfn_valid(__page_to_pfn(old_page)))
+				if (!ret) {
+					list_add_tail(&old_page->lru, &pagelist);
+					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
+					nr_pages++;
+				}
+				put_page(old_page);
 			}
-put_and_next:
+			else {
+				ret = isolate_lru_page(old_page);
+				if (!ret) {
+					list_add_tail(&old_page->lru, &shared_pagelist);
+					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
+					nr_shared_pages++;
+				}
 				put_page(old_page);
+			}
 		}
 
 		//INIT_LIST_HEAD(&entry->list);
@@ -462,6 +476,10 @@ put_and_next:
 		vma_itr = vma_itr->vm_next;
 	}
 
+	//list_for_each_entry(page_itr, &pagelist, lru) {
+//		printk(KERN_INFO "B _mapcount = %d, _count = %d\n", page_mapcount(page_itr), page_count(page_itr));
+//	}
+
 	ret = 0;
 	if (!list_empty(&pagelist)) {
 		ret = migrate_pages(&pagelist, new_alloc_page, 0, MIGRATE_ASYNC, MR_SYSCALL);
@@ -471,8 +489,23 @@ put_and_next:
 		}
 	}
 
+	/* copy shared pages HERE */
+/*
+	ret = 0;
+	if (!list_empty(&shared_pagelist)) {
+		ret = migrate_shared_pages(&shared_pagelist, new_alloc_page, 0, MIGRATE_ASYNC, MR_SYSCALL);
+		if (ret) {
+			printk(KERN_INFO "%ld shared pages not migrated.\n", ret);
+			putback_lru_pages(&shared_pagelist);
+		}
+	}
+*/
 	up_read(&current->mm->mmap_sem);
 
+	list_for_each_entry(page_itr, &shared_pagelist, lru) {
+		printk(KERN_INFO "S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr));
+	}
+
 /*
 	list_for_each_entry_safe(task_page_itr, task_page_itr_next, &task_page_list, list) {
 		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", task_page_itr->vm_start, task_page_itr->vm_end, (task_page_itr->vm_end - task_page_itr->vm_start)/PAGE_SIZE);
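The net effect in sys_set_page_color(): pages mapped by only one address space (page_mapcount(old_page) == 1) are still isolated onto pagelist and moved via migrate_pages(), while pages that appear shared are diverted to shared_pagelist and merely logged after the mmap_sem is released; the migrate_shared_pages() path that would actually relocate them is left commented out, so shared pages stay in place for now.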
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
index 56ef6b506a86..d0a42c69d65c 100644
--- a/litmus/mc2_common.c
+++ b/litmus/mc2_common.c
@@ -7,6 +7,8 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
 
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
@@ -14,17 +16,62 @@
 
 #include <litmus/mc2_common.h>
 
-long mc2_task_client_init(
-	struct mc2_task_client *mtc,
-	struct task_struct *tsk,
-	struct reservation *res
-)
+long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res)
 {
-	task_client_init(&mtc->tc, tsk, res);
-	if ((mtc->mc2.mc2_task.crit < CRIT_LEVEL_A) ||
-	    (mtc->mc2.mc2_task.crit > CRIT_LEVEL_C))
+	task_client_init(tc, tsk, res);
+	if ((mc2_param->crit < CRIT_LEVEL_A) ||
+	    (mc2_param->crit > CRIT_LEVEL_C))
 		return -EINVAL;
 
-	TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mtc->mc2.mc2_task.crit);
+	TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mc2_param->crit);
+
 	return 0;
+}
+
+asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
+{
+	struct task_struct *target;
+	int retval = -EINVAL;
+	struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
+
+	if (!mp)
+		return -ENOMEM;
+
+	printk("Setting up mc^2 task parameters for process %d.\n", pid);
+
+	if (pid < 0 || param == 0) {
+		goto out;
+	}
+	if (copy_from_user(mp, param, sizeof(*mp))) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	/* Task search and manipulation must be protected */
+	read_lock_irq(&tasklist_lock);
+	if (!(target = find_task_by_vpid(pid))) {
+		retval = -ESRCH;
+		goto out_unlock;
+	}
+
+	if (is_realtime(target)) {
+		/* The task is already a real-time task.
+		 * We cannot not allow parameter changes at this point.
+		 */
+		retval = -EBUSY;
+		goto out_unlock;
+	}
+	if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
+		printk(KERN_INFO "litmus: real-time task %d rejected "
+		       "because of invalid criticality level\n", pid);
+		goto out_unlock;
+	}
+
+	target->rt_param.plugin_state = mp;
+
+	retval = 0;
+out_unlock:
+	read_unlock_irq(&tasklist_lock);
+out:
+	return retval;
 }
\ No newline at end of file
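For context, a minimal userspace sketch of how a task might invoke the new syscall before entering real-time mode. This is an assumption-laden illustration, not part of the patch: the syscall number is a placeholder, the enum layout just mirrors the crit_level ordering implied by the range check above, and a real LITMUS^RT program would normally go through a liblitmus wrapper rather than raw syscall(2).

/* Hypothetical usage sketch -- __NR_set_mc2_task_param is a placeholder;
 * the real number is assigned in the LITMUS^RT syscall table. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

enum crit_level { CRIT_LEVEL_A = 0, CRIT_LEVEL_B, CRIT_LEVEL_C, NUM_CRIT_LEVELS };

struct mc2_task {
	enum crit_level crit;	/* criticality level (A, B, or C) */
	unsigned int res_id;	/* reservation to attach this task to */
};

#define __NR_set_mc2_task_param 400	/* placeholder, not the real number */

int main(void)
{
	/* declare this process a level-B task backed by reservation 1 */
	struct mc2_task mp = { .crit = CRIT_LEVEL_B, .res_id = 1 };

	if (syscall(__NR_set_mc2_task_param, getpid(), &mp) < 0) {
		perror("set_mc2_task_param");
		return 1;
	}
	return 0;
}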
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 86a32068a3a8..4c07ee74bf39 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -452,7 +452,10 @@ static void td_drain_budget(
 	 * how much time is left in this allocation interval. */
 
 	/* sanity check: we should never try to drain from future slots */
-	BUG_ON(tdres->cur_interval.start > res->env->current_time);
+	TRACE("TD_DRAIN STATE(%d) [%llu,%llu] %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
+	//BUG_ON(tdres->cur_interval.start > res->env->current_time);
+	if (tdres->cur_interval.start > res->env->current_time)
+		TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
 
 	switch (res->state) {
 	case RESERVATION_DEPLETED:
@@ -470,8 +473,8 @@ static void td_drain_budget(
 					RESERVATION_DEPLETED);
 		} else {
 			/* sanity check budget calculation */
-			BUG_ON(res->env->current_time >= tdres->cur_interval.end);
-			BUG_ON(res->env->current_time < tdres->cur_interval.start);
+			//BUG_ON(res->env->current_time >= tdres->cur_interval.end);
+			//BUG_ON(res->env->current_time < tdres->cur_interval.start);
 		}
 
 		break;
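All three BUG_ON() sanity checks in td_drain_budget() are thus downgraded: the future-slot check now emits a "TD_DRAIN BUG" TRACE() line instead of halting the kernel, and the two budget-window checks are commented out entirely, so violations are reported (or silently tolerated) rather than fatal.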
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 1e5b28ba0ac3..b9f05238461b 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -16,9 +16,10 @@
 #include <litmus/polling_reservations.h>
 
 struct mc2_task_state {
-	struct mc2_task_client res_info;
+	struct task_client res_info;
 	int cpu;
 	bool has_departed;
+	struct mc2_task mc2_param;
 };
 
 struct mc2_cpu_state {
@@ -29,12 +30,7 @@ struct mc2_cpu_state {
 
 	int cpu;
 	struct task_struct* scheduled;
-
-#ifdef CONFIG_RELEASE_MASTER
-	int release_master;
-	/* used to delegate releases */
-	struct hrtimer_start_on_info info;
-#endif
+	enum crit_level run_level;
 };
 
 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
@@ -53,8 +49,8 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 	struct reservation* res;
 	struct reservation_client *client;
 
-	res = state->res_info.tc.client.reservation;
-	client = &state->res_info.tc.client;
+	res = state->res_info.client.reservation;
+	client = &state->res_info.client;
 
 	res->ops->client_departs(res, client, job_complete);
 	state->has_departed = true;
@@ -66,8 +62,8 @@ static void task_arrives(struct task_struct *tsk)
 	struct reservation* res;
 	struct reservation_client *client;
 
-	res = state->res_info.tc.client.reservation;
-	client = &state->res_info.tc.client;
+	res = state->res_info.client.reservation;
+	client = &state->res_info.client;
 
 	state->has_departed = false;
 	res->ops->client_arrives(res, client);
@@ -81,13 +77,13 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;
 
 	/* Be sure we're actually running on the right core,
-	 * as mc2_update_timer() is also called from mc2_task_resume(),
+	 * as pres_update_timer() is also called from pres_task_resume(),
 	 * which might be called on any CPU when a thread resumes.
 	 */
 	local = local_cpu_state() == state;
 
 	/* Must drop state lock before calling into hrtimer_start(), which
 	 * may raise a softirq, which in turn may wake ksoftirqd. */
 	raw_spin_unlock(&state->lock);
@@ -169,36 +165,10 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	return restart;
 }
 
-static long mc2_complete_job(void)
-{
-	ktime_t next_release;
-	long err;
-
-	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
-		get_deadline(current));
-
-	tsk_rt(current)->completed = 1;
-	prepare_for_next_period(current);
-	next_release = ns_to_ktime(get_release(current));
-	preempt_disable();
-	TRACE_CUR("next_release=%llu\n", get_release(current));
-	if (get_release(current) > litmus_clock()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		preempt_enable_no_resched();
-		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
-	} else {
-		err = 0;
-		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
-		preempt_enable();
-	}
-
-	TRACE_CUR("mc2_complete_job returns [%d] at %llu\n", err, litmus_clock());
-	return err;
-}
-
 static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
 	/* next == NULL means "schedule background work". */
+	struct mc2_task_state *tinfo;
 	struct mc2_cpu_state *state = local_cpu_state();
 
 	raw_spin_lock(&state->lock);
@@ -210,16 +180,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	state->sup_env.will_schedule = true;
 	sup_update_time(&state->sup_env, litmus_clock());
 
-	/* check if prev task complete */
-	if (is_realtime(prev)) {
-		TRACE_TASK(prev, "EXEC_TIME = %llu, EXEC_COST = %llu, REMAINED = %llu\n",
-			get_exec_time(prev), get_exec_cost(prev), get_exec_cost(prev)-get_exec_time(prev));
-	}
-	if (is_realtime(prev) && (get_exec_time(prev) >= get_exec_cost(prev))) {
-		TRACE_TASK(prev, "JOB COMPLETED! but is_completed = %d\n", is_completed(prev));
-//		mc2_complete_job(prev);
-	}
-
 	/* remove task from reservation if it blocks */
 	if (is_realtime(prev) && !is_running(prev))
 		task_departs(prev, is_completed(prev));
@@ -234,12 +194,17 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	state->sup_env.will_schedule = false;
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
-	if (prev != state->scheduled && is_realtime(prev))
+	if (prev != state->scheduled && is_realtime(prev)) {
 		TRACE_TASK(prev, "descheduled.\n");
-	if (state->scheduled)
+		state->run_level = NUM_CRIT_LEVELS;
+	}
+	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
-
+		//tinfo = get_mc2_state(state->scheduled);
+		//state->run_level = tinfo->mc2_param.crit;
+	}
+
 	return state->scheduled;
 }
 
245 210
@@ -279,6 +244,7 @@ static void mc2_task_resume(struct task_struct *tsk)
 		sup_update_time(&state->sup_env, litmus_clock());
 		task_arrives(tsk);
 		/* NOTE: drops state->lock */
+		TRACE("mc2_resume()\n");
 		mc2_update_timer_and_unlock(state);
 		local_irq_restore(flags);
 	} else {
@@ -289,6 +255,60 @@ static void mc2_task_resume(struct task_struct *tsk)
 	resume_legacy_task_model_updates(tsk);
 }
 
+/* syscall backend for job completions */
+static long mc2_complete_job(void)
+{
+	ktime_t next_release;
+	long err;
+	struct mc2_cpu_state *state = local_cpu_state();
+	struct reservation_environment *env = &(state->sup_env.env);
+	struct mc2_task_state *tinfo = get_mc2_state(current);
+
+
+	TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
+		get_deadline(current));
+
+	tsk_rt(current)->completed = 1;
+
+	if (tsk_rt(current)->sporadic_release) {
+		env->time_zero = tsk_rt(current)->sporadic_release_time;
+
+		if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
+			struct reservation *res;
+			struct table_driven_reservation *tdres;
+
+			sup_update_time(&state->sup_env, litmus_clock());
+			res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+			tdres = container_of(res, struct table_driven_reservation, res);
+			tdres->next_interval = 0;
+			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
+			res->next_replenishment = tsk_rt(current)->sporadic_release_time;
+			res->next_replenishment += tdres->intervals[0].start;
+			res->env->change_state(res->env, res, RESERVATION_DEPLETED);
+
+			TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+		}
+
+	}
+
+	prepare_for_next_period(current);
+	next_release = ns_to_ktime(get_release(current));
+	preempt_disable();
+	TRACE_CUR("next_release=%llu\n", get_release(current));
+	if (get_release(current) > litmus_clock()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		preempt_enable_no_resched();
+		err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
+	} else {
+		err = 0;
+		TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
+		preempt_enable();
+	}
+
+	TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
+	return err;
+}
+
 static long mc2_admit_task(struct task_struct *tsk)
 {
 	long err = -ESRCH;
@@ -297,10 +317,10 @@ static long mc2_admit_task(struct task_struct *tsk)
 	struct mc2_cpu_state *state;
 	struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
 	struct mc2_task *mp = tsk_rt(tsk)->plugin_state;
 
 	if (!tinfo)
 		return -ENOMEM;
 
 	if (!mp) {
 		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
 		return err;
@@ -311,21 +331,22 @@ static long mc2_admit_task(struct task_struct *tsk)
 	state = cpu_state_for(task_cpu(tsk));
 	raw_spin_lock_irqsave(&state->lock, flags);
 
-	res = sup_find_by_id(&state->sup_env, mp->pid);
+	res = sup_find_by_id(&state->sup_env, mp->res_id);
 
 	/* found the appropriate reservation (or vCPU) */
 	if (res) {
-		TRACE_TASK(tsk, "FOUND RES\n");
-		tinfo->res_info.mc2.mc2_task.crit = mp->crit;
+		TRACE_TASK(tsk, "FOUND RES ID\n");
+		tinfo->mc2_param.crit = mp->crit;
+		tinfo->mc2_param.res_id = mp->res_id;
 
 		kfree(tsk_rt(tsk)->plugin_state);
 		tsk_rt(tsk)->plugin_state = NULL;
 
-		err = mc2_task_client_init(&tinfo->res_info, tsk, res);
+		err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
 		tinfo->cpu = task_cpu(tsk);
 		tinfo->has_departed = true;
 		tsk_rt(tsk)->plugin_state = tinfo;
 
 		/* disable LITMUS^RT's per-thread budget enforcement */
 		tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 	}
@@ -340,21 +361,14 @@ static long mc2_admit_task(struct task_struct *tsk)
 	return err;
 }
 
-static void task_new_legacy_task_model_updates(struct task_struct *tsk)
-{
-	lt_t now = litmus_clock();
-
-	/* the first job exists starting as of right now */
-	release_at(tsk, now);
-}
-
 static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 			  int is_running)
 {
 	unsigned long flags;
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
 	struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
-
+	struct reservation *res;
+
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		   litmus_clock(), on_runqueue, is_running);
 
@@ -373,15 +387,69 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		sup_update_time(&state->sup_env, litmus_clock());
 		task_arrives(tsk);
 		/* NOTE: drops state->lock */
+		TRACE("mc2_new()\n");
 		mc2_update_timer_and_unlock(state);
 		local_irq_restore(flags);
 	} else
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 
-	task_new_legacy_task_model_updates(tsk);
+	res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+	release_at(tsk, res->next_replenishment);
+	if (res)
+		TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
+	else
+		TRACE_TASK(tsk, "next_replenishment = NULL\n");
 }
 
-static long mc2_reservation_destroy(unsigned int reservation_id, int cpu);
+static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
+{
+	long ret = -EINVAL;
+	struct mc2_cpu_state *state;
+	struct reservation *res = NULL, *next;
+	struct sup_reservation_environment *sup_env;
+	int found = 0;
+
+	state = cpu_state_for(cpu);
+	raw_spin_lock(&state->lock);
+
+//	res = sup_find_by_id(&state->sup_env, reservation_id);
+	sup_env = &state->sup_env;
+	//if (!res) {
+	list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
+		if (res->id == reservation_id) {
+			list_del(&res->list);
+			//kfree(res);
+			found = 1;
+			ret = 0;
+		}
+	}
+	if (!found) {
+		list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
+			if (res->id == reservation_id) {
+				list_del(&res->list);
+				//kfree(res);
+				found = 1;
+				ret = 0;
+			}
+		}
+	}
+	if (!found) {
+		list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
+			if (res->id == reservation_id) {
+				list_del(&res->list);
+				//kfree(res);
+				found = 1;
+				ret = 0;
+			}
+		}
+	}
+	//}
+
+	raw_spin_unlock(&state->lock);
+
+	TRACE("RESERVATION_DESTROY ret = %d\n", ret);
+	return ret;
+}
 
 static void mc2_task_exit(struct task_struct *tsk)
 {
@@ -401,65 +469,31 @@ static void mc2_task_exit(struct task_struct *tsk)
 		sup_update_time(&state->sup_env, litmus_clock());
 		task_departs(tsk, 0);
 		/* NOTE: drops state->lock */
+		TRACE("mc2_exit()\n");
 		mc2_update_timer_and_unlock(state);
 		local_irq_restore(flags);
 	} else
 		raw_spin_unlock_irqrestore(&state->lock, flags);
-
-	mc2_reservation_destroy(tsk->pid, tinfo->cpu);
-
+/*
+	if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
+		struct table_driven_reservation *td_res;
+		struct reservation *res;
+		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+		td_res = container_of(res, struct table_driven_reservation, res);
+		kfree(td_res->intervals);
+		//kfree(td_res);
+	} else if (tinfo->mc2_param.crit == CRIT_LEVEL_B) {
+		struct polling_reservation *pres;
+		struct reservation *res;
+		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+		pres = container_of(res, struct polling_reservation, res);
+		kfree(pres);
+	}
+*/
 	kfree(tsk_rt(tsk)->plugin_state);
 	tsk_rt(tsk)->plugin_state = NULL;
 }
 
-asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
-{
-	struct task_struct *target;
-	int retval = -EINVAL;
-	struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
-
-	if (!mp)
-		return -ENOMEM;
-
-	printk("Setting up mc^2 task parameters for process %d.\n", pid);
-
-	if (pid < 0 || param == 0) {
-		goto out;
-	}
-	if (copy_from_user(mp, param, sizeof(*mp))) {
-		retval = -EFAULT;
-		goto out;
-	}
-
-	/* Task search and manipulation must be protected */
-	read_lock_irq(&tasklist_lock);
-	if (!(target = find_task_by_vpid(pid))) {
-		retval = -ESRCH;
-		goto out_unlock;
-	}
-
-	if (is_realtime(target)) {
-		/* The task is already a real-time task.
-		 * We cannot not allow parameter changes at this point.
-		 */
-		retval = -EBUSY;
-		goto out_unlock;
-	}
-	if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
-		printk(KERN_INFO "litmus: real-time task %d rejected "
-		       "because of invalid criticality level\n", pid);
-		goto out_unlock;
-	}
-
-	target->rt_param.plugin_state = mp;
-
-	retval = 0;
-out_unlock:
-	read_unlock_irq(&tasklist_lock);
-out:
-	return retval;
-}
-
 static long create_polling_reservation(
 	int res_type,
 	struct reservation_config *config)
463static long create_polling_reservation( 497static long create_polling_reservation(
464 int res_type, 498 int res_type,
465 struct reservation_config *config) 499 struct reservation_config *config)
@@ -566,10 +600,6 @@ static long create_table_driven_reservation(
 	err = copy_from_user(slots,
 			config->table_driven_params.intervals, slots_size);
 
-	for (i=0; i<num_slots;i++) {
-		TRACE("###### [%llu, %llu]\n", slots[i].start, slots[i].end);
-	}
-
 	if (!err) {
 		/* sanity checks */
 		for (i = 0; !err && i < num_slots; i++)
@@ -628,7 +658,6 @@ static long create_table_driven_reservation(
 		kfree(td_res);
 	}
 
-	TRACE("CREATE_TABLE_DRIVEN_RES = %d\n", err);
 	return err;
 }
 
@@ -665,54 +694,6 @@ static long mc2_reservation_create(int res_type, void* __user _config)
 	return ret;
 }
 
-static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
-{
-	long ret = -EINVAL;
-	struct mc2_cpu_state *state;
-	struct reservation *res, *next;
-	struct sup_reservation_environment *sup_env;
-	unsigned long flags;
-	int found = 0;
-
-	state = cpu_state_for(cpu);
-	raw_spin_lock_irqsave(&state->lock, flags);
-
-	//res = sup_find_by_id(&state->sup_env, reservation_id);
-	sup_env = &state->sup_env;
-	//if (!res) {
-	list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
-		if (res->id == reservation_id) {
-			list_del(&res->list);
-			found = 1;
-			ret = 0;
-		}
-	}
-	if (!found) {
-		list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
-			if (res->id == reservation_id) {
-				list_del(&res->list);
-				found = 1;
-				ret = 0;
-			}
-		}
-	}
-	if (!found) {
-		list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
-			if (res->id == reservation_id) {
-				list_del(&res->list);
-				found = 1;
-				ret = 0;
-			}
-		}
-	}
-	//}
-
-	raw_spin_unlock_irqrestore(&state->lock, flags);
-
-	TRACE("RESERVATION_DESTROY ret = %d\n", ret);
-	return ret;
-}
-
 static struct domain_proc_info mc2_domain_proc_info;
 
 static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
@@ -756,11 +737,6 @@ static long mc2_activate_plugin(void)
 
 		state = cpu_state_for(cpu);
 
-#ifdef CONFIG_RELEASE_MASTER
-		state->release_master = atomic_read(&release_master_cpu);
-		hrtimer_start_on_info_init(&state->info);
-#endif
-
 		raw_spin_lock_init(&state->lock);
 		state->cpu = cpu;
 		state->scheduled = NULL;
@@ -823,15 +799,15 @@ static long mc2_deactivate_plugin(void)
 }
 
 static struct sched_plugin mc2_plugin = {
 	.plugin_name		= "MC2",
 	.schedule		= mc2_schedule,
 	.task_wake_up		= mc2_task_resume,
 	.admit_task		= mc2_admit_task,
 	.task_new		= mc2_task_new,
 	.task_exit		= mc2_task_exit,
 	.complete_job		= mc2_complete_job,
 	.get_domain_proc_info	= mc2_get_domain_proc_info,
 	.activate_plugin	= mc2_activate_plugin,
 	.deactivate_plugin	= mc2_deactivate_plugin,
 	.reservation_create	= mc2_reservation_create,
 	.reservation_destroy	= mc2_reservation_destroy,
@@ -843,4 +819,3 @@ static int __init init_mc2(void)
 }
 
 module_init(init_mc2);
-
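The key behavioral change in sched_mc2.c is in mc2_complete_job(): when the completing task carries a sporadic release, the reservation environment's time_zero is moved to the sporadic release time, and for a level-A task the backing table-driven reservation is rewound (next_interval reset to 0, major_cycle_start and next_replenishment recomputed from intervals[0]) and forced into RESERVATION_DEPLETED so it replenishes at the new release point. mc2_task_new() correspondingly releases the first job at the reservation's next_replenishment instead of at litmus_clock(). Two latent issues are visible in the diff: release_at(tsk, res->next_replenishment) dereferences res before the if (res) NULL check that follows it, and mc2_task_exit() no longer calls mc2_reservation_destroy(), leaving reservation teardown to the explicit reservation_destroy path.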
diff --git a/litmus/sched_pres.c b/litmus/sched_pres.c
index e2d48101c890..6c636cce07fb 100644
--- a/litmus/sched_pres.c
+++ b/litmus/sched_pres.c
@@ -14,6 +14,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+//static int testval = 0;
+
 struct pres_task_state {
 	struct task_client res_info;
 	int cpu;
@@ -172,6 +174,11 @@ static struct task_struct* pres_schedule(struct task_struct * prev)
 	BUG_ON(state->scheduled && state->scheduled != prev);
 	BUG_ON(state->scheduled && !is_realtime(prev));
 
+//if (testval == 1) {
+//	testval = 0;
+//	printk(KERN_INFO "TESTVAL = 1 at %llu\n", litmus_clock());
+//}
+
 	/* update time */
 	state->sup_env.will_schedule = true;
 	sup_update_time(&state->sup_env, litmus_clock());
@@ -250,6 +257,8 @@ static long pres_complete_job(void)
 {
 	ktime_t next_release;
 	long err;
+//testval = 1;
+//printk(KERN_INFO "pres_complete_job at %llu (deadline: %llu)\n", litmus_clock(), get_deadline(current));
 
 	TRACE_CUR("pres_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
 		get_deadline(current));
@@ -270,6 +279,7 @@ static long pres_complete_job(void)
 	}
 
 	TRACE_CUR("pres_complete_job returns at %llu\n", litmus_clock());
+//printk(KERN_INFO "pres_complete_job returns at %llu\n", litmus_clock());
 	return err;
 }
 