author     Namhoon Kim <namhoonk@cs.unc.edu>  2015-02-25 10:42:28 -0500
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2015-02-25 10:42:28 -0500
commit     a83b6b631b081f9dfeb8134c9aee6aeb866f7231
tree       0afeb26e74c4a52cf9473a405420c8483462d087
parent     d7f8145b8a0525dccd2990fd0739012d04f3d978
prototype
-rw-r--r--  include/litmus/reservation.h   18
-rw-r--r--  litmus/bank_proc.c             29
-rw-r--r--  litmus/cache_proc.c            40
-rw-r--r--  litmus/jobs.c                   1
-rw-r--r--  litmus/litmus.c                48
-rw-r--r--  litmus/reservation.c           54
-rw-r--r--  litmus/sched_mc2.c            405
7 files changed, 396 insertions(+), 199 deletions(-)
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
index fc7e31918a60..0b9c08d6051e 100644
--- a/include/litmus/reservation.h
+++ b/include/litmus/reservation.h
@@ -201,19 +201,19 @@ struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
201 unsigned int id); 201 unsigned int id);
202 202
203/* A global multiprocessor reservation environment. */ 203/* A global multiprocessor reservation environment. */
204/* 204
205typedef enum { 205typedef enum {
206 EVENT_REPLENISH = 0, 206 EVENT_REPLENISH = 0,
207 EVENT_DRAIN, 207 EVENT_DRAIN,
208 EVENT_OTHERS, 208 EVENT_OTHERS,
209} event_type_t; 209} event_type_t;
210*/ 210
211 211
212struct next_timer_event { 212struct next_timer_event {
213 lt_t next_update; 213 lt_t next_update;
214 int timer_armed_on; 214 int timer_armed_on;
215 //unsigned int id; 215 unsigned int id;
216 //event_type_t type; 216 event_type_t type;
217 struct list_head list; 217 struct list_head list;
218}; 218};
219 219
@@ -234,7 +234,7 @@ struct gmp_reservation_environment {
234 struct list_head next_events; 234 struct list_head next_events;
235 235
236 /* (schedule_now == true) means call gmp_dispatch() now */ 236 /* (schedule_now == true) means call gmp_dispatch() now */
237 bool schedule_now; 237 int schedule_now;
238 /* set to true if a call to gmp_dispatch() is imminent */ 238 /* set to true if a call to gmp_dispatch() is imminent */
239 bool will_schedule; 239 bool will_schedule;
240}; 240};
@@ -242,11 +242,11 @@ struct gmp_reservation_environment {
242void gmp_init(struct gmp_reservation_environment* gmp_env); 242void gmp_init(struct gmp_reservation_environment* gmp_env);
243void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env, 243void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
244 struct reservation* new_res); 244 struct reservation* new_res);
245void gmp_scheduler_update_after(struct gmp_reservation_environment* gmp_env, 245void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
246 lt_t timeout); 246 lt_t timeout, unsigned int id, event_type_t type);
247bool gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now); 247int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
248struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env); 248struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
249//struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id); 249struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
250struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when); 250struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
251struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env, 251struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
252 unsigned int id); 252 unsigned int id);
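
The reservation.h hunk above replaces the time-keyed gmp_scheduler_update_after() with typed timer events: next_timer_event now carries a reservation id and an event_type_t, gmp_add_event_after() takes both, gmp_find_event_by_id() is exported, and schedule_now becomes an int count instead of a bool. A minimal call-site sketch using only these declarations (the reservation id 3 and the 5 ms budget are illustration values, not taken from the patch):

	/* queue a budget-drain event for reservation 3, 5 ms from now;
	 * it fires at gmp_env->env.current_time + timeout */
	gmp_add_event_after(&_global_env, 5000000ULL, 3, EVENT_DRAIN);

	/* events can now be looked up by reservation id instead of by time */
	{
		struct next_timer_event *ev = gmp_find_event_by_id(&_global_env, 3);
		if (ev)
			TRACE("event id %u type %d fires at %llu\n",
			      ev->id, ev->type, ev->next_update);
	}
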
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index ec04626b43ec..07d572833b30 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -88,7 +88,7 @@ void add_page_to_color_list(struct page *page)
88 88
89static int do_add_pages(void) 89static int do_add_pages(void)
90{ 90{
91 printk("LITMUS do add pages\n"); 91 //printk("LITMUS do add pages\n");
92 92
93 struct page *page, *page_tmp; 93 struct page *page, *page_tmp;
94 LIST_HEAD(free_later); 94 LIST_HEAD(free_later);
@@ -122,6 +122,7 @@ out:
122 return ret; 122 return ret;
123} 123}
124 124
125extern int l2_usable_sets;
125 126
126/* 127/*
127 * provide pages for replacement 128 * provide pages for replacement
@@ -131,9 +132,10 @@ out:
131 * node = 3 for Level A, B tasks in Cpu 3 132 * node = 3 for Level A, B tasks in Cpu 3
132 * node = 4 for Level C tasks 133 * node = 4 for Level C tasks
133 */ 134 */
135#if 1
134struct page *new_alloc_page(struct page *page, unsigned long node, int **x) 136struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
135{ 137{
136 printk("allocate new page node = %d\n", node); 138 //printk("allocate new page node = %d\n", node);
137// return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); 139// return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
138 struct color_group *cgroup; 140 struct color_group *cgroup;
139 struct page *rPage = NULL; 141 struct page *rPage = NULL;
@@ -155,7 +157,20 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
155 goto out; 157 goto out;
156 } 158 }
157 */ 159 */
158 160 switch(node ){
161 case 0:
162 color = (color % l2_usable_sets);
163 break;
164 case 1:
165 case 2:
166 case 3:
167 case 4:
168 color = (color% (16-l2_usable_sets)) + l2_usable_sets;
169 break;
170 default:
171 goto out;
172 }
173 /*
159 switch(node ){ 174 switch(node ){
160 case 0: 175 case 0:
161 case 1: 176 case 1:
@@ -171,14 +186,15 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
171 default: 186 default:
172 goto out; 187 goto out;
173 } 188 }
189 */
174 190
175 191 //printk("allocate new page color = %d\n", color);
176 printk("allocate new page color = %d\n", color); 192 //TRACE("allocate new page color = %d\n", color);
177 193
178 cgroup = &color_groups[color]; 194 cgroup = &color_groups[color];
179 spin_lock(&cgroup->lock); 195 spin_lock(&cgroup->lock);
180 if (unlikely(!atomic_read(&cgroup->nr_pages))) { 196 if (unlikely(!atomic_read(&cgroup->nr_pages))) {
181 TRACE_CUR("No free %lu colored pages.\n", color); 197 //TRACE_CUR("No free %lu colored pages.\n", color);
182 printk(KERN_WARNING "no free %lu colored pages.\n", color); 198 printk(KERN_WARNING "no free %lu colored pages.\n", color);
183 goto out_unlock; 199 goto out_unlock;
184 } 200 }
@@ -194,6 +210,7 @@ out:
194 do_add_pages(); 210 do_add_pages();
195 return rPage; 211 return rPage;
196} 212}
213#endif
197 214
198struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x) 215struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x)
199{ 216{
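
The switch added to new_alloc_page() splits the page colors between partitioned and shared users: node 0 is folded into colors 0 .. l2_usable_sets-1, while nodes 1 to 4 (CPUs 1-3 and level C) share colors l2_usable_sets .. 15. A small standalone model of just that mapping, with l2_usable_sets = 5 as assigned by litmus_sysctl_init(); the real function then takes a page from color_groups[color]:

#include <stdio.h>

/* Model of the color selection in new_alloc_page(): fold a raw color
 * into the partition that belongs to the given node. */
static int map_color(unsigned long node, int color, int l2_usable_sets)
{
	switch (node) {
	case 0:
		return color % l2_usable_sets;                /* colors 0 .. sets-1 */
	case 1: case 2: case 3: case 4:
		return color % (16 - l2_usable_sets) + l2_usable_sets; /* sets .. 15 */
	default:
		return -1;                                    /* unknown node */
	}
}

int main(void)
{
	int l2_usable_sets = 5;       /* default from litmus_sysctl_init() */

	for (int color = 0; color < 16; color++)
		printf("raw color %2d -> node 0: %2d, node 4: %2d\n", color,
		       map_color(0, color, l2_usable_sets),
		       map_color(4, color, l2_usable_sets));
	return 0;
}
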
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index cc818b9f1fc4..7b48d5cf2b98 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -49,6 +49,9 @@ struct mutex lockdown_proc;
49 49
50static int min_usable_ways = 0; 50static int min_usable_ways = 0;
51static int max_usable_ways = 16; 51static int max_usable_ways = 16;
52static int min_usable_sets = 1;
53static int max_usable_sets = 15;
54
52static int zero = 0; 55static int zero = 0;
53static int one = 1; 56static int one = 1;
54 57
@@ -60,6 +63,7 @@ static int one = 1;
60 __cpu * L2X0_LOCKDOWN_STRIDE; __v; }) 63 __cpu * L2X0_LOCKDOWN_STRIDE; __v; })
61 64
62int l2_usable_ways; 65int l2_usable_ways;
66int l2_usable_sets;
63int lock_all; 67int lock_all;
64int nr_lockregs; 68int nr_lockregs;
65 69
@@ -176,6 +180,7 @@ int l2_usable_ways_handler(struct ctl_table *table, int write, void __user *buff
176 goto out; 180 goto out;
177 181
178 TRACE_CUR("l2_usable_ways : %d\n", l2_usable_ways); 182 TRACE_CUR("l2_usable_ways : %d\n", l2_usable_ways);
183 printk("l2_usable_ways : %d\n", l2_usable_ways);
179 184
180 if (write) { 185 if (write) {
181 //for (i = 0; i < nr_lockregs; i++) { 186 //for (i = 0; i < nr_lockregs; i++) {
@@ -190,6 +195,31 @@ out:
190 return ret; 195 return ret;
191} 196}
192 197
198int l2_usable_sets_handler(struct ctl_table *table, int write, void __user *buffer,
199 size_t *lenp, loff_t *ppos)
200{
201 int ret = 0;
202
203 mutex_lock(&lockdown_proc);
204
205 flush_cache_all();
206
207 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
208 if (ret)
209 goto out;
210
211 TRACE_CUR("l2_usable_sets : %d\n", l2_usable_sets);
212 printk("l2_usable_sets : %d\n", l2_usable_sets);
213
214 if (write) {
215 ;
216 }
217
218out:
219 mutex_unlock(&lockdown_proc);
220 return ret;
221}
222
193static struct ctl_table cache_table[] = 223static struct ctl_table cache_table[] =
194{ 224{
195 { 225 {
@@ -202,6 +232,15 @@ static struct ctl_table cache_table[] =
202 .extra2 = &max_usable_ways, 232 .extra2 = &max_usable_ways,
203 }, 233 },
204 { 234 {
235 .procname = "l2_usable_sets",
236 .mode = 0666,
237 .proc_handler = l2_usable_sets_handler,
238 .data = &l2_usable_sets,
239 .maxlen = sizeof(l2_usable_sets),
240 .extra1 = &min_usable_sets,
241 .extra2 = &max_usable_sets,
242 },
243 {
205 .procname = "lock_all", 244 .procname = "lock_all",
206 .mode = 0666, 245 .mode = 0666,
207 .proc_handler = lock_all_handler, 246 .proc_handler = lock_all_handler,
@@ -237,6 +276,7 @@ static int __init litmus_sysctl_init(void)
237 } 276 }
238 277
239 l2_usable_ways = 16; 278 l2_usable_ways = 16;
279 l2_usable_sets = 5;
240 280
241out: 281out:
242 return ret; 282 return ret;
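
l2_usable_sets is exposed next to l2_usable_ways as a sysctl: the handler flushes the caches, runs proc_dointvec_minmax() so writes are clamped to [1, 15] by extra1/extra2, and logs the new value, which bank_proc.c then reads when choosing a color partition. A userspace sketch that reads the knob (the /proc/sys path is an assumption, since the registration of cache_table is not shown in this hunk):

#include <stdio.h>

int main(void)
{
	int sets = 0;
	/* assumed location; depends on where cache_table is registered */
	FILE *f = fopen("/proc/sys/litmus/l2_usable_sets", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &sets) == 1)
		printf("l2_usable_sets = %d (writes clamped to [1, 15])\n", sets);
	fclose(f);
	return 0;
}
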
diff --git a/litmus/jobs.c b/litmus/jobs.c
index e523e29ea8a6..547222c3387a 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -45,7 +45,6 @@ void release_at(struct task_struct *t, lt_t start)
45{ 45{
46 BUG_ON(!t); 46 BUG_ON(!t);
47 setup_release(t, start); 47 setup_release(t, start);
48 TRACE("RELEASE!!\n");
49 tsk_rt(t)->completed = 0; 48 tsk_rt(t)->completed = 0;
50} 49}
51 50
diff --git a/litmus/litmus.c b/litmus/litmus.c
index dcb9ed58962c..4ff840dcb309 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -392,14 +392,16 @@ static struct page *walk_page_table(unsigned long addr)
392extern int isolate_lru_page(struct page *page); 392extern int isolate_lru_page(struct page *page);
393extern void putback_lru_page(struct page *page); 393extern void putback_lru_page(struct page *page);
394 394
395extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
396
397#if 0 395#if 0
398static struct page *new_alloc_page(struct page *page, unsigned long node, int **x) 396static struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
399{ 397{
400 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); 398 return alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
401} 399}
400#else
401extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
402
402#endif 403#endif
404
403asmlinkage long sys_set_page_color(int cpu) 405asmlinkage long sys_set_page_color(int cpu)
404{ 406{
405 long ret = 0; 407 long ret = 0;
@@ -408,16 +410,18 @@ asmlinkage long sys_set_page_color(int cpu)
408 struct page *page_itr = NULL; 410 struct page *page_itr = NULL;
409 struct vm_area_struct *vma_itr = NULL; 411 struct vm_area_struct *vma_itr = NULL;
410 //struct task_page *entry = NULL; 412 //struct task_page *entry = NULL;
411 int nr_pages = 0, nr_shared_pages = 0; 413 int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0;
414 unsigned long node;
415
412 LIST_HEAD(pagelist); 416 LIST_HEAD(pagelist);
413 LIST_HEAD(shared_pagelist); 417 LIST_HEAD(shared_pagelist);
414 418
415 down_read(&current->mm->mmap_sem); 419 down_read(&current->mm->mmap_sem);
416 printk(KERN_INFO "SYSCALL set_page_color\n"); 420 TRACE_TASK(current, "SYSCALL set_page_color\n");
417 vma_itr = current->mm->mmap; 421 vma_itr = current->mm->mmap;
418 while (vma_itr != NULL) { 422 while (vma_itr != NULL) {
419 unsigned int num_pages = 0, i; 423 unsigned int num_pages = 0, i;
420 struct page *new_page = NULL, *old_page = NULL; 424 struct page *old_page = NULL;
421 /* 425 /*
422 entry = kmalloc(sizeof(struct task_page), GFP_ATOMIC); 426 entry = kmalloc(sizeof(struct task_page), GFP_ATOMIC);
423 if (entry == NULL) { 427 if (entry == NULL) {
@@ -428,8 +432,8 @@ asmlinkage long sys_set_page_color(int cpu)
428 */ 432 */
429 num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; 433 num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
430 // print vma flags 434 // print vma flags
431 printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags); 435 //printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
432 printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE); 436 //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
433 437
434 for (i = 0; i < num_pages; i++) { 438 for (i = 0; i < num_pages; i++) {
435/* 439/*
@@ -447,19 +451,22 @@ asmlinkage long sys_set_page_color(int cpu)
447 continue; 451 continue;
448 452
449 if (PageReserved(old_page)) { 453 if (PageReserved(old_page)) {
454 TRACE("Reserved Page!\n");
450 put_page(old_page); 455 put_page(old_page);
451 continue; 456 continue;
452 } 457 }
453 458
454 printk(KERN_INFO "addr: %lu, pfn: %lu, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page)); 459 TRACE_TASK(current, "addr: %lu, pfn: %lu, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
455 460
456 if (page_mapcount(old_page) == 1) { 461 if (page_mapcount(old_page) != 0) {
457 ret = isolate_lru_page(old_page); 462 ret = isolate_lru_page(old_page);
458 //if (pfn_valid(__page_to_pfn(old_page)))
459 if (!ret) { 463 if (!ret) {
460 list_add_tail(&old_page->lru, &pagelist); 464 list_add_tail(&old_page->lru, &pagelist);
461 inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); 465 inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
462 nr_pages++; 466 nr_pages++;
467 } else {
468 TRACE_TASK(current, "isolate_lru_page failed\n");
469 nr_failed++;
463 } 470 }
464 put_page(old_page); 471 put_page(old_page);
465 } 472 }
@@ -468,8 +475,9 @@ asmlinkage long sys_set_page_color(int cpu)
468 if (!ret) { 475 if (!ret) {
469 list_add_tail(&old_page->lru, &shared_pagelist); 476 list_add_tail(&old_page->lru, &shared_pagelist);
470 inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); 477 inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
471 nr_shared_pages++; 478
472 } 479 }
480 nr_shared_pages++;
473 put_page(old_page); 481 put_page(old_page);
474 } 482 }
475 } 483 }
@@ -485,10 +493,15 @@ asmlinkage long sys_set_page_color(int cpu)
485// } 493// }
486 494
487 ret = 0; 495 ret = 0;
496 if (cpu == -1)
497 node = 4;
498 else
499 node = cpu;
500
488 if (!list_empty(&pagelist)) { 501 if (!list_empty(&pagelist)) {
489 ret = migrate_pages(&pagelist, new_alloc_page, 4, MIGRATE_ASYNC, MR_SYSCALL); 502 ret = migrate_pages(&pagelist, new_alloc_page, node, MIGRATE_ASYNC, MR_SYSCALL);
503 TRACE_TASK(current, "%ld pages not migrated.\n", ret);
490 if (ret) { 504 if (ret) {
491 printk(KERN_INFO "%ld pages not migrated.\n", ret);
492 putback_lru_pages(&pagelist); 505 putback_lru_pages(&pagelist);
493 } 506 }
494 } 507 }
@@ -507,7 +520,7 @@ asmlinkage long sys_set_page_color(int cpu)
507 up_read(&current->mm->mmap_sem); 520 up_read(&current->mm->mmap_sem);
508 521
509 list_for_each_entry(page_itr, &shared_pagelist, lru) { 522 list_for_each_entry(page_itr, &shared_pagelist, lru) {
510 printk(KERN_INFO "S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr)); 523 TRACE("S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr));
511 } 524 }
512 525
513/* 526/*
@@ -517,7 +530,7 @@ asmlinkage long sys_set_page_color(int cpu)
517 kfree(task_page_itr); 530 kfree(task_page_itr);
518 } 531 }
519*/ 532*/
520 printk(KERN_INFO "nr_pages = %d\n", nr_pages); 533 TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
521 return ret; 534 return ret;
522} 535}
523 536
@@ -888,6 +901,7 @@ static int __init _init_litmus(void)
888#endif 901#endif
889 902
890 color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1); 903 color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);
904 printk("Page color mask %08x\n", color_mask);
891 return 0; 905 return 0;
892} 906}
893 907
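
The litmus.c changes make sys_set_page_color() isolate every page with a nonzero mapcount, count isolation failures, and pass migrate_pages() a node derived from the cpu argument (node = cpu for a partitioned caller, node = 4 for a level-C caller), which is the selector new_alloc_page() in bank_proc.c switches on. A standalone model of the per-page decision and node selection (the mapcount values are invented; shared-page handling and error paths are omitted):

#include <stdio.h>

/* node given to migrate_pages(): cpu number for levels A/B, 4 for level C */
static unsigned long pick_node(int cpu)
{
	return cpu == -1 ? 4 : (unsigned long)cpu;
}

int main(void)
{
	int mapcounts[] = { 1, 0, 3 };   /* pretend _mapcount of three pages */
	int cpu = -1;                    /* -1: caller is a level-C task */
	int nr_pages = 0, nr_skipped = 0;

	for (int i = 0; i < 3; i++) {
		if (mapcounts[i] != 0)
			nr_pages++;      /* would be isolated onto pagelist */
		else
			nr_skipped++;    /* handled via the shared page list */
	}
	printf("migrate %d page(s) to node %lu, %d handled separately\n",
	       nr_pages, pick_node(cpu), nr_skipped);
	return 0;
}
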
diff --git a/litmus/reservation.c b/litmus/reservation.c
index e30892c72f4a..b0b13a9916ef 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -348,7 +348,7 @@ struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
348 return NULL; 348 return NULL;
349} 349}
350 350
351/* 351
352struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, 352struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
353 unsigned int id) 353 unsigned int id)
354{ 354{
@@ -361,7 +361,7 @@ struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment
361 361
362 return NULL; 362 return NULL;
363} 363}
364*/ 364
365 365
366struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, 366struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
367 lt_t when) 367 lt_t when)
@@ -415,9 +415,9 @@ static void gmp_scheduler_update_at(
415*/ 415*/
416#define TIMER_RESOLUTION 100000L 416#define TIMER_RESOLUTION 100000L
417 417
418static void gmp_scheduler_update_at( 418static void gmp_add_event(
419 struct gmp_reservation_environment* gmp_env, 419 struct gmp_reservation_environment* gmp_env,
420 lt_t when) 420 lt_t when, unsigned int id, event_type_t type)
421{ 421{
422 struct next_timer_event *nevent, *queued; 422 struct next_timer_event *nevent, *queued;
423 struct list_head *pos; 423 struct list_head *pos;
@@ -426,11 +426,13 @@ static void gmp_scheduler_update_at(
426 //when = div64_u64(when, TIMER_RESOLUTION); 426 //when = div64_u64(when, TIMER_RESOLUTION);
427 //when *= TIMER_RESOLUTION; 427 //when *= TIMER_RESOLUTION;
428 428
429 nevent = gmp_find_event_by_time(gmp_env, when); 429 nevent = gmp_find_event_by_id(gmp_env, id);
430 430
431 if (!nevent) { 431 if (!nevent) {
432 nevent = kzalloc(sizeof(*nevent), GFP_KERNEL); 432 nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
433 nevent->next_update = when; 433 nevent->next_update = when;
434 nevent->id = id;
435 nevent->type = type;
434 nevent->timer_armed_on = NO_CPU; 436 nevent->timer_armed_on = NO_CPU;
435 437
436 list_for_each(pos, &gmp_env->next_events) { 438 list_for_each(pos, &gmp_env->next_events) {
@@ -448,14 +450,14 @@ static void gmp_scheduler_update_at(
448 TRACE("NEXT_EVENT ADDED at %llu ADDED at HEAD\n", nevent->next_update); 450 TRACE("NEXT_EVENT ADDED at %llu ADDED at HEAD\n", nevent->next_update);
449 } 451 }
450 } else { 452 } else {
451 ; //TRACE("EVENT FOUND at %llu, NEW EVENT %llu\n", nevent->next_update, when); 453 TRACE("EVENT FOUND type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->type, nevent->next_update, type, when);
452 } 454 }
453} 455}
454 456
455void gmp_scheduler_update_after( 457void gmp_add_event_after(
456 struct gmp_reservation_environment* gmp_env, lt_t timeout) 458 struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
457{ 459{
458 gmp_scheduler_update_at(gmp_env, gmp_env->env.current_time + timeout); 460 gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
459} 461}
460 462
461static void gmp_queue_depleted( 463static void gmp_queue_depleted(
@@ -468,7 +470,7 @@ static void gmp_queue_depleted(
468 470
469 list_for_each(pos, &gmp_env->depleted_reservations) { 471 list_for_each(pos, &gmp_env->depleted_reservations) {
470 queued = list_entry(pos, struct reservation, list); 472 queued = list_entry(pos, struct reservation, list);
471 if (queued->next_replenishment > res->next_replenishment) { 473 if (queued && queued->next_replenishment > res->next_replenishment) {
472 list_add(&res->list, pos->prev); 474 list_add(&res->list, pos->prev);
473 found = 1; 475 found = 1;
474 } 476 }
@@ -477,7 +479,7 @@ static void gmp_queue_depleted(
477 if (!found) 479 if (!found)
478 list_add_tail(&res->list, &gmp_env->depleted_reservations); 480 list_add_tail(&res->list, &gmp_env->depleted_reservations);
479 481
480 gmp_scheduler_update_at(gmp_env, res->next_replenishment); 482 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
481} 483}
482 484
483static void gmp_queue_active( 485static void gmp_queue_active(
@@ -502,16 +504,20 @@ static void gmp_queue_active(
502 list_add_tail(&res->list, &gmp_env->active_reservations); 504 list_add_tail(&res->list, &gmp_env->active_reservations);
503 505
504 /* check for possible preemption */ 506 /* check for possible preemption */
505 if (res->state == RESERVATION_ACTIVE && !check_preempt) 507 if (res->state == RESERVATION_ACTIVE && check_preempt)
506 gmp_env->schedule_now = true; 508 gmp_env->schedule_now++;
507 509
508 gmp_scheduler_update_after(gmp_env, res->cur_budget); 510 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
509} 511}
510 512
511static void gmp_queue_reservation( 513static void gmp_queue_reservation(
512 struct gmp_reservation_environment* gmp_env, 514 struct gmp_reservation_environment* gmp_env,
513 struct reservation *res) 515 struct reservation *res)
514{ 516{
517 if (res == NULL) {
518 BUG();
519 return;
520 }
515 switch (res->state) { 521 switch (res->state) {
516 case RESERVATION_INACTIVE: 522 case RESERVATION_INACTIVE:
517 list_add(&res->list, &gmp_env->inactive_reservations); 523 list_add(&res->list, &gmp_env->inactive_reservations);
@@ -572,7 +578,7 @@ static void gmp_charge_budget(
572 * its remaining budget */ 578 * its remaining budget */
573 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", 579 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n",
574 res->id, res->cur_budget); 580 res->id, res->cur_budget);
575 gmp_scheduler_update_after(gmp_env, res->cur_budget); 581 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
576 } 582 }
577 //if (encountered_active == 2) 583 //if (encountered_active == 2)
578 /* stop at the first ACTIVE reservation */ 584 /* stop at the first ACTIVE reservation */
@@ -601,26 +607,22 @@ static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
601 res = list_first_entry_or_null(&gmp_env->depleted_reservations, 607 res = list_first_entry_or_null(&gmp_env->depleted_reservations,
602 struct reservation, list); 608 struct reservation, list);
603 if (res) 609 if (res)
604 gmp_scheduler_update_at(gmp_env, res->next_replenishment); 610 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
605} 611}
606 612
607/* return schedule_now */ 613/* return schedule_now */
608bool gmp_update_time( 614int gmp_update_time(
609 struct gmp_reservation_environment* gmp_env, 615 struct gmp_reservation_environment* gmp_env,
610 lt_t now) 616 lt_t now)
611{ 617{
612 lt_t delta; 618 lt_t delta;
613 619
614 if (!gmp_env) {
615 TRACE("BUG****************************************\n");
616 return false;
617 }
618 /* If the time didn't advance, there is nothing to do. 620 /* If the time didn't advance, there is nothing to do.
619 * This check makes it safe to call sup_advance_time() potentially 621 * This check makes it safe to call sup_advance_time() potentially
620 * multiple times (e.g., via different code paths. */ 622 * multiple times (e.g., via different code paths. */
621 //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); 623 //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
622 if (unlikely(now <= gmp_env->env.current_time)) 624 if (unlikely(now <= gmp_env->env.current_time))
623 return gmp_env->schedule_now; 625 return min(gmp_env->schedule_now, NR_CPUS);
624 626
625 delta = now - gmp_env->env.current_time; 627 delta = now - gmp_env->env.current_time;
626 gmp_env->env.current_time = now; 628 gmp_env->env.current_time = now;
@@ -634,7 +636,7 @@ bool gmp_update_time(
634 //TRACE("REPLENISH###\n"); 636 //TRACE("REPLENISH###\n");
635 gmp_replenish_budgets(gmp_env); 637 gmp_replenish_budgets(gmp_env);
636 638
637 return gmp_env->schedule_now; 639 return min(gmp_env->schedule_now, NR_CPUS);
638} 640}
639 641
640static void gmp_res_change_state( 642static void gmp_res_change_state(
@@ -652,7 +654,7 @@ static void gmp_res_change_state(
652 list_del(&res->list); 654 list_del(&res->list);
653 /* check if we need to reschedule because we lost an active reservation */ 655 /* check if we need to reschedule because we lost an active reservation */
654 if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule) 656 if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule)
655 gmp_env->schedule_now = true; 657 gmp_env->schedule_now++;
656 res->state = new_state; 658 res->state = new_state;
657 gmp_queue_reservation(gmp_env, res); 659 gmp_queue_reservation(gmp_env, res);
658} 660}
@@ -668,7 +670,7 @@ void gmp_init(struct gmp_reservation_environment* gmp_env)
668 670
669 gmp_env->env.change_state = gmp_res_change_state; 671 gmp_env->env.change_state = gmp_res_change_state;
670 672
671 gmp_env->schedule_now = false; 673 gmp_env->schedule_now = 0;
672 gmp_env->will_schedule = false; 674 gmp_env->will_schedule = false;
673 675
674 raw_spin_lock_init(&gmp_env->lock); 676 raw_spin_lock_init(&gmp_env->lock);
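
gmp_add_event() (the renamed gmp_scheduler_update_at()) now looks events up by reservation id, allocates with GFP_ATOMIC (presumably because callers hold the global spinlock), and keeps next_events ordered by next_update; a duplicate id is kept as-is and only traced. A standalone model of that ordered insert, with a plain singly-linked list and calloc standing in for list_head and kzalloc():

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long lt_t;
typedef enum { EVENT_REPLENISH = 0, EVENT_DRAIN, EVENT_OTHERS } event_type_t;

struct event {
	lt_t next_update;
	unsigned int id;
	event_type_t type;
	struct event *next;
};

static struct event *head;

static struct event *find_by_id(unsigned int id)
{
	for (struct event *e = head; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

static void add_event(lt_t when, unsigned int id, event_type_t type)
{
	struct event *e = find_by_id(id), **pos;

	if (e) {                     /* already queued: keep the earlier event */
		printf("event %u already queued for %llu\n", id, e->next_update);
		return;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return;
	e->next_update = when;
	e->id = id;
	e->type = type;
	for (pos = &head; *pos && (*pos)->next_update <= when; pos = &(*pos)->next)
		;                    /* skip events that fire no later than this one */
	e->next = *pos;
	*pos = e;
}

int main(void)
{
	add_event(300, 1, EVENT_DRAIN);
	add_event(100, 2, EVENT_REPLENISH);
	add_event(200, 3, EVENT_DRAIN);
	add_event(250, 3, EVENT_DRAIN);  /* duplicate id: ignored */
	for (struct event *e = head; e; e = e->next)
		printf("id %u fires at %llu (type %d)\n", e->id, e->next_update, e->type);
	return 0;
}
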
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 6dee1ec2c99c..79fecd49080a 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -1,3 +1,12 @@
1/*
2 * litmus/sched_mc2.c
3 *
4 * Implementation of the Mixed-Criticality on MultiCore scheduler
5 *
6 * This plugin implements a scheduling algorithm proposed in
7 * the "Mixed-Criticality Real-Time Scheduling for Multicore Systems" paper.
8 */
9
1#include <linux/percpu.h> 10#include <linux/percpu.h>
2#include <linux/slab.h> 11#include <linux/slab.h>
3#include <asm/uaccess.h> 12#include <asm/uaccess.h>
@@ -10,21 +19,29 @@
10#include <litmus/jobs.h> 19#include <litmus/jobs.h>
11#include <litmus/budget.h> 20#include <litmus/budget.h>
12#include <litmus/litmus_proc.h> 21#include <litmus/litmus_proc.h>
22#include <litmus/sched_trace.h>
13 23
14#include <litmus/mc2_common.h> 24#include <litmus/mc2_common.h>
15#include <litmus/reservation.h> 25#include <litmus/reservation.h>
16#include <litmus/polling_reservations.h> 26#include <litmus/polling_reservations.h>
17 27
28/* _global_env - reservation container for level-C tasks*/
18struct gmp_reservation_environment _global_env; 29struct gmp_reservation_environment _global_env;
19 30
31/* cpu_entry - keep track of a running task on a cpu
32 * This state is used to decide the lowest priority cpu
33 */
20struct cpu_entry { 34struct cpu_entry {
21 struct task_struct *scheduled; 35 struct task_struct *scheduled;
22 lt_t deadline; 36 lt_t deadline;
23 int cpu; 37 int cpu;
24 enum crit_level lv; 38 enum crit_level lv;
39 /* if will_schedule is true, this cpu has already been selected and
40 will call mc2_schedule() soon. */
25 bool will_schedule; 41 bool will_schedule;
26}; 42};
27 43
44/* cpu_priority - a global state for choosing the lowest priority CPU */
28struct cpu_priority { 45struct cpu_priority {
29 raw_spinlock_t lock; 46 raw_spinlock_t lock;
30 struct cpu_entry cpu_entries[NR_CPUS]; 47 struct cpu_entry cpu_entries[NR_CPUS];
@@ -32,19 +49,26 @@ struct cpu_priority {
32 49
33struct cpu_priority _lowest_prio_cpu; 50struct cpu_priority _lowest_prio_cpu;
34 51
52/* mc2_task_state - a task state structure */
35struct mc2_task_state { 53struct mc2_task_state {
36 struct task_client res_info; 54 struct task_client res_info;
55 /* if cpu == -1, this task is a global task (level C) */
37 int cpu; 56 int cpu;
38 bool has_departed; 57 bool has_departed;
39 struct mc2_task mc2_param; 58 struct mc2_task mc2_param;
40}; 59};
41 60
61/* crit_entry - maintain the logically running job (ghost job) */
42struct crit_entry { 62struct crit_entry {
43 enum crit_level level; 63 enum crit_level level;
44 struct task_struct *running; 64 struct task_struct *running;
45 //struct hrtimer ghost_timer; 65 //struct hrtimer ghost_timer;
46}; 66};
47 67
68/* mc2_cpu_state - maintain the scheduled state and ghost jobs
69 * timer : timer for partitioned tasks (level A and B)
70 * g_timer : timer for global tasks (level C)
71 */
48struct mc2_cpu_state { 72struct mc2_cpu_state {
49 raw_spinlock_t lock; 73 raw_spinlock_t lock;
50 74
@@ -62,13 +86,22 @@ static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
62#define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) 86#define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id))
63#define local_cpu_state() (&__get_cpu_var(mc2_cpu_state)) 87#define local_cpu_state() (&__get_cpu_var(mc2_cpu_state))
64 88
89/* get_mc2_state - get the task's state */
65static struct mc2_task_state* get_mc2_state(struct task_struct *tsk) 90static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
66{ 91{
67 return (struct mc2_task_state*) tsk_rt(tsk)->plugin_state; 92 struct mc2_task_state* tinfo;
93
94 tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state;
95
96 if (tinfo)
97 return tinfo;
98 else
99 return NULL;
68} 100}
101
102/* get_task_crit_level - return the criticality level of a task */
69static enum crit_level get_task_crit_level(struct task_struct *tsk) 103static enum crit_level get_task_crit_level(struct task_struct *tsk)
70{ 104{
71 //struct mc2_task_state *tinfo = get_mc2_state(tsk);
72 struct mc2_task *mp; 105 struct mc2_task *mp;
73 106
74 if (!tsk || !is_realtime(tsk)) 107 if (!tsk || !is_realtime(tsk))
@@ -82,7 +115,9 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk)
82 return mp->crit; 115 return mp->crit;
83} 116}
84 117
85static struct reservation* res_find_by_id(struct mc2_cpu_state *state, unsigned int id) 118/* res_find_by_id - find reservation by id */
119static struct reservation* res_find_by_id(struct mc2_cpu_state *state,
120 unsigned int id)
86{ 121{
87 struct reservation *res; 122 struct reservation *res;
88 123
@@ -93,7 +128,12 @@ static struct reservation* res_find_by_id(struct mc2_cpu_state *state, unsigned
93 return res; 128 return res;
94} 129}
95 130
96static void mc2_update_time(enum crit_level lv, struct mc2_cpu_state *state, lt_t time) 131/* mc2_update_time - update time for a given criticality level.
132 * caller must hold a proper lock
133 * (cpu_state lock or global lock)
134 */
135static void mc2_update_time(enum crit_level lv,
136 struct mc2_cpu_state *state, lt_t time)
97{ 137{
98 if (lv < CRIT_LEVEL_C) 138 if (lv < CRIT_LEVEL_C)
99 sup_update_time(&state->sup_env, time); 139 sup_update_time(&state->sup_env, time);
@@ -103,6 +143,12 @@ static void mc2_update_time(enum crit_level lv, struct mc2_cpu_state *state, lt_
103 TRACE("update_time(): Criticality level error!!!!\n"); 143 TRACE("update_time(): Criticality level error!!!!\n");
104} 144}
105 145
146/* task_departs - remove a task from its reservation
147 * If the job has remaining budget, convert it to a ghost job
148 * and update crit_entries[]
149 *
150 * @job_complete indicates whether the job has completed
151 */
106static void task_departs(struct task_struct *tsk, int job_complete) 152static void task_departs(struct task_struct *tsk, int job_complete)
107{ 153{
108 struct mc2_task_state* tinfo = get_mc2_state(tsk); 154 struct mc2_task_state* tinfo = get_mc2_state(tsk);
@@ -110,28 +156,30 @@ static void task_departs(struct task_struct *tsk, int job_complete)
110 struct reservation* res; 156 struct reservation* res;
111 struct reservation_client *client; 157 struct reservation_client *client;
112 158
159 BUG_ON(!is_realtime(tsk));
160
113 res = tinfo->res_info.client.reservation; 161 res = tinfo->res_info.client.reservation;
114 client = &tinfo->res_info.client; 162 client = &tinfo->res_info.client;
115 163
116 res->ops->client_departs(res, client, job_complete); 164 res->ops->client_departs(res, client, job_complete);
117 tinfo->has_departed = true; 165 tinfo->has_departed = true;
118 TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget); 166 TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget);
167
119 if (job_complete && res->cur_budget) { 168 if (job_complete && res->cur_budget) {
120 struct crit_entry* ce; 169 struct crit_entry* ce;
121 enum crit_level lv = tinfo->mc2_param.crit; 170 enum crit_level lv = tinfo->mc2_param.crit;
122 //lt_t now = litmus_clock();
123 171
124 ce = &state->crit_entries[lv]; 172 ce = &state->crit_entries[lv];
125 ce->running = tsk; 173 ce->running = tsk;
126 res->is_ghost = 1; 174 res->is_ghost = 1;
127 TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock()); 175 TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
128 176
129 //BUG_ON(hrtimer_active(&ce->ghost_timer));
130 //TRACE("setting GHOST timer %llu\n", ns_to_ktime(now + res->cur_budget));
131 //__hrtimer_start_range_ns(&ce->ghost_timer, ns_to_ktime(now + res->cur_budget), 0, HRTIMER_MODE_ABS_PINNED, 0);
132 } 177 }
133} 178}
134 179
180/* task_arrives - put a task into its reservation
181 * If the job was a ghost job, remove it from crit_entries[]
182 */
135static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) 183static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
136{ 184{
137 struct mc2_task_state* tinfo = get_mc2_state(tsk); 185 struct mc2_task_state* tinfo = get_mc2_state(tsk);
@@ -145,15 +193,22 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
145 tinfo->has_departed = false; 193 tinfo->has_departed = false;
146 res->ops->client_arrives(res, client); 194 res->ops->client_arrives(res, client);
147 195
196 sched_trace_task_release(tsk);
197
148 if (lv != NUM_CRIT_LEVELS) { 198 if (lv != NUM_CRIT_LEVELS) {
149 struct crit_entry *ce; 199 struct crit_entry *ce;
150 ce = &state->crit_entries[lv]; 200 ce = &state->crit_entries[lv];
201 /* if the current task is a ghost job, remove it */
151 if (ce->running == tsk) 202 if (ce->running == tsk)
152 ce->running = NULL; 203 ce->running = NULL;
153 } 204 }
154} 205}
155 206
156/* return: NO_CPU - all CPUs are running tasks with higher priority than Level C */ 207/* get_lowest_prio_cpu - return the lowest priority cpu
208 * This will be used for scheduling level-C tasks.
209 * If all CPUs are running tasks which have
210 * higher priority than level C, return NO_CPU.
211 */
157static int get_lowest_prio_cpu(void) 212static int get_lowest_prio_cpu(void)
158{ 213{
159 struct cpu_entry *ce; 214 struct cpu_entry *ce;
@@ -163,11 +218,15 @@ static int get_lowest_prio_cpu(void)
163 raw_spin_lock(&_lowest_prio_cpu.lock); 218 raw_spin_lock(&_lowest_prio_cpu.lock);
164 for_each_online_cpu(cpu) { 219 for_each_online_cpu(cpu) {
165 ce = &_lowest_prio_cpu.cpu_entries[cpu]; 220 ce = &_lowest_prio_cpu.cpu_entries[cpu];
221 /* If a CPU will call schedule() in the near future, we don't
222 return that CPU. */
166 if (!ce->will_schedule) { 223 if (!ce->will_schedule) {
167 if (!ce->scheduled) { 224 if (!ce->scheduled) {
225 /* Idle cpu, return this. */
168 raw_spin_unlock(&_lowest_prio_cpu.lock); 226 raw_spin_unlock(&_lowest_prio_cpu.lock);
169 return ce->cpu; 227 return ce->cpu;
170 } else if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) { 228 } else if (ce->lv == CRIT_LEVEL_C &&
229 ce->deadline > latest_deadline) {
171 latest_deadline = ce->deadline; 230 latest_deadline = ce->deadline;
172 ret = ce->cpu; 231 ret = ce->cpu;
173 } 232 }
@@ -180,6 +239,14 @@ static int get_lowest_prio_cpu(void)
180} 239}
181 240
182/* NOTE: drops state->lock */ 241/* NOTE: drops state->lock */
242/* mc2_update_timer_and_unlock - set a timer and g_timer and unlock
243 * Whenever res_env.current_time is updated,
244 * we check next_scheduler_update and set
245 * a timer.
246 * If there exists a global event which is
247 * not armed on any CPU and g_timer is not
248 * active, set a g_timer for that event.
249 */
183static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) 250static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
184{ 251{
185 int local; 252 int local;
@@ -211,7 +278,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
211 /* Reprogram only if not already set correctly. */ 278 /* Reprogram only if not already set correctly. */
212 if (!hrtimer_active(&state->timer) || 279 if (!hrtimer_active(&state->timer) ||
213 ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) { 280 ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
214 TRACE("canceling timer...at %llu\n", ktime_to_ns(hrtimer_get_expires(&state->timer))); 281 TRACE("canceling timer...at %llu\n",
282 ktime_to_ns(hrtimer_get_expires(&state->timer)));
215 hrtimer_cancel(&state->timer); 283 hrtimer_cancel(&state->timer);
216 TRACE("setting scheduler timer for %llu\n", update); 284 TRACE("setting scheduler timer for %llu\n", update);
217 /* We cannot use hrtimer_start() here because the 285 /* We cannot use hrtimer_start() here because the
@@ -246,7 +314,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
246 raw_spin_lock(&_global_env.lock); 314 raw_spin_lock(&_global_env.lock);
247 list_for_each_entry_safe(event, next, &_global_env.next_events, list) { 315 list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
248 if (event->timer_armed_on == NO_CPU) { 316 if (event->timer_armed_on == NO_CPU) {
249 found_event = 1; 317 /* If the event time is already passed, we call schedule() on
318 the lowest priority cpu */
250 if (event->next_update < litmus_clock()) { 319 if (event->next_update < litmus_clock()) {
251 int cpu = get_lowest_prio_cpu(); 320 int cpu = get_lowest_prio_cpu();
252 TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); 321 TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
@@ -260,7 +329,12 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
260 } 329 }
261 } else if (!hrtimer_active(&state->g_timer)) { 330 } else if (!hrtimer_active(&state->g_timer)) {
262 int ret; 331 int ret;
263 TRACE("setting global scheduler timer for %llu\n", event->next_update); 332
333 raw_spin_unlock(&_global_env.lock);
334 found_event = 1;
335
336 TRACE("setting global scheduler timer for %llu\n",
337 event->next_update);
264 ret = __hrtimer_start_range_ns(&state->g_timer, 338 ret = __hrtimer_start_range_ns(&state->g_timer,
265 ns_to_ktime(event->next_update), 339 ns_to_ktime(event->next_update),
266 0 /* timer coalescing slack */, 340 0 /* timer coalescing slack */,
@@ -268,74 +342,76 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
268 0 /* wakeup */); 342 0 /* wakeup */);
269 if (!ret) { 343 if (!ret) {
270 event->timer_armed_on = state->cpu; 344 event->timer_armed_on = state->cpu;
345 break;
271 } 346 }
272 } 347 }
273 } 348 }
274 } 349 }
275 raw_spin_unlock(&_global_env.lock); 350 if (found_event == 0)
351 raw_spin_unlock(&_global_env.lock);
276} 352}
277 353
354/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
355 * If the budget of a ghost is exhausted,
356 * clear is_ghost and reschedule
357 */
278static void mc2_update_ghost_state(struct mc2_cpu_state *state) 358static void mc2_update_ghost_state(struct mc2_cpu_state *state)
279{ 359{
280 int lv = 0; 360 int lv = 0;
281 struct crit_entry* ce; 361 struct crit_entry* ce;
282 struct reservation *res; 362 struct reservation *res;
283 struct mc2_task_state *tinfo; 363 struct mc2_task_state *tinfo;
364
365 BUG_ON(!state);
284 366
285 for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { 367 for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
286 ce = &state->crit_entries[lv]; 368 ce = &state->crit_entries[lv];
287 if (ce->running != NULL) { 369 if (ce->running != NULL) {
288 tinfo = get_mc2_state(ce->running); 370 tinfo = get_mc2_state(ce->running);
289 /* 371 if (!tinfo)
290 if (lv != CRIT_LEVEL_C)
291 res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
292 else
293 continue; 372 continue;
294 */ 373
295 res = res_find_by_id(state, tinfo->mc2_param.res_id); 374 res = res_find_by_id(state, tinfo->mc2_param.res_id);
296 TRACE("LV %d running id %d budget %llu\n", lv, tinfo->mc2_param.res_id, res->cur_budget); 375 TRACE("LV %d running id %d budget %llu\n",
376 lv, tinfo->mc2_param.res_id, res->cur_budget);
377 /* If the budget is exhausted, clear is_ghost and reschedule */
297 if (!res->cur_budget) { 378 if (!res->cur_budget) {
298 struct sup_reservation_environment* sup_env = &state->sup_env; 379 struct sup_reservation_environment* sup_env = &state->sup_env;
299 380
300 TRACE("GHOST FINISH id %d at %llu\n", tinfo->mc2_param.res_id, litmus_clock()); 381 TRACE("GHOST FINISH id %d at %llu\n",
382 tinfo->mc2_param.res_id, litmus_clock());
301 ce->running = NULL; 383 ce->running = NULL;
302 res->is_ghost = 0; 384 res->is_ghost = 0;
303 res = list_first_entry_or_null(&sup_env->active_reservations, struct reservation, list); 385
304 if (res) 386 if (lv < CRIT_LEVEL_C) {
305 litmus_reschedule_local(); 387 res = list_first_entry_or_null(
388 &sup_env->active_reservations,
389 struct reservation, list);
390 if (res)
391 litmus_reschedule_local();
392 } else {
393 res = list_first_entry_or_null(
394 &_global_env.active_reservations,
395 struct reservation, list);
396 if (res)
397 litmus_reschedule(state->cpu);
398 }
306 } 399 }
307 } 400 }
308 } 401 }
309} 402}
310 403
311/* 404/* update_cpu_prio - Update cpu's priority
312static enum hrtimer_restart on_ghost_timer(struct hrtimer *timer) 405 * When a cpu picks a new task, call this function
313{ 406 * to update cpu priorities.
314 struct crit_entry *ce; 407 */
315 struct mc2_cpu_state *state;
316
317 ce = container_of(timer, struct crit_entry, ghost_timer);
318 state = container_of(ce, struct mc2_cpu_state, crit_entries[ce->level]);
319
320 TRACE("GHOST_TIMER FIRED at %llu\n", litmus_clock());
321
322 raw_spin_lock(&state->lock);
323 sup_update_time(&state->sup_env, litmus_clock());
324 mc2_update_ghost_state(state);
325
326 raw_spin_unlock(&state->lock);
327
328 return HRTIMER_NORESTART;
329}
330*/
331
332static void update_cpu_prio(struct mc2_cpu_state *state) 408static void update_cpu_prio(struct mc2_cpu_state *state)
333{ 409{
334 struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu]; 410 struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
335 enum crit_level lv = get_task_crit_level(state->scheduled); 411 enum crit_level lv = get_task_crit_level(state->scheduled);
336 412
337 if (!state->scheduled) { 413 if (!state->scheduled) {
338 // cpu is idle. 414 /* cpu is idle. */
339 ce->scheduled = NULL; 415 ce->scheduled = NULL;
340 ce->deadline = ULLONG_MAX; 416 ce->deadline = ULLONG_MAX;
341 ce->lv = NUM_CRIT_LEVELS; 417 ce->lv = NUM_CRIT_LEVELS;
@@ -344,24 +420,31 @@ static void update_cpu_prio(struct mc2_cpu_state *state)
344 ce->deadline = get_deadline(state->scheduled); 420 ce->deadline = get_deadline(state->scheduled);
345 ce->lv = lv; 421 ce->lv = lv;
346 } else if (lv < CRIT_LEVEL_C) { 422 } else if (lv < CRIT_LEVEL_C) {
423 /* If cpu is running level A or B tasks, it is not eligible
424 to run level-C tasks */
347 ce->scheduled = state->scheduled; 425 ce->scheduled = state->scheduled;
348 ce->deadline = 0; 426 ce->deadline = 0;
349 ce->lv = lv; 427 ce->lv = lv;
350 } 428 }
351}; 429};
352 430
431/* on_global_scheduling_timer - Process the budget accounting (replenish
432 * and charge)
433 */
353static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer) 434static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
354{ 435{
355 unsigned long flags; 436 unsigned long flags;
356 enum hrtimer_restart restart = HRTIMER_NORESTART; 437 enum hrtimer_restart restart = HRTIMER_NORESTART;
357 struct mc2_cpu_state *state; 438 struct mc2_cpu_state *state;
358 struct next_timer_event *event, *next; 439 struct next_timer_event *event, *next;
359 bool schedule_now; 440 int schedule_now;
360 lt_t update, now; 441 lt_t update, now;
361 int found_event = 0; 442 int found_event = 0;
362 443
363 state = container_of(timer, struct mc2_cpu_state, g_timer); 444 state = container_of(timer, struct mc2_cpu_state, g_timer);
364 445
446 raw_spin_lock_irqsave(&state->lock, flags);
447
365 /* The scheduling timer should only fire on the local CPU, because 448 /* The scheduling timer should only fire on the local CPU, because
366 * otherwise deadlocks via timer_cancel() are possible. 449 * otherwise deadlocks via timer_cancel() are possible.
367 * Note: this does not interfere with dedicated interrupt handling, as 450 * Note: this does not interfere with dedicated interrupt handling, as
@@ -372,11 +455,13 @@ static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
372 if (state->cpu != raw_smp_processor_id()) 455 if (state->cpu != raw_smp_processor_id())
373 TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n"); 456 TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n");
374 457
375 raw_spin_lock_irqsave(&_global_env.lock, flags); 458 raw_spin_lock(&_global_env.lock);
376 459
377 update = litmus_clock(); 460 update = litmus_clock();
378 TRACE("GLOBAL TIMER FIRED at %llu\n", update); 461 TRACE("GLOBAL TIMER FIRED at %llu\n", update);
379 462
463 /* The event can be processed by other cpus. So, if there are no
464 events to process, we do nothing */
380 list_for_each_entry_safe(event, next, &_global_env.next_events, list) { 465 list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
381 if (event->next_update < update) { 466 if (event->next_update < update) {
382 found_event = 1; 467 found_event = 1;
@@ -387,22 +472,21 @@ static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
387 } 472 }
388 473
389 if (!found_event) { 474 if (!found_event) {
390 raw_spin_unlock_irqrestore(&_global_env.lock, flags); 475 goto unlock;
391 return restart;
392 } 476 }
393 477
478 /* gmp_update_time returns how many tasks became ACTIVE */
394 schedule_now = gmp_update_time(&_global_env, update); 479 schedule_now = gmp_update_time(&_global_env, update);
395 480
396 raw_spin_lock(&state->lock);
397 mc2_update_ghost_state(state); 481 mc2_update_ghost_state(state);
398 raw_spin_unlock(&state->lock);
399 482
400 now = _global_env.env.current_time; 483 now = _global_env.env.current_time;
401 484
402 TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n", 485 TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n",
403 now, update, state->cpu, schedule_now); 486 now, update, state->cpu, schedule_now);
404 487
405 if (schedule_now) { 488 /* Find the lowest cpu, and call reschedule */
489 while (schedule_now--) {
406 int cpu = get_lowest_prio_cpu(); 490 int cpu = get_lowest_prio_cpu();
407 if (cpu != NO_CPU) { 491 if (cpu != NO_CPU) {
408 raw_spin_lock(&_lowest_prio_cpu.lock); 492 raw_spin_lock(&_lowest_prio_cpu.lock);
@@ -413,11 +497,15 @@ static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer)
413 } 497 }
414 } 498 }
415 499
416 raw_spin_unlock_irqrestore(&_global_env.lock, flags); 500unlock:
417 501 raw_spin_unlock(&_global_env.lock);
502 raw_spin_unlock_irqrestore(&state->lock, flags);
503
418 return restart; 504 return restart;
419} 505}
420 506
507/* on_scheduling_timer - timer event for partitioned tasks
508 */
421static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) 509static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
422{ 510{
423 unsigned long flags; 511 unsigned long flags;
@@ -438,8 +526,9 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
438 TRACE("TIMER FIRED at %llu\n", litmus_clock()); 526 TRACE("TIMER FIRED at %llu\n", litmus_clock());
439 raw_spin_lock_irqsave(&state->lock, flags); 527 raw_spin_lock_irqsave(&state->lock, flags);
440 sup_update_time(&state->sup_env, litmus_clock()); 528 sup_update_time(&state->sup_env, litmus_clock());
529 raw_spin_lock(&_global_env.lock);
441 mc2_update_ghost_state(state); 530 mc2_update_ghost_state(state);
442 531 raw_spin_unlock(&_global_env.lock);
443 update = state->sup_env.next_scheduler_update; 532 update = state->sup_env.next_scheduler_update;
444 now = state->sup_env.env.current_time; 533 now = state->sup_env.env.current_time;
445 534
@@ -458,6 +547,8 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
458 return restart; 547 return restart;
459} 548}
460 549
550/* mc2_dispatch - Select the next task to schedule.
551 */
461struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state) 552struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
462{ 553{
463 struct reservation *res, *next; 554 struct reservation *res, *next;
@@ -477,36 +568,38 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
477 } else { 568 } else {
478 ce = &state->crit_entries[lv]; 569 ce = &state->crit_entries[lv];
479 if (likely(!ce->running)) { 570 if (likely(!ce->running)) {
571 /* If we found the next task, clear all flags */
480 sup_scheduler_update_after(sup_env, res->cur_budget); 572 sup_scheduler_update_after(sup_env, res->cur_budget);
481 res->blocked_by_ghost = 0; 573 res->blocked_by_ghost = 0;
482 res->is_ghost = 0; 574 res->is_ghost = 0;
483 return tsk; 575 return tsk;
484 } else { 576 } else {
577 /* We cannot schedule a task of the same criticality
578 because a ghost job exists. Set the blocked_by_ghost
579 flag so that its budget is not charged */
485 res->blocked_by_ghost = 1; 580 res->blocked_by_ghost = 1;
486 } 581 }
487 } 582 }
488 } 583 }
489 } 584 }
490 } 585 }
491 // no level A or B tasks
492 586
587 /* no eligible level A or B tasks exists */
493 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { 588 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
494 if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { 589 if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
495 tsk = res->ops->dispatch_client(res, &time_slice); 590 tsk = res->ops->dispatch_client(res, &time_slice);
496 if (likely(tsk)) { 591 if (likely(tsk)) {
497 lv = get_task_crit_level(tsk); 592 lv = get_task_crit_level(tsk);
498 if (lv == NUM_CRIT_LEVELS) { 593 if (lv == NUM_CRIT_LEVELS) {
499 gmp_scheduler_update_after(&_global_env, res->cur_budget); 594 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
500 //raw_spin_unlock(&_global_env.lock);
501 return tsk; 595 return tsk;
502 } else { 596 } else {
503 ce = &state->crit_entries[lv]; 597 ce = &state->crit_entries[lv];
504 if (likely(!ce->running)) { 598 if (likely(!ce->running)) {
505 gmp_scheduler_update_after(&_global_env, res->cur_budget); 599 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
506 res->blocked_by_ghost = 0; 600 res->blocked_by_ghost = 0;
507 res->is_ghost = 0; 601 res->is_ghost = 0;
508 res->scheduled_on = state->cpu; 602 res->scheduled_on = state->cpu;
509 //raw_spin_unlock(&_global_env.lock);
510 return tsk; 603 return tsk;
511 } else { 604 } else {
512 res->blocked_by_ghost = 1; 605 res->blocked_by_ghost = 1;
@@ -519,18 +612,43 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
519 return NULL; 612 return NULL;
520} 613}
521 614
615/* not used now */
616static void pre_schedule(struct task_struct *prev)
617{
618 enum crit_level lv;
619 if (!prev || !is_realtime(prev))
620 return;
621
622 lv = get_task_crit_level(prev);
623}
624
625/* not used now */
626static void post_schedule(struct task_struct *next)
627{
628 enum crit_level lv;
629 if (!next || !is_realtime(next))
630 return;
631
632 lv = get_task_crit_level(next);
633}
634
635/* mc2_schedule - main scheduler function. pick the next task to run
636 */
522static struct task_struct* mc2_schedule(struct task_struct * prev) 637static struct task_struct* mc2_schedule(struct task_struct * prev)
523{ 638{
524 /* next == NULL means "schedule background work". */ 639 /* next == NULL means "schedule background work". */
525 struct mc2_cpu_state *state = local_cpu_state(); 640 struct mc2_cpu_state *state = local_cpu_state();
526 641
642 pre_schedule(prev);
643
527 raw_spin_lock(&_lowest_prio_cpu.lock); 644 raw_spin_lock(&_lowest_prio_cpu.lock);
528 if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true) 645 if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true)
529 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; 646 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
530 raw_spin_unlock(&_lowest_prio_cpu.lock); 647 raw_spin_unlock(&_lowest_prio_cpu.lock);
531 648
532 raw_spin_lock(&state->lock); 649 raw_spin_lock(&state->lock);
533 650 raw_spin_lock(&_global_env.lock);
651
534 //BUG_ON(state->scheduled && state->scheduled != prev); 652 //BUG_ON(state->scheduled && state->scheduled != prev);
535 //BUG_ON(state->scheduled && !is_realtime(prev)); 653 //BUG_ON(state->scheduled && !is_realtime(prev));
536 if (state->scheduled && state->scheduled != prev) 654 if (state->scheduled && state->scheduled != prev)
@@ -540,19 +658,16 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
540 658
541 /* update time */ 659 /* update time */
542 state->sup_env.will_schedule = true; 660 state->sup_env.will_schedule = true;
543 //TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n"); 661
544 sup_update_time(&state->sup_env, litmus_clock()); 662 sup_update_time(&state->sup_env, litmus_clock());
545
546 raw_spin_lock(&_global_env.lock);
547 gmp_update_time(&_global_env, litmus_clock()); 663 gmp_update_time(&_global_env, litmus_clock());
548 664
549 //TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n");
550 mc2_update_ghost_state(state); 665 mc2_update_ghost_state(state);
551 666
552 /* remove task from reservation if it blocks */ 667 /* remove task from reservation if it blocks */
553 if (is_realtime(prev) && !is_running(prev)) 668 if (is_realtime(prev) && !is_running(prev))
554 task_departs(prev, is_completed(prev)); 669 task_departs(prev, is_completed(prev));
555 670
556 /* figure out what to schedule next */ 671 /* figure out what to schedule next */
557 state->scheduled = mc2_dispatch(&state->sup_env, state); 672 state->scheduled = mc2_dispatch(&state->sup_env, state);
558 if (state->scheduled && is_realtime(state->scheduled)) 673 if (state->scheduled && is_realtime(state->scheduled))
@@ -582,10 +697,10 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
582 } 697 }
583 if (state->scheduled) { 698 if (state->scheduled) {
584 TRACE_TASK(state->scheduled, "scheduled.\n"); 699 TRACE_TASK(state->scheduled, "scheduled.\n");
585 //tinfo = get_mc2_state(state->scheduled);
586 //state->run_level = tinfo->mc2_param.crit;
587 } 700 }
588 701
702 post_schedule(state->scheduled);
703
589 return state->scheduled; 704 return state->scheduled;
590} 705}
591 706
@@ -599,13 +714,15 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
599 * P-RES scheduler. */ 714 * P-RES scheduler. */
600 715
601 now = litmus_clock(); 716 now = litmus_clock();
602 if (is_tardy(tsk, now)) 717 if (is_tardy(tsk, now)) {
603 release_at(tsk, now); 718 release_at(tsk, now);
719 sched_trace_task_release(tsk);
720 }
604 } 721 }
605} 722}
606 723
607/* Called when the state of tsk changes back to TASK_RUNNING. 724/* mc2_task_resume - Called when the state of tsk changes back to
608 * We need to requeue the task. 725 * TASK_RUNNING. We need to requeue the task.
609 */ 726 */
610static void mc2_task_resume(struct task_struct *tsk) 727static void mc2_task_resume(struct task_struct *tsk)
611{ 728{
@@ -624,23 +741,25 @@ static void mc2_task_resume(struct task_struct *tsk)
624 /* Requeue only if self-suspension was already processed. */ 741 /* Requeue only if self-suspension was already processed. */
625 if (tinfo->has_departed) 742 if (tinfo->has_departed)
626 { 743 {
744 raw_spin_lock(&_global_env.lock);
627 /* Assumption: litmus_clock() is synchronized across cores, 745 /* Assumption: litmus_clock() is synchronized across cores,
628 * since we might not actually be executing on tinfo->cpu 746 * since we might not actually be executing on tinfo->cpu
629 * at the moment. */ 747 * at the moment. */
630 if (tinfo->cpu != -1) { 748 if (tinfo->cpu != -1) {
631 sup_update_time(&state->sup_env, litmus_clock()); 749 sup_update_time(&state->sup_env, litmus_clock());
632 } else { 750 } else {
633 raw_spin_lock(&_global_env.lock); 751 //raw_spin_lock(&_global_env.lock);
634 TRACE("RESUME UPDATE ####\n"); 752 TRACE("RESUME UPDATE ####\n");
635 gmp_update_time(&_global_env, litmus_clock()); 753 gmp_update_time(&_global_env, litmus_clock());
636 TRACE("RESUME UPDATE $$$$\n"); 754 TRACE("RESUME UPDATE $$$$\n");
637 raw_spin_unlock(&_global_env.lock); 755 //raw_spin_unlock(&_global_env.lock);
638 } 756 }
639 757
640 mc2_update_ghost_state(state); 758 mc2_update_ghost_state(state);
641 task_arrives(state, tsk); 759 task_arrives(state, tsk);
642 /* NOTE: drops state->lock */ 760 /* NOTE: drops state->lock */
643 TRACE_TASK(tsk, "mc2_resume()\n"); 761 TRACE_TASK(tsk, "mc2_resume()\n");
762 raw_spin_unlock(&_global_env.lock);
644 mc2_update_timer_and_unlock(state); 763 mc2_update_timer_and_unlock(state);
645 local_irq_restore(flags); 764 local_irq_restore(flags);
646 } else { 765 } else {
@@ -651,7 +770,8 @@ static void mc2_task_resume(struct task_struct *tsk)
651 resume_legacy_task_model_updates(tsk); 770 resume_legacy_task_model_updates(tsk);
652} 771}
653 772
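In the resume path above, the task's placement decides which environment gets the time update: a partitioned task (tinfo->cpu != -1) advances its per-CPU sup environment, while a level-C task (cpu == -1) advances the global environment, and _global_env.lock is now held around the whole block either way. A small sketch of that dispatch-by-placement decision (types and helper names are illustrative stand-ins):

	#include <stdio.h>

	typedef unsigned long long lt_t;

	struct task_info {
		int cpu;	/* -1 means the task runs on a global (level-C) reservation */
	};

	static void sup_update_stub(int cpu, lt_t now)
	{
		printf("update per-CPU env of cpu %d at %llu\n", cpu, now);
	}

	static void gmp_update_stub(lt_t now)
	{
		printf("update global env at %llu\n", now);
	}

	/* choose the environment to charge, mirroring the cpu != -1 test above */
	static void account_on_resume(const struct task_info *ti, lt_t now)
	{
		if (ti->cpu != -1)
			sup_update_stub(ti->cpu, now);
		else
			gmp_update_stub(now);
	}

	int main(void)
	{
		struct task_info part = { .cpu = 2 };
		struct task_info glob = { .cpu = -1 };

		account_on_resume(&part, 1000);
		account_on_resume(&glob, 1000);
		return 0;
	}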
654/* syscall backend for job completions */ 773/* mc2_complete_job - syscall backend for job completions
774 */
655static long mc2_complete_job(void) 775static long mc2_complete_job(void)
656{ 776{
657 ktime_t next_release; 777 ktime_t next_release;
@@ -662,6 +782,8 @@ static long mc2_complete_job(void)
662 782
663 tsk_rt(current)->completed = 1; 783 tsk_rt(current)->completed = 1;
664 784
 785 /* If this is the first job instance, we need to reset the
 786 replenishment time to the next release time */
665 if (tsk_rt(current)->sporadic_release) { 787 if (tsk_rt(current)->sporadic_release) {
666 struct mc2_cpu_state *state; 788 struct mc2_cpu_state *state;
667 struct reservation_environment *env; 789 struct reservation_environment *env;
@@ -670,21 +792,27 @@ static long mc2_complete_job(void)
670 unsigned long flags; 792 unsigned long flags;
671 793
672 local_irq_save(flags); 794 local_irq_save(flags);
673 795
674 state = local_cpu_state();
675 env = &(state->sup_env.env);
676 tinfo = get_mc2_state(current); 796 tinfo = get_mc2_state(current);
677 797
678 res = res_find_by_id(state, tsk_rt(current)->mc2_data->res_id); 798 if (get_task_crit_level(current) < CRIT_LEVEL_C)
799 state = cpu_state_for(tinfo->cpu);
800 else
801 state = local_cpu_state();
802
803 raw_spin_lock(&state->lock);
804 env = &(state->sup_env.env);
805
806 res = res_find_by_id(state, tinfo->mc2_param.res_id);
679 807
680 if (get_task_crit_level(current) < CRIT_LEVEL_C) { 808 if (get_task_crit_level(current) < CRIT_LEVEL_C) {
681 raw_spin_lock(&state->lock);
682 env->time_zero = tsk_rt(current)->sporadic_release_time; 809 env->time_zero = tsk_rt(current)->sporadic_release_time;
683 } else { 810 } else {
684 raw_spin_lock(&_global_env.lock); 811 raw_spin_lock(&_global_env.lock);
685 _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time; 812 _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
686 } 813 }
687 814
 815 /* set next_replenishment to the synchronous release time */
688 res->next_replenishment = tsk_rt(current)->sporadic_release_time; 816 res->next_replenishment = tsk_rt(current)->sporadic_release_time;
689 817
690 if (get_task_crit_level(current) == CRIT_LEVEL_A) { 818 if (get_task_crit_level(current) == CRIT_LEVEL_A) {
@@ -697,34 +825,44 @@ static long mc2_complete_job(void)
697 res->cur_budget = 0; 825 res->cur_budget = 0;
698 res->env->change_state(res->env, res, RESERVATION_DEPLETED); 826 res->env->change_state(res->env, res, RESERVATION_DEPLETED);
699 827
700 //TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); 828 TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
701 if (get_task_crit_level(current) < CRIT_LEVEL_C) { 829 if (get_task_crit_level(current) == CRIT_LEVEL_C) {
702 raw_spin_unlock(&state->lock);
703 } else {
704 raw_spin_unlock(&_global_env.lock); 830 raw_spin_unlock(&_global_env.lock);
705 } 831 }
832 raw_spin_unlock(&state->lock);
706 833
707 local_irq_restore(flags); 834 local_irq_restore(flags);
708 } 835 }
836 sched_trace_task_completion(current, 0);
709 837
838 /* update the next release time and deadline */
710 prepare_for_next_period(current); 839 prepare_for_next_period(current);
840
711 next_release = ns_to_ktime(get_release(current)); 841 next_release = ns_to_ktime(get_release(current));
712 preempt_disable(); 842 preempt_disable();
713 TRACE_CUR("next_release=%llu\n", get_release(current)); 843 TRACE_CUR("next_release=%llu\n", get_release(current));
714 if (get_release(current) > litmus_clock()) { 844 if (get_release(current) > litmus_clock()) {
845 /* sleep until next_release */
715 set_current_state(TASK_INTERRUPTIBLE); 846 set_current_state(TASK_INTERRUPTIBLE);
716 preempt_enable_no_resched(); 847 preempt_enable_no_resched();
717 err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS); 848 err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
849 if (get_task_crit_level(current) == CRIT_LEVEL_A)
850 sched_trace_task_release(current);
718 } else { 851 } else {
852 /* release the next job immediately */
719 err = 0; 853 err = 0;
720 TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock()); 854 TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
721 preempt_enable(); 855 preempt_enable();
856 if (get_task_crit_level(current) == CRIT_LEVEL_A)
857 sched_trace_task_release(current);
722 } 858 }
723 859
724 TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock()); 860 TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
725 return err; 861 return err;
726} 862}
727 863
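mc2_complete_job() ends by computing the next release with prepare_for_next_period() and, if that release lies in the future, putting the task to sleep with schedule_hrtimeout() in absolute mode; a tardy job falls through and is released immediately. A userspace analogue of "sleep until an absolute release time", using clock_nanosleep() with TIMER_ABSTIME as a stand-in for the hrtimer sleep (illustrative only, not the kernel path):

	#define _POSIX_C_SOURCE 200112L
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint64_t now_ns(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	static int wait_for_release(uint64_t release_ns)
	{
		struct timespec when = {
			.tv_sec  = release_ns / 1000000000ull,
			.tv_nsec = release_ns % 1000000000ull,
		};

		if (release_ns <= now_ns())
			return 0;	/* tardy: release the next job immediately */

		/* sleep until the absolute release instant */
		return clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &when, NULL);
	}

	int main(void)
	{
		uint64_t next = now_ns() + 5000000ull;	/* pretend release in 5 ms */
		printf("waited, rc=%d\n", wait_for_release(next));
		return 0;
	}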
 864/* mc2_admit_task - Set up mc2 task parameters
865 */
728static long mc2_admit_task(struct task_struct *tsk) 866static long mc2_admit_task(struct task_struct *tsk)
729{ 867{
730 long err = -ESRCH; 868 long err = -ESRCH;
@@ -752,15 +890,13 @@ static long mc2_admit_task(struct task_struct *tsk)
752 890
753 res = sup_find_by_id(&state->sup_env, mp->res_id); 891 res = sup_find_by_id(&state->sup_env, mp->res_id);
754 892
755 /* found the appropriate reservation (or vCPU) */ 893 /* found the appropriate reservation */
756 if (res) { 894 if (res) {
757 TRACE_TASK(tsk, "SUP FOUND RES ID\n"); 895 TRACE_TASK(tsk, "SUP FOUND RES ID\n");
758 tinfo->mc2_param.crit = mp->crit; 896 tinfo->mc2_param.crit = mp->crit;
759 tinfo->mc2_param.res_id = mp->res_id; 897 tinfo->mc2_param.res_id = mp->res_id;
760 898
761 //kfree(tsk_rt(tsk)->plugin_state); 899 /* initial values */
762 //tsk_rt(tsk)->plugin_state = NULL;
763
764 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); 900 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
765 tinfo->cpu = task_cpu(tsk); 901 tinfo->cpu = task_cpu(tsk);
766 tinfo->has_departed = true; 902 tinfo->has_departed = true;
@@ -772,7 +908,10 @@ static long mc2_admit_task(struct task_struct *tsk)
772 908
773 raw_spin_unlock_irqrestore(&state->lock, flags); 909 raw_spin_unlock_irqrestore(&state->lock, flags);
774 } else if (lv == CRIT_LEVEL_C) { 910 } else if (lv == CRIT_LEVEL_C) {
775 raw_spin_lock_irqsave(&_global_env.lock, flags); 911 local_irq_save(flags);
912 state = local_cpu_state();
913 raw_spin_lock(&state->lock);
914 raw_spin_lock(&_global_env.lock);
776 915
777 res = gmp_find_by_id(&_global_env, mp->res_id); 916 res = gmp_find_by_id(&_global_env, mp->res_id);
778 917
@@ -782,9 +921,7 @@ static long mc2_admit_task(struct task_struct *tsk)
782 tinfo->mc2_param.crit = mp->crit; 921 tinfo->mc2_param.crit = mp->crit;
783 tinfo->mc2_param.res_id = mp->res_id; 922 tinfo->mc2_param.res_id = mp->res_id;
784 923
785 //kfree(tsk_rt(tsk)->plugin_state); 924 /* initial values */
786 //tsk_rt(tsk)->plugin_state = NULL;
787
788 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); 925 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
789 tinfo->cpu = -1; 926 tinfo->cpu = -1;
790 tinfo->has_departed = true; 927 tinfo->has_departed = true;
@@ -794,8 +931,9 @@ static long mc2_admit_task(struct task_struct *tsk)
794 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; 931 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
795 } 932 }
796 933
797 raw_spin_unlock_irqrestore(&_global_env.lock, flags); 934 raw_spin_unlock(&_global_env.lock);
798 935 raw_spin_unlock(&state->lock);
936 local_irq_restore(flags);
799 } 937 }
800 938
801 preempt_enable(); 939 preempt_enable();
@@ -806,6 +944,9 @@ static long mc2_admit_task(struct task_struct *tsk)
806 return err; 944 return err;
807} 945}
808 946
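mc2_admit_task() attaches the task to a reservation that must already exist: level-A/B tasks look up mp->res_id in the sup environment of the CPU they run on, while level-C tasks look it up in the global environment and record cpu = -1; the level-C branch now nests local_irq_save(), the local state lock, and _global_env.lock in that order. A compact sketch of the admit decision (struct layout and helpers below are assumptions, not the kernel's):

	#include <stdio.h>

	enum crit_level { CRIT_LEVEL_A, CRIT_LEVEL_B, CRIT_LEVEL_C };

	struct mc2_params {
		enum crit_level crit;
		unsigned int res_id;
		int cpu;	/* -1 for a global (level-C) reservation */
	};

	/* pretend lookups; return nonzero if the reservation id exists */
	static int sup_find_stub(int cpu, unsigned int id) { return id == 10; }
	static int gmp_find_stub(unsigned int id)          { return id == 20; }

	static long admit(struct mc2_params *p, int task_cpu)
	{
		int found;

		if (p->crit < CRIT_LEVEL_C) {
			found  = sup_find_stub(task_cpu, p->res_id);
			p->cpu = task_cpu;	/* partitioned reservation */
		} else {
			found  = gmp_find_stub(p->res_id);
			p->cpu = -1;		/* global reservation */
		}
		return found ? 0 : -3;		/* -3 stands in for -ESRCH */
	}

	int main(void)
	{
		struct mc2_params a = { .crit = CRIT_LEVEL_B, .res_id = 10 };
		struct mc2_params c = { .crit = CRIT_LEVEL_C, .res_id = 20 };

		printf("A/B admit: %ld, C admit: %ld\n", admit(&a, 3), admit(&c, 0));
		return 0;
	}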
 947/* mc2_task_new - A new real-time task has arrived. Release the next job
 948 * at the next reservation replenishment time
949 */
809static void mc2_task_new(struct task_struct *tsk, int on_runqueue, 950static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
810 int is_running) 951 int is_running)
811{ 952{
@@ -837,11 +978,12 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
837 * [see comment in pres_task_resume()] */ 978 * [see comment in pres_task_resume()] */
838 raw_spin_lock(&_global_env.lock); 979 raw_spin_lock(&_global_env.lock);
839 mc2_update_time(lv, state, litmus_clock()); 980 mc2_update_time(lv, state, litmus_clock());
840 raw_spin_unlock(&_global_env.lock);
841 mc2_update_ghost_state(state); 981 mc2_update_ghost_state(state);
842 task_arrives(state, tsk); 982 task_arrives(state, tsk);
843 /* NOTE: drops state->lock */ 983 /* NOTE: drops state->lock */
844 TRACE("mc2_new()\n"); 984 TRACE("mc2_new()\n");
985 raw_spin_unlock(&_global_env.lock);
986
845 mc2_update_timer_and_unlock(state); 987 mc2_update_timer_and_unlock(state);
846 local_irq_restore(flags); 988 local_irq_restore(flags);
847 } else 989 } else
@@ -857,6 +999,8 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
857 TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n"); 999 TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n");
858} 1000}
859 1001
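Per the new header comment, mc2_task_new() releases a fresh task's first job at its reservation's next replenishment time, and the hunk above keeps _global_env.lock held across mc2_update_time(), the ghost-state update, and task_arrives(), so the arrival becomes visible to other CPUs atomically. A tiny sketch of deriving the first release and deadline from the replenishment point (field names are illustrative):

	#include <stdio.h>

	typedef unsigned long long lt_t;

	struct job_params {
		lt_t release;
		lt_t deadline;
	};

	/* the first job is released at the reservation's next replenishment;
	 * its deadline follows from the task's relative deadline */
	static void first_release(struct job_params *jp,
				  lt_t next_replenishment, lt_t rel_deadline)
	{
		jp->release  = next_replenishment;
		jp->deadline = next_replenishment + rel_deadline;
	}

	int main(void)
	{
		struct job_params jp;

		first_release(&jp, 1000000, 500000);
		printf("release=%llu deadline=%llu\n", jp.release, jp.deadline);
		return 0;
	}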
1002/* mc2_reservation_destroy - reservation_destroy system call backend
1003 */
860static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) 1004static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
861{ 1005{
862 long ret = -EINVAL; 1006 long ret = -EINVAL;
@@ -865,8 +1009,13 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
865 struct sup_reservation_environment *sup_env; 1009 struct sup_reservation_environment *sup_env;
866 int found = 0; 1010 int found = 0;
867 enum crit_level lv = get_task_crit_level(current); 1011 enum crit_level lv = get_task_crit_level(current);
1012 unsigned long flags;
868 1013
869 if (cpu == -1) { 1014 if (cpu == -1) {
 1015 /* if the reservation is a global reservation */
1016 local_irq_save(flags);
1017 state = local_cpu_state();
1018 raw_spin_lock(&state->lock);
870 raw_spin_lock(&_global_env.lock); 1019 raw_spin_lock(&_global_env.lock);
871 1020
872 list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { 1021 list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
@@ -901,34 +1050,16 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
901 } 1050 }
902 } 1051 }
903 1052
904/*
905list_for_each_entry(res, &_global_env.depleted_reservations, list) {
906 TRACE("DEPLETED LIST R%d\n", res->id);
907}
908list_for_each_entry(res, &_global_env.inactive_reservations, list) {
909 TRACE("INACTIVE LIST R%d\n", res->id);
910}
911list_for_each_entry(res, &_global_env.active_reservations, list) {
912 TRACE("ACTIVE LIST R%d\n", res->id);
913}
914*/
915 if (list_empty(&_global_env.active_reservations))
916 INIT_LIST_HEAD(&_global_env.active_reservations);
917 if (list_empty(&_global_env.depleted_reservations))
918 INIT_LIST_HEAD(&_global_env.depleted_reservations);
919 if (list_empty(&_global_env.inactive_reservations))
920 INIT_LIST_HEAD(&_global_env.inactive_reservations);
921 if (list_empty(&_global_env.next_events))
922 INIT_LIST_HEAD(&_global_env.next_events);
923
924 raw_spin_unlock(&_global_env.lock); 1053 raw_spin_unlock(&_global_env.lock);
1054 raw_spin_unlock(&state->lock);
1055 local_irq_restore(flags);
925 } else { 1056 } else {
 1057 /* if the reservation is a partitioned reservation */
926 state = cpu_state_for(cpu); 1058 state = cpu_state_for(cpu);
927 raw_spin_lock(&state->lock); 1059 raw_spin_lock_irqsave(&state->lock, flags);
928 1060
929 // res = sup_find_by_id(&state->sup_env, reservation_id); 1061 // res = sup_find_by_id(&state->sup_env, reservation_id);
930 sup_env = &state->sup_env; 1062 sup_env = &state->sup_env;
931 //if (!res) {
932 list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { 1063 list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
933 if (res->id == reservation_id) { 1064 if (res->id == reservation_id) {
934 if (lv == CRIT_LEVEL_A) { 1065 if (lv == CRIT_LEVEL_A) {
@@ -972,15 +1103,16 @@ list_for_each_entry(res, &_global_env.active_reservations, list) {
972 } 1103 }
973 } 1104 }
974 } 1105 }
975 //}
976 1106
977 raw_spin_unlock(&state->lock); 1107 raw_spin_unlock_irqrestore(&state->lock, flags);
978 } 1108 }
979 1109
980 TRACE("RESERVATION_DESTROY ret = %d\n", ret); 1110 TRACE("RESERVATION_DESTROY ret = %d\n", ret);
981 return ret; 1111 return ret;
982} 1112}
983 1113
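mc2_reservation_destroy() walks the depleted, active, and inactive lists of the chosen environment with list_for_each_entry_safe(), which allows the matching reservation to be unlinked and freed while iterating; the defensive list re-initialization that used to follow is dropped, and the global branch now nests the local state lock around _global_env.lock. The safe-removal pattern itself, shown on a plain doubly linked list rather than the kernel's list_head API:

	#include <stdio.h>
	#include <stdlib.h>

	struct res {
		unsigned int id;
		struct res *prev, *next;
	};

	/* remove and free every node with a matching id; the "next" pointer is
	 * saved before the current node is freed, which is exactly why the
	 * _safe iteration variant is needed in the patch */
	static int destroy_by_id(struct res **head, unsigned int id)
	{
		struct res *cur = *head, *nxt;
		int found = 0;

		while (cur) {
			nxt = cur->next;
			if (cur->id == id) {
				if (cur->prev) cur->prev->next = cur->next;
				else           *head = cur->next;
				if (cur->next) cur->next->prev = cur->prev;
				free(cur);
				found = 1;
			}
			cur = nxt;
		}
		return found;
	}

	int main(void)
	{
		struct res *a = calloc(1, sizeof(*a));
		struct res *b = calloc(1, sizeof(*b));

		a->id = 1; b->id = 2;
		a->next = b; b->prev = a;

		printf("destroyed id 2: %d\n", destroy_by_id(&a, 2));
		free(a);
		return 0;
	}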
 1114/* mc2_task_exit - Task becomes a normal (non-real-time) task
1115 */
984static void mc2_task_exit(struct task_struct *tsk) 1116static void mc2_task_exit(struct task_struct *tsk)
985{ 1117{
986 unsigned long flags; 1118 unsigned long flags;
@@ -1007,42 +1139,30 @@ static void mc2_task_exit(struct task_struct *tsk)
1007 if (is_running(tsk)) { 1139 if (is_running(tsk)) {
1008 /* Assumption: litmus_clock() is synchronized across cores 1140 /* Assumption: litmus_clock() is synchronized across cores
1009 * [see comment in pres_task_resume()] */ 1141 * [see comment in pres_task_resume()] */
1010 //if (lv < CRIT_LEVEL_C) 1142
1011 // sup_update_time(&state->sup_env, litmus_clock());
1012 raw_spin_lock(&_global_env.lock); 1143 raw_spin_lock(&_global_env.lock);
1144 /* update both global and partitioned */
1013 mc2_update_time(lv, state, litmus_clock()); 1145 mc2_update_time(lv, state, litmus_clock());
1014 raw_spin_unlock(&_global_env.lock); 1146
1015 mc2_update_ghost_state(state); 1147 mc2_update_ghost_state(state);
1016 task_departs(tsk, 0); 1148 task_departs(tsk, 0);
1017 1149
1018 /* NOTE: drops state->lock */ 1150 /* NOTE: drops state->lock */
1019 TRACE("mc2_exit()\n"); 1151 TRACE("mc2_exit()\n");
1152 raw_spin_unlock(&_global_env.lock);
1020 mc2_update_timer_and_unlock(state); 1153 mc2_update_timer_and_unlock(state);
1021 local_irq_restore(flags); 1154 local_irq_restore(flags);
1022 } else 1155 } else
1023 raw_spin_unlock_irqrestore(&state->lock, flags); 1156 raw_spin_unlock_irqrestore(&state->lock, flags);
1024/* 1157
1025 if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
1026 struct table_driven_reservation *td_res;
1027 struct reservation *res;
1028 res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
1029 td_res = container_of(res, struct table_driven_reservation, res);
1030 kfree(td_res->intervals);
1031 //kfree(td_res);
1032 } else if (tinfo->mc2_param.crit == CRIT_LEVEL_B) {
1033 struct polling_reservation *pres;
1034 struct reservation *res;
1035 res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
1036 pres = container_of(res, struct polling_reservation, res);
1037 kfree(pres);
1038 }
1039*/
1040 kfree(tsk_rt(tsk)->plugin_state); 1158 kfree(tsk_rt(tsk)->plugin_state);
1041 tsk_rt(tsk)->plugin_state = NULL; 1159 tsk_rt(tsk)->plugin_state = NULL;
1042 kfree(tsk_rt(tsk)->mc2_data); 1160 kfree(tsk_rt(tsk)->mc2_data);
1043 tsk_rt(tsk)->mc2_data = NULL; 1161 tsk_rt(tsk)->mc2_data = NULL;
1044} 1162}
1045 1163
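mc2_task_exit() finishes by freeing the per-task plugin_state and mc2_data allocations and clearing the pointers, so stale references cannot be dereferenced if the task later re-enters real-time mode; the previously commented-out per-reservation freeing is removed entirely. The free-and-clear idiom in plain C, with free() in place of kfree():

	#include <stdlib.h>

	struct rt_extra {
		void *plugin_state;
		void *mc2_data;
	};

	/* free both per-task allocations and NULL the pointers so a later
	 * admit cannot see dangling state */
	static void task_cleanup(struct rt_extra *rt)
	{
		free(rt->plugin_state);
		rt->plugin_state = NULL;
		free(rt->mc2_data);
		rt->mc2_data = NULL;
	}

	int main(void)
	{
		struct rt_extra rt = {
			.plugin_state = malloc(32),
			.mc2_data     = malloc(32),
		};

		task_cleanup(&rt);
		return 0;
	}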
1164/* create_polling_reservation - create a new polling reservation
1165 */
1046static long create_polling_reservation( 1166static long create_polling_reservation(
1047 int res_type, 1167 int res_type,
1048 struct reservation_config *config) 1168 struct reservation_config *config)
@@ -1055,6 +1175,7 @@ static long create_polling_reservation(
1055 int periodic = res_type == PERIODIC_POLLING; 1175 int periodic = res_type == PERIODIC_POLLING;
1056 long err = -EINVAL; 1176 long err = -EINVAL;
1057 1177
1178 /* sanity checks */
1058 if (config->polling_params.budget > 1179 if (config->polling_params.budget >
1059 config->polling_params.period) { 1180 config->polling_params.period) {
1060 printk(KERN_ERR "invalid polling reservation (%u): " 1181 printk(KERN_ERR "invalid polling reservation (%u): "
@@ -1138,6 +1259,8 @@ static long create_polling_reservation(
1138 1259
1139#define MAX_INTERVALS 1024 1260#define MAX_INTERVALS 1024
1140 1261
1262/* create_table_driven_reservation - create a table_driven reservation
1263 */
1141static long create_table_driven_reservation( 1264static long create_table_driven_reservation(
1142 struct reservation_config *config) 1265 struct reservation_config *config)
1143{ 1266{
@@ -1238,6 +1361,8 @@ static long create_table_driven_reservation(
1238 return err; 1361 return err;
1239} 1362}
1240 1363
1364/* mc2_reservation_create - reservation_create system call backend
1365 */
1241static long mc2_reservation_create(int res_type, void* __user _config) 1366static long mc2_reservation_create(int res_type, void* __user _config)
1242{ 1367{
1243 long ret = -EINVAL; 1368 long ret = -EINVAL;