-rw-r--r--  include/litmus/trace.h  |   1
-rw-r--r--  litmus/bank_proc.c      |  80
-rw-r--r--  litmus/jobs.c           |   3
-rw-r--r--  litmus/litmus.c         | 111
-rw-r--r--  litmus/sched_mc2.c      | 458
5 files changed, 96 insertions(+), 557 deletions(-)
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index eb0a07f4ba04..4dbb39ea8a14 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | 4 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE |
5 | 5 | ||
6 | |||
7 | #include <litmus/feather_trace.h> | 6 | #include <litmus/feather_trace.h> |
8 | #include <litmus/feather_buffer.h> | 7 | #include <litmus/feather_buffer.h> |
9 | 8 | ||
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c index 08b58f94c391..097cff177a2d 100644 --- a/litmus/bank_proc.c +++ b/litmus/bank_proc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bank_proc.c -- Implementation of the page coloring for cache and bank partition. | 2 | * bank_proc.c -- Implementation of the page coloring for cache and bank partition. |
3 | * The file will keep a pool of colored pages. Users can request pages with a | 3 | * The file will keep a pool of colored pages. Users can request pages with a |
4 | * specific color or bank number. | 4 | * specific color or bank number. |
5 | * Part of the code is modified from Jonathan Herman's code | 5 | * Part of the code is modified from Jonathan Herman's code |
6 | */ | 6 | */ |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
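bank_proc.c bins free pages by DRAM bank and cache color, both decoded from fixed bit fields of a page's physical address (the imx6-sabresd decoding mentioned further down in this file). A minimal userspace sketch of that kind of decoding; the mask and shift values here are placeholders, not the platform's real ones:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder decoding: the real masks/shifts are platform-specific
     * (imx6-sabresd in bank_proc.c) and are not these values. */
    #define COLOR_MASK   0x0000f000UL   /* bits 12-15 of the physical address */
    #define COLOR_SHIFT  12
    #define BANK_MASK_EX 0x38000000UL   /* bits 27-29 */
    #define BANK_SHIFT   27

    int main(void)
    {
        uint64_t phys = 0x1a345000ULL;  /* example physical page address */
        unsigned color = (unsigned)((phys & COLOR_MASK) >> COLOR_SHIFT);
        unsigned bank  = (unsigned)((phys & BANK_MASK_EX) >> BANK_SHIFT);
        printf("color %u, bank %u\n", color, bank);
        return 0;
    }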
@@ -19,9 +19,6 @@ | |||
19 | #include <litmus/sched_trace.h> | 19 | #include <litmus/sched_trace.h> |
20 | #include <litmus/litmus.h> | 20 | #include <litmus/litmus.h> |
21 | 21 | ||
22 | //#define TRACE(fmt, args...) do {} while (false) | ||
23 | //#define TRACE_TASK(fmt, args...) do {} while (false) | ||
24 | |||
25 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 | 22 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 |
26 | 23 | ||
27 | // This Address Decoding is used in imx6-sabredsd platform | 24 | // This Address Decoding is used in imx6-sabredsd platform |
@@ -86,7 +83,6 @@ int node_index[9] = { | |||
86 | 83 | ||
87 | struct mutex void_lockdown_proc; | 84 | struct mutex void_lockdown_proc; |
88 | 85 | ||
89 | |||
90 | /* | 86 | /* |
91 | * Every page list should contain a lock, a list, and a number recording how many pages it stores | 87 | * Every page list should contain a lock, a list, and a number recording how many pages it stores |
92 | */ | 88 | */ |
@@ -105,7 +101,6 @@ static struct color_group *color_groups; | |||
105 | */ | 101 | */ |
106 | unsigned int counting_one_set(unsigned int v) | 102 | unsigned int counting_one_set(unsigned int v) |
107 | { | 103 | { |
108 | // unsigned int v; // count the number of bits set in v | ||
109 | unsigned int c; // c accumulates the total bits set in v | 104 | unsigned int c; // c accumulates the total bits set in v |
110 | 105 | ||
111 | for (c = 0; v; v >>= 1) | 106 | for (c = 0; v; v >>= 1) |
@@ -257,8 +252,6 @@ static inline unsigned int page_list_index(struct page *page) | |||
257 | { | 252 | { |
258 | unsigned int idx; | 253 | unsigned int idx; |
259 | idx = (page_color(page) + page_bank(page)*(number_cachecolors)); | 254 | idx = (page_color(page) + page_bank(page)*(number_cachecolors)); |
260 | // printk("address = %lx, ", page_to_phys(page)); | ||
261 | // printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx); | ||
262 | 255 | ||
263 | return idx; | 256 | return idx; |
264 | } | 257 | } |
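page_list_index() flattens the (bank, color) pair into one index so that each combination gets its own page list. A worked example with assumed counts (the real number_cachecolors is computed in init_variables()):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed platform values, for illustration only. */
        unsigned number_cachecolors = 16;
        unsigned color = 5, bank = 3;

        /* Same arithmetic as page_list_index(): color + bank * number_cachecolors */
        unsigned idx = color + bank * number_cachecolors;
        printf("page list index = %u\n", idx);   /* 5 + 3*16 = 53 */
        return 0;
    }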
@@ -289,10 +282,10 @@ static void show_nr_pages(void) | |||
289 | printk("show nr pages***************************************\n"); | 282 | printk("show nr pages***************************************\n"); |
290 | for (i = 0; i < NUM_PAGE_LIST; ++i) { | 283 | for (i = 0; i < NUM_PAGE_LIST; ++i) { |
291 | cgroup = &color_groups[i]; | 284 | cgroup = &color_groups[i]; |
292 | printk("(%03d) = %03d, ", i, atomic_read(&cgroup->nr_pages)); | 285 | printk("(%03ld) = %03d, ", i, atomic_read(&cgroup->nr_pages)); |
293 | if((i % 8) ==7){ | 286 | if((i % 8) ==7) { |
294 | printk("\n"); | 287 | printk("\n"); |
295 | } | 288 | } |
296 | } | 289 | } |
297 | } | 290 | } |
298 | 291 | ||
@@ -316,6 +309,7 @@ void add_page_to_color_list(struct page *page) | |||
316 | * Replenish the page pool. | 309 | * Replenish the page pool. |
317 | * If the newly allocated page is what we want, it will be pushed to the correct page list; | 310 | * If the newly allocated page is what we want, it will be pushed to the correct page list; |
318 | * otherwise, it will be freed. | 311 | * otherwise, it will be freed. |
312 | * A user needs to invoke this function repeatedly until the page pool has enough pages. | ||
319 | */ | 313 | */ |
320 | static int do_add_pages(void) | 314 | static int do_add_pages(void) |
321 | { | 315 | { |
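Pages returned by alloc_page() land on arbitrary colors, so a single do_add_pages() pass keeps only the allocations whose list still has room and frees the surplus, which is why the comment above says the caller may have to invoke it repeatedly. A userspace analogue of that keep-or-free filtering (not the kernel allocator):

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_LISTS       8   /* assumption; the real count is banks * colors */
    #define PAGES_PER_COLOR 4

    int main(void)
    {
        int nr_pages[NUM_LISTS] = { 0 };
        int kept = 0, freed = 0;

        /* One refill pass: allocations arrive with arbitrary "colors". */
        for (int i = 0; i < 64; i++) {
            int color = rand() % NUM_LISTS;       /* stands in for page_list_index() */
            if (nr_pages[color] < PAGES_PER_COLOR) {
                nr_pages[color]++;                /* keep: this list still has room */
                kept++;
            } else {
                freed++;                          /* surplus: freed later */
            }
        }
        printf("kept %d, freed %d\n", kept, freed);
        return 0;
    }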
@@ -329,8 +323,6 @@ static int do_add_pages(void) | |||
329 | 323 | ||
330 | // until all the page lists contain enough pages | 324 | // until all the page lists contain enough pages |
331 | for (i=0; i< 1024*20;i++) { | 325 | for (i=0; i< 1024*20;i++) { |
332 | //while (smallest_nr_pages() < PAGES_PER_COLOR) { | ||
333 | // printk("smallest = %d\n", smallest_nr_pages()); | ||
334 | page = alloc_page(GFP_HIGHUSER_MOVABLE); | 326 | page = alloc_page(GFP_HIGHUSER_MOVABLE); |
335 | 327 | ||
336 | if (unlikely(!page)) { | 328 | if (unlikely(!page)) { |
@@ -340,55 +332,20 @@ static int do_add_pages(void) | |||
340 | } | 332 | } |
341 | color = page_list_index(page); | 333 | color = page_list_index(page); |
342 | counter[color]++; | 334 | counter[color]++; |
343 | // printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); | 335 | if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=0) { |
344 | //show_nr_pages(); | ||
345 | //if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=16) { | ||
346 | if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { | ||
347 | //if ( PAGES_PER_COLOR && color>=16*2) { | ||
348 | add_page_to_color_list(page); | 336 | add_page_to_color_list(page); |
349 | // printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page)); | 337 | } else { |
350 | } else{ | ||
351 | // Pages here will be freed later | 338 | // Pages here will be freed later |
352 | list_add_tail(&page->lru, &free_later); | 339 | list_add_tail(&page->lru, &free_later); |
353 | free_counter++; | 340 | free_counter++; |
354 | //list_del(&page->lru); | ||
355 | // __free_page(page); | ||
356 | // printk("useless page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page)); | ||
357 | } | 341 | } |
358 | //show_nr_pages(); | 342 | } |
359 | /* | ||
360 | if(free_counter >= PAGES_PER_COLOR) | ||
361 | { | ||
362 | printk("free unwanted page list eariler"); | ||
363 | free_counter = 0; | ||
364 | list_for_each_entry_safe(page, page_tmp, &free_later, lru) { | ||
365 | list_del(&page->lru); | ||
366 | __free_page(page); | ||
367 | } | ||
368 | |||
369 | show_nr_pages(); | ||
370 | } | ||
371 | */ | ||
372 | } | ||
373 | /* printk("page counter = \n"); | ||
374 | for (i=0; i<128; i++) | ||
375 | { | ||
376 | printk("(%03d) = %4d, ", i , counter[i]); | ||
377 | if(i%8 == 7){ | ||
378 | printk("\n"); | ||
379 | } | ||
380 | 343 | ||
381 | } | ||
382 | */ | ||
383 | //printk("After refill : \n"); | ||
384 | //show_nr_pages(); | ||
385 | #if 1 | ||
386 | // Free the unwanted pages | 344 | // Free the unwanted pages |
387 | list_for_each_entry_safe(page, page_tmp, &free_later, lru) { | 345 | list_for_each_entry_safe(page, page_tmp, &free_later, lru) { |
388 | list_del(&page->lru); | 346 | list_del(&page->lru); |
389 | __free_page(page); | 347 | __free_page(page); |
390 | } | 348 | } |
391 | #endif | ||
392 | out: | 349 | out: |
393 | return ret; | 350 | return ret; |
394 | } | 351 | } |
@@ -407,7 +364,6 @@ static struct page *new_alloc_page_color( unsigned long color) | |||
407 | 364 | ||
408 | if( (color <0) || (color)>(number_cachecolors*number_banks -1)) { | 365 | if( (color <0) || (color)>(number_cachecolors*number_banks -1)) { |
409 | TRACE_CUR("Wrong color %lu\n", color); | 366 | TRACE_CUR("Wrong color %lu\n", color); |
410 | // printk(KERN_WARNING "Wrong color %lu\n", color); | ||
411 | goto out; | 367 | goto out; |
412 | } | 368 | } |
413 | 369 | ||
@@ -416,7 +372,6 @@ static struct page *new_alloc_page_color( unsigned long color) | |||
416 | spin_lock(&cgroup->lock); | 372 | spin_lock(&cgroup->lock); |
417 | if (unlikely(!atomic_read(&cgroup->nr_pages))) { | 373 | if (unlikely(!atomic_read(&cgroup->nr_pages))) { |
418 | TRACE_CUR("No free %lu colored pages.\n", color); | 374 | TRACE_CUR("No free %lu colored pages.\n", color); |
419 | // printk(KERN_WARNING "no free %lu colored pages.\n", color); | ||
420 | goto out_unlock; | 375 | goto out_unlock; |
421 | } | 376 | } |
422 | rPage = list_first_entry(&cgroup->list, struct page, lru); | 377 | rPage = list_first_entry(&cgroup->list, struct page, lru); |
@@ -428,12 +383,6 @@ static struct page *new_alloc_page_color( unsigned long color) | |||
428 | out_unlock: | 383 | out_unlock: |
429 | spin_unlock(&cgroup->lock); | 384 | spin_unlock(&cgroup->lock); |
430 | out: | 385 | out: |
431 | /* | ||
432 | if( smallest_nr_pages() == 0) { | ||
433 | //do_add_pages(); | ||
434 | //printk(KERN_ALERT "ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n"); | ||
435 | } | ||
436 | */ | ||
437 | return rPage; | 386 | return rPage; |
438 | } | 387 | } |
439 | 388 | ||
@@ -456,9 +405,7 @@ struct page* get_colored_page(unsigned long color) | |||
456 | */ | 405 | */ |
457 | struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | 406 | struct page *new_alloc_page(struct page *page, unsigned long node, int **x) |
458 | { | 407 | { |
459 | struct color_group *cgroup; | ||
460 | struct page *rPage = NULL; | 408 | struct page *rPage = NULL; |
461 | unsigned int color; | ||
462 | int try = 0; | 409 | int try = 0; |
463 | unsigned int idx; | 410 | unsigned int idx; |
464 | 411 | ||
@@ -479,7 +426,7 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | |||
479 | if (try>=256) | 426 | if (try>=256) |
480 | break; | 427 | break; |
481 | idx = get_next_index(node, idx); | 428 | idx = get_next_index(node, idx); |
482 | printk(KERN_ALERT "try = %d out of page! requesting node = %d, idx = %d\n", try, node, idx); | 429 | printk(KERN_ALERT "try = %d out of page! requesting node = %ld, idx = %d\n", try, node, idx); |
483 | BUG_ON(idx<0 || idx>127); | 430 | BUG_ON(idx<0 || idx>127); |
484 | rPage = new_alloc_page_color(idx); | 431 | rPage = new_alloc_page_color(idx); |
485 | } | 432 | } |
@@ -494,20 +441,19 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | |||
494 | void reclaim_page(struct page *page) | 441 | void reclaim_page(struct page *page) |
495 | { | 442 | { |
496 | const unsigned long color = page_list_index(page); | 443 | const unsigned long color = page_list_index(page); |
497 | unsigned long nr_reclaimed = 0; | ||
498 | spin_lock(&reclaim_lock); | 444 | spin_lock(&reclaim_lock); |
499 | put_page(page); | 445 | put_page(page); |
500 | add_page_to_color_list(page); | 446 | add_page_to_color_list(page); |
501 | 447 | ||
502 | spin_unlock(&reclaim_lock); | 448 | spin_unlock(&reclaim_lock); |
503 | printk("Reclaimed page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); | 449 | printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); |
504 | } | 450 | } |
505 | 451 | ||
506 | 452 | ||
507 | /* | 453 | /* |
508 | * Initialize the numbers of banks and cache colors | 454 | * Initialize the numbers of banks and cache colors |
509 | */ | 455 | */ |
510 | static int __init init_variables(void) | 456 | static void __init init_variables(void) |
511 | { | 457 | { |
512 | number_banks = counting_one_set(BANK_MASK); | 458 | number_banks = counting_one_set(BANK_MASK); |
513 | number_banks = two_exp(number_banks); | 459 | number_banks = two_exp(number_banks); |
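init_variables() derives the bank and color counts from the decode masks: counting_one_set() counts the bits set in a mask and two_exp() (presumably 1 << n) turns that bit count into the number of distinct values. A standalone sketch with an assumed 3-bit bank mask:

    #include <stdio.h>

    /* Mirrors counting_one_set()/two_exp() from bank_proc.c; the example
     * mask value is an assumption. */
    static unsigned counting_one_set(unsigned v)
    {
        unsigned c;
        for (c = 0; v; v >>= 1)
            c += v & 1;
        return c;
    }

    static unsigned two_exp(unsigned n)
    {
        return 1U << n;
    }

    int main(void)
    {
        unsigned bank_mask = 0x38000000;   /* 3 bits set */
        unsigned number_banks = two_exp(counting_one_set(bank_mask));
        printf("number_banks = %u\n", number_banks);   /* 2^3 = 8 */
        return 0;
    }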
@@ -592,7 +538,7 @@ out: | |||
592 | int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, | 538 | int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, |
593 | size_t *lenp, loff_t *ppos) | 539 | size_t *lenp, loff_t *ppos) |
594 | { | 540 | { |
595 | int ret = 0, i = 0; | 541 | int ret = 0; |
596 | mutex_lock(&void_lockdown_proc); | 542 | mutex_lock(&void_lockdown_proc); |
597 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 543 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
598 | if (ret) | 544 | if (ret) |
@@ -608,7 +554,7 @@ out: | |||
608 | int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, | 554 | int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, |
609 | size_t *lenp, loff_t *ppos) | 555 | size_t *lenp, loff_t *ppos) |
610 | { | 556 | { |
611 | int ret = 0, i = 0; | 557 | int ret = 0; |
612 | mutex_lock(&void_lockdown_proc); | 558 | mutex_lock(&void_lockdown_proc); |
613 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 559 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
614 | if (ret) | 560 | if (ret) |
diff --git a/litmus/jobs.c b/litmus/jobs.c index 59c29d517074..368e0b308f3f 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c | |||
@@ -21,6 +21,7 @@ static inline void setup_release(struct task_struct *t, lt_t release) | |||
21 | /* update job sequence number */ | 21 | /* update job sequence number */ |
22 | t->rt_param.job_params.job_no++; | 22 | t->rt_param.job_params.job_no++; |
23 | } | 23 | } |
24 | |||
24 | #define INIT_PHASE_LENGTH_NS (1000000000) | 25 | #define INIT_PHASE_LENGTH_NS (1000000000) |
25 | 26 | ||
26 | void prepare_for_next_period(struct task_struct *t) | 27 | void prepare_for_next_period(struct task_struct *t) |
@@ -34,6 +35,8 @@ void prepare_for_next_period(struct task_struct *t) | |||
34 | (long long)litmus_clock() - | 35 | (long long)litmus_clock() - |
35 | (long long)t->rt_param.job_params.deadline; | 36 | (long long)t->rt_param.job_params.deadline; |
36 | 37 | ||
38 | /* Mode 0 is used for initialization. * ||
39 | * Use sporadic releases for all tasks so as not to overutilize CPUs in mode 0. */ ||
37 | if (tsk_rt(t)->sporadic_release) { | 40 | if (tsk_rt(t)->sporadic_release) { |
38 | TRACE_TASK(t, "sporadic release at %llu\n", | 41 | TRACE_TASK(t, "sporadic release at %llu\n", |
39 | tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no)); | 42 | tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no)); |
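During mode 0 every task is released sporadically, with consecutive jobs spaced INIT_PHASE_LENGTH_NS (1 s) apart, exactly as the TRACE_TASK() expression above computes. A small sketch of that arithmetic (the example times are made up):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t lt_t;                      /* LITMUS time, in nanoseconds */
    #define INIT_PHASE_LENGTH_NS 1000000000ULL  /* 1 second */

    int main(void)
    {
        lt_t sporadic_release_time = 5000000000ULL;   /* assumed start: 5 s */
        unsigned job_no = 3;

        /* release = sporadic_release_time + INIT_PHASE_LENGTH_NS * job_no */
        lt_t release = sporadic_release_time + INIT_PHASE_LENGTH_NS * job_no;
        printf("job %u is released at %llu ns\n", job_no,
               (unsigned long long)release);
        return 0;
    }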
diff --git a/litmus/litmus.c b/litmus/litmus.c index 90e21eeadabb..400fd1472705 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -37,9 +37,6 @@ | |||
37 | #include <trace/events/litmus.h> | 37 | #include <trace/events/litmus.h> |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | //#define TRACE(fmt, args...) do {} while (false) | ||
41 | //#define TRACE_TASK(fmt, args...) do {} while (false) | ||
42 | |||
43 | extern void l2c310_flush_all(void); | 40 | extern void l2c310_flush_all(void); |
44 | 41 | ||
45 | /* Number of RT tasks that exist in the system */ | 42 | /* Number of RT tasks that exist in the system */ |
@@ -354,12 +351,24 @@ extern int isolate_lru_page(struct page *page); | |||
354 | extern void putback_movable_page(struct page *page); | 351 | extern void putback_movable_page(struct page *page); |
355 | extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x); | 352 | extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x); |
356 | 353 | ||
354 | /* | ||
355 | * sys_set_page_color | ||
356 | * @cpu: CPU number to assign page colors. | ||
357 | * Syscall for recoloring pages | ||
358 | * Returns -1 on error. | ||
359 | * N on success. N is the number of pages that could not | ||
360 | * be moved. A return of zero means that all pages | ||
361 | * were successfully moved. Currently, two pages | ||
362 | * cannot be moved: the signal handler and litmus ctrl | ||
363 | * pages. | ||
364 | * Only mc2 tasks may be configured with this system call. | ||
365 | * Use static linking to isolate all pages. | ||
366 | */ | ||
357 | asmlinkage long sys_set_page_color(int cpu) | 367 | asmlinkage long sys_set_page_color(int cpu) |
358 | { | 368 | { |
359 | long ret = 0; | 369 | long ret = 0; |
360 | //struct page *page_itr = NULL; | ||
361 | struct vm_area_struct *vma_itr = NULL; | 370 | struct vm_area_struct *vma_itr = NULL; |
362 | int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0; | 371 | int nr_pages = 0, nr_failed = 0, nr_not_migrated = 0; |
363 | unsigned long node; | 372 | unsigned long node; |
364 | enum crit_level lv; | 373 | enum crit_level lv; |
365 | struct mm_struct *mm; | 374 | struct mm_struct *mm; |
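Per the new comment block above, sys_set_page_color() migrates the calling task's pages into the color/bank partition of the given CPU and returns the number of pages that could not be moved. A hedged userspace sketch of invoking it; the syscall number below is a placeholder, since LITMUS^RT's actual numbering is not part of this diff:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Placeholder: the real __NR_set_page_color comes from the LITMUS^RT
     * headers and is not shown here. */
    #ifndef __NR_set_page_color
    #define __NR_set_page_color 400
    #endif

    int main(void)
    {
        int cpu = 1;   /* recolor this task's pages for CPU 1's partition */
        long not_moved = syscall(__NR_set_page_color, cpu);

        if (not_moved < 0)
            perror("set_page_color");
        else
            printf("%ld page(s) could not be migrated\n", not_moved);
        return 0;
    }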
@@ -375,19 +384,13 @@ asmlinkage long sys_set_page_color(int cpu) | |||
375 | mm = get_task_mm(current); | 384 | mm = get_task_mm(current); |
376 | put_task_struct(current); | 385 | put_task_struct(current); |
377 | 386 | ||
378 | //down_read(¤t->mm->mmap_sem); | ||
379 | down_read(&mm->mmap_sem); | 387 | down_read(&mm->mmap_sem); |
380 | //TRACE_TASK(current, "SYSCALL set_page_color\n"); | ||
381 | vma_itr = mm->mmap; | 388 | vma_itr = mm->mmap; |
382 | while (vma_itr != NULL) { | 389 | while (vma_itr != NULL) { |
383 | unsigned int num_pages = 0, i; | 390 | unsigned int num_pages = 0, i; |
384 | struct page *old_page = NULL; | 391 | struct page *old_page = NULL; |
385 | 392 | ||
386 | num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; | 393 | num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; |
387 | // print vma flags | ||
388 | //printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags); | ||
389 | //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE); | ||
390 | //printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot); | ||
391 | for (i = 0; i < num_pages; i++) { | 394 | for (i = 0; i < num_pages; i++) { |
392 | old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT); | 395 | old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT); |
393 | 396 | ||
@@ -401,30 +404,15 @@ asmlinkage long sys_set_page_color(int cpu) | |||
401 | put_page(old_page); | 404 | put_page(old_page); |
402 | continue; | 405 | continue; |
403 | } | 406 | } |
404 | 407 | ret = isolate_lru_page(old_page); | |
405 | //TRACE_TASK(current, "addr: %08x, pfn: %x, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page)); | 408 | if (!ret) { |
406 | 409 | list_add_tail(&old_page->lru, &pagelist); | |
407 | //if (page_mapcount(old_page) == 1) { | 410 | inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); |
408 | ret = isolate_lru_page(old_page); | 411 | nr_pages++; |
409 | if (!ret) { | 412 | } else { |
410 | list_add_tail(&old_page->lru, &pagelist); | 413 | nr_failed++; |
411 | inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); | ||
412 | nr_pages++; | ||
413 | } else { | ||
414 | //TRACE_TASK(current, "isolate_lru_page failed\n"); | ||
415 | //TRACE_TASK(current, "page_lru = %d PageLRU = %d\n", page_lru(old_page), PageLRU(old_page)); | ||
416 | nr_failed++; | ||
417 | } | ||
418 | //printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page)); | ||
419 | put_page(old_page); | ||
420 | //} | ||
421 | /* | ||
422 | else { | ||
423 | nr_shared_pages++; | ||
424 | //printk(KERN_INFO "SHARED _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page)); | ||
425 | put_page(old_page); | ||
426 | } | 414 | } |
427 | */ | 415 | put_page(old_page); |
428 | } | 416 | } |
429 | 417 | ||
430 | vma_itr = vma_itr->vm_next; | 418 | vma_itr = vma_itr->vm_next; |
@@ -434,7 +422,7 @@ asmlinkage long sys_set_page_color(int cpu) | |||
434 | if (tsk_rt(current)->mc2_data) | 422 | if (tsk_rt(current)->mc2_data) |
435 | lv = tsk_rt(current)->mc2_data->crit; | 423 | lv = tsk_rt(current)->mc2_data->crit; |
436 | else | 424 | else |
437 | BUG();//lv = 0; | 425 | BUG(); //lv = 0; |
438 | 426 | ||
439 | if (cpu == -1) | 427 | if (cpu == -1) |
440 | node = 8; | 428 | node = 8; |
@@ -444,34 +432,16 @@ asmlinkage long sys_set_page_color(int cpu) | |||
444 | if (!list_empty(&pagelist)) { | 432 | if (!list_empty(&pagelist)) { |
445 | ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); | 433 | ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); |
446 | TRACE_TASK(current, "%ld pages not migrated.\n", ret); | 434 | TRACE_TASK(current, "%ld pages not migrated.\n", ret); |
447 | printk(KERN_INFO "%ld pages not migrated.\n", ret); | ||
448 | nr_not_migrated = ret; | 435 | nr_not_migrated = ret; |
449 | if (ret) { | 436 | if (ret) { |
450 | putback_movable_pages(&pagelist); | 437 | putback_movable_pages(&pagelist); |
451 | } | 438 | } |
452 | } | 439 | } |
453 | 440 | ||
454 | /* handle sigpage and litmus ctrl_page */ | ||
455 | /* vma_itr = current->mm->mmap; | ||
456 | while (vma_itr != NULL) { | ||
457 | if (vma_itr->vm_start == tsk_rt(current)->addr_ctrl_page) { | ||
458 | TRACE("litmus ctrl_page = %08x\n", vma_itr->vm_start); | ||
459 | vma_itr->vm_page_prot = PAGE_SHARED; | ||
460 | break; | ||
461 | } | ||
462 | vma_itr = vma_itr->vm_next; | ||
463 | } | ||
464 | */ | ||
465 | up_read(&mm->mmap_sem); | 441 | up_read(&mm->mmap_sem); |
466 | 442 | ||
467 | /* | 443 | TRACE_TASK(current, "node = %ld, nr_migrated_pages = %d, nr_pages = %d nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_pages, nr_failed); |
468 | list_for_each_entry(page_itr, &shared_pagelist, lru) { | 444 | |
469 | TRACE("S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr)); | ||
470 | } | ||
471 | */ | ||
472 | TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed); | ||
473 | printk(KERN_INFO "node = %ld, nr_migrated_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_failed-2, nr_failed); | ||
474 | //printk(KERN_INFO "node = %d\n", cpu_to_node(smp_processor_id())); | ||
475 | return ret; | 445 | return ret; |
476 | } | 446 | } |
477 | 447 | ||
@@ -479,12 +449,12 @@ asmlinkage long sys_set_page_color(int cpu) | |||
479 | asmlinkage long sys_test_call(unsigned int param) | 449 | asmlinkage long sys_test_call(unsigned int param) |
480 | { | 450 | { |
481 | long ret = 0; | 451 | long ret = 0; |
482 | unsigned long flags; | ||
483 | struct vm_area_struct *vma_itr = NULL; | 452 | struct vm_area_struct *vma_itr = NULL; |
484 | 453 | ||
485 | TRACE_CUR("test_call param = %d\n", param); | 454 | TRACE_CUR("test_call param = %d\n", param); |
486 | 455 | ||
487 | if (param == 0) { | 456 | if (param == 0) { |
457 | /* Print page information */ | ||
488 | down_read(¤t->mm->mmap_sem); | 458 | down_read(¤t->mm->mmap_sem); |
489 | vma_itr = current->mm->mmap; | 459 | vma_itr = current->mm->mmap; |
490 | while (vma_itr != NULL) { | 460 | while (vma_itr != NULL) { |
@@ -494,37 +464,12 @@ asmlinkage long sys_test_call(unsigned int param) | |||
494 | printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags); | 464 | printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags); |
495 | printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot)); | 465 | printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot)); |
496 | printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED); | 466 | printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED); |
497 | /* if (vma_itr->vm_file) { | ||
498 | struct file *fp = vma_itr->vm_file; | ||
499 | unsigned long fcount = atomic_long_read(&(fp->f_count)); | ||
500 | printk(KERN_INFO "f_count : %ld\n", fcount); | ||
501 | if (fcount > 1) { | ||
502 | vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot); | ||
503 | } | ||
504 | } | ||
505 | printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot)); | ||
506 | */ | ||
507 | vma_itr = vma_itr->vm_next; | 467 | vma_itr = vma_itr->vm_next; |
508 | } | 468 | } |
509 | printk(KERN_INFO "--------------------------------------------\n"); | 469 | printk(KERN_INFO "--------------------------------------------\n"); |
510 | up_read(¤t->mm->mmap_sem); | 470 | up_read(¤t->mm->mmap_sem); |
511 | |||
512 | local_irq_save(flags); | ||
513 | l2c310_flush_all(); | ||
514 | local_irq_restore(flags); | ||
515 | } | ||
516 | else if (param == 1) { | ||
517 | int i; | ||
518 | for (i = 0; i < 4; i++) { | ||
519 | lock_cache(i, 0x00003fff); | ||
520 | } | ||
521 | } | ||
522 | else if (param == 2) { | ||
523 | int i; | ||
524 | for (i = 0; i < 4; i++) { | ||
525 | lock_cache(i, 0xffffffff); | ||
526 | } | ||
527 | } | 471 | } |
472 | |||
528 | return ret; | 473 | return ret; |
529 | } | 474 | } |
530 | 475 | ||
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c index 7f6fefff0a3b..d7cf3fb83296 100644 --- a/litmus/sched_mc2.c +++ b/litmus/sched_mc2.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Implementation of the Mixed-Criticality on MultiCore scheduler | 4 | * Implementation of the Mixed-Criticality on MultiCore scheduler |
5 | * | 5 | * |
6 | * Thus plugin implements a scheduling algorithm proposed in | 6 | * This plugin implements a scheduling algorithm proposed in |
7 | * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper. | 7 | * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper. |
8 | */ | 8 | */ |
9 | 9 | ||
@@ -29,19 +29,12 @@ | |||
29 | #include <litmus/reservation.h> | 29 | #include <litmus/reservation.h> |
30 | #include <litmus/polling_reservations.h> | 30 | #include <litmus/polling_reservations.h> |
31 | 31 | ||
32 | #ifdef CONFIG_PGMRT_SUPPORT | ||
33 | #include <litmus/pgm.h> | ||
34 | #endif | ||
35 | |||
36 | //#define TRACE(fmt, args...) do {} while (false) | ||
37 | //#define TRACE_TASK(fmt, args...) do {} while (false) | ||
38 | |||
39 | #define BUDGET_ENFORCEMENT_AT_C 0 | 32 | #define BUDGET_ENFORCEMENT_AT_C 0 |
40 | 33 | ||
41 | extern atomic_t num_sync_released; | 34 | extern atomic_t num_sync_released; |
42 | extern void do_partition(enum crit_level lv, int cpu); | 35 | extern void do_partition(enum crit_level lv, int cpu); |
43 | 36 | ||
44 | /* _global_env - reservation container for level-C tasks*/ | 37 | /* _global_env - reservation container for level-C tasks */ |
45 | struct gmp_reservation_environment _global_env_modes[NR_MODES]; | 38 | struct gmp_reservation_environment _global_env_modes[NR_MODES]; |
46 | struct gmp_reservation_environment *_global_env; | 39 | struct gmp_reservation_environment *_global_env; |
47 | raw_spinlock_t global_lock; | 40 | raw_spinlock_t global_lock; |
@@ -55,7 +48,7 @@ struct cpu_entry { | |||
55 | int cpu; | 48 | int cpu; |
56 | enum crit_level lv; | 49 | enum crit_level lv; |
57 | /* if will_schedule is true, this cpu is already selected and | 50 | /* if will_schedule is true, this cpu is already selected and |
58 | call mc2_schedule() soon. */ | 51 | mc2_schedule() will be executed soon. */ |
59 | bool will_schedule; | 52 | bool will_schedule; |
60 | }; | 53 | }; |
61 | 54 | ||
@@ -69,6 +62,7 @@ struct cpu_priority _lowest_prio_cpu; | |||
69 | 62 | ||
70 | /* mc2_task_state - a task state structure */ | 63 | /* mc2_task_state - a task state structure */ |
71 | struct mc2_task_state { | 64 | struct mc2_task_state { |
65 | /* A task may participate in multiple modes, so keep per-mode client info */ ||
72 | struct task_client res_info[NR_MODES]; | 66 | struct task_client res_info[NR_MODES]; |
73 | /* if cpu == -1, this task is a global task (level C) */ | 67 | /* if cpu == -1, this task is a global task (level C) */ |
74 | int cpu; | 68 | int cpu; |
@@ -78,7 +72,6 @@ struct mc2_task_state { | |||
78 | 72 | ||
79 | /* mc2_cpu_state - maintain the scheduled state and ghost jobs | 73 | /* mc2_cpu_state - maintain the scheduled state and ghost jobs |
80 | * timer : timer for partitioned tasks (level A and B) | 74 | * timer : timer for partitioned tasks (level A and B) |
81 | * g_timer : timer for global tasks (level C) | ||
82 | */ | 75 | */ |
83 | struct mc2_cpu_state { | 76 | struct mc2_cpu_state { |
84 | raw_spinlock_t lock; | 77 | raw_spinlock_t lock; |
@@ -89,21 +82,16 @@ struct mc2_cpu_state { | |||
89 | 82 | ||
90 | int cpu; | 83 | int cpu; |
91 | struct task_struct* scheduled; | 84 | struct task_struct* scheduled; |
92 | //struct crit_entry crit_entries[NUM_CRIT_LEVELS]; | ||
93 | //bool spin_flag; //not used on cpu 0 | ||
94 | }; | 85 | }; |
95 | 86 | ||
96 | static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state); | 87 | static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state); |
97 | 88 | ||
98 | static int resched_cpu[NR_CPUS]; | 89 | static int resched_cpu[NR_CPUS]; |
99 | static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); | 90 | static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); |
100 | //level_a_priorities unused | ||
101 | //static int level_a_priorities[NR_CPUS]; | ||
102 | 91 | ||
103 | #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) | 92 | #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) |
104 | #define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state)) | 93 | #define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state)) |
105 | 94 | ||
106 | |||
107 | unsigned int mode; //currently executing mode, from 0 to NR_MODES-1 | 95 | unsigned int mode; //currently executing mode, from 0 to NR_MODES-1 |
108 | unsigned int requested_mode; //The pending mode | 96 | unsigned int requested_mode; //The pending mode |
109 | /* Prevent multiple requests from entering and prevent request from entering while old | 97 | /* Prevent multiple requests from entering and prevent request from entering while old |
@@ -118,6 +106,8 @@ bool cpu_0_task_exist; | |||
118 | bool mode_changed; | 106 | bool mode_changed; |
119 | bool mode_poll_exited; | 107 | bool mode_poll_exited; |
120 | static DEFINE_PER_CPU(unsigned long, mode_counter); | 108 | static DEFINE_PER_CPU(unsigned long, mode_counter); |
109 | |||
110 | /* Mode change macros */ | ||
121 | #define local_mode_counter() (this_cpu_ptr(&mode_counter)) | 111 | #define local_mode_counter() (this_cpu_ptr(&mode_counter)) |
122 | #define cpu_0_mode_counter() (&per_cpu(mode_counter, 0)) | 112 | #define cpu_0_mode_counter() (&per_cpu(mode_counter, 0)) |
123 | #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum)) | 113 | #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum)) |
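in_mode() tests a per-task bitmask in which bit n is set when the task has a reservation in mode n. A quick illustration of that mask arithmetic (the example mask is made up):

    #include <stdio.h>

    int main(void)
    {
        /* bit n set => task participates in mode n (example value) */
        unsigned mode_mask = (1u << 0) | (1u << 2);

        for (unsigned m = 0; m < 4; m++)
            printf("mode %u: %s\n", m,
                   (mode_mask & (1u << m)) ? "in" : "out");
        return 0;
    }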
@@ -135,7 +125,6 @@ asmlinkage long sys_enact_mode(void) | |||
135 | struct reservation *res; | 125 | struct reservation *res; |
136 | struct list_head *pos; | 126 | struct list_head *pos; |
137 | unsigned long flags; | 127 | unsigned long flags; |
138 | TRACE_TASK(current, "ENACTING SYSCALL\n"); | ||
139 | if (state->cpu == 0 && !mode_poll_exited){ | 128 | if (state->cpu == 0 && !mode_poll_exited){ |
140 | unsigned long *other_cpu_counter; | 129 | unsigned long *other_cpu_counter; |
141 | unsigned long cpu0_val = this_cpu_read(mode_counter); | 130 | unsigned long cpu0_val = this_cpu_read(mode_counter); |
@@ -149,7 +138,6 @@ asmlinkage long sys_enact_mode(void) | |||
149 | mode_changed = false; | 138 | mode_changed = false; |
150 | if (pending){ //MCR has entered | 139 | if (pending){ //MCR has entered |
151 | raw_spin_lock_irqsave(&state->lock, flags); | 140 | raw_spin_lock_irqsave(&state->lock, flags); |
152 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
153 | raw_spin_lock(&global_lock); | 141 | raw_spin_lock(&global_lock); |
154 | raw_spin_lock(&mode_lock); | 142 | raw_spin_lock(&mode_lock); |
155 | 143 | ||
@@ -208,13 +196,11 @@ asmlinkage long sys_enact_mode(void) | |||
208 | } | 196 | } |
209 | if( ready ){ //C is throttled | 197 | if( ready ){ //C is throttled |
210 | lt_t new_mode_basetime = get_release(current); | 198 | lt_t new_mode_basetime = get_release(current); |
211 | //TRACE("Timer canceled\n"); | ||
212 | //hrtimer_cancel(&state->timer);//stop listening to old mode timers | ||
213 | mode = requested_mode; | 199 | mode = requested_mode; |
214 | TRACE("Mode has been changed.\n"); | 200 | TRACE("Mode has been changed.\n"); |
215 | mode_changed = true; | 201 | mode_changed = true; |
216 | _global_env = &_global_env_modes[mode]; | 202 | _global_env = &_global_env_modes[mode]; |
217 | //set res->reported for new global tasks | 203 | /* set res->reported for new global tasks */ |
218 | list_for_each(pos, &_global_env->active_reservations){ | 204 | list_for_each(pos, &_global_env->active_reservations){ |
219 | res = list_entry(pos, struct reservation, list); | 205 | res = list_entry(pos, struct reservation, list); |
220 | release_at(res->tsk, new_mode_basetime); | 206 | release_at(res->tsk, new_mode_basetime); |
@@ -231,7 +217,6 @@ asmlinkage long sys_enact_mode(void) | |||
231 | res->reported = 0; | 217 | res->reported = 0; |
232 | } | 218 | } |
233 | gmp_update_time(_global_env, litmus_clock()); | 219 | gmp_update_time(_global_env, litmus_clock()); |
234 | //raw_spin_lock(&state->lock); | ||
235 | 220 | ||
236 | state->sup_env = &state->sup_env_modes[mode]; | 221 | state->sup_env = &state->sup_env_modes[mode]; |
237 | list_for_each(pos, &state->sup_env->active_reservations){ | 222 | list_for_each(pos, &state->sup_env->active_reservations){ |
@@ -247,57 +232,33 @@ asmlinkage long sys_enact_mode(void) | |||
247 | release_at(res->tsk, new_mode_basetime); | 232 | release_at(res->tsk, new_mode_basetime); |
248 | } | 233 | } |
249 | sup_update_time(state->sup_env, litmus_clock()); | 234 | sup_update_time(state->sup_env, litmus_clock()); |
250 | //raw_spin_unlock(&state->lock); | ||
251 | sched_trace_enact_mode(current); | 235 | sched_trace_enact_mode(current); |
252 | TRACE("ENACT\n"); | 236 | TRACE("ENACT\n"); |
253 | } | 237 | } |
254 | raw_spin_unlock(&mode_lock); | 238 | raw_spin_unlock(&mode_lock); |
255 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
256 | raw_spin_unlock(&global_lock); | 239 | raw_spin_unlock(&global_lock); |
257 | //raw_spin_unlock(&state->lock); | ||
258 | raw_spin_unlock_irqrestore(&state->lock, flags); | 240 | raw_spin_unlock_irqrestore(&state->lock, flags); |
259 | 241 | ||
260 | raw_spin_lock(&state->lock); | 242 | raw_spin_lock(&state->lock); |
261 | mc2_update_timer_and_unlock(state); | 243 | mc2_update_timer_and_unlock(state); |
262 | } | 244 | } |
263 | this_cpu_inc(mode_counter); | 245 | this_cpu_inc(mode_counter); |
264 | //local_irq_restore(flags); | ||
265 | //cpu_0_spin_flag = !cpu_0_spin_flag; | ||
266 | } | 246 | } |
267 | else if (!mode_poll_exited) { | 247 | else if (!mode_poll_exited) { |
268 | unsigned long *cpu0_counter = cpu_0_mode_counter(); | 248 | unsigned long *cpu0_counter = cpu_0_mode_counter(); |
269 | unsigned long my_val; | 249 | unsigned long my_val; |
270 | //int timeout = 0; | ||
271 | this_cpu_inc(mode_counter); | 250 | this_cpu_inc(mode_counter); |
272 | my_val = this_cpu_read(mode_counter); | 251 | my_val = this_cpu_read(mode_counter); |
273 | //spin, wait for CPU 0 to stabilize mode decision | 252 | //spin, wait for CPU 0 to stabilize mode decision |
274 | //before scheduling next hyperperiod | 253 | //before scheduling next hyperperiod |
275 | //TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed); | 254 | |
276 | /* | ||
277 | if (state->spin_flag) { | ||
278 | while(cpu_0_spin_flag) | ||
279 | udelay(1); | ||
280 | } | ||
281 | else { | ||
282 | while(!cpu_0_spin_flag) | ||
283 | udelay(1); | ||
284 | } | ||
285 | */ | ||
286 | //TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed); | ||
287 | while (*cpu0_counter < my_val && !mode_poll_exited){ | 255 | while (*cpu0_counter < my_val && !mode_poll_exited){ |
288 | udelay(1); | 256 | udelay(1); |
289 | //if (timeout++ > 1000){ | ||
290 | // if (!cpu_0_task_exist){ | ||
291 | // break; | ||
292 | // } | ||
293 | // timeout = 0; | ||
294 | //} | ||
295 | } | 257 | } |
296 | TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter)); | 258 | TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter)); |
297 | //local_irq_save(flags); | ||
298 | if (mode_changed) { | 259 | if (mode_changed) { |
299 | lt_t new_mode_basetime = get_release(current); | 260 | lt_t new_mode_basetime = get_release(current); |
300 | //TRACE("CPU%d mode changed\n",state->cpu); | 261 | TRACE("CPU%d mode changed\n",state->cpu); |
301 | hrtimer_cancel(&state->timer); //stop listening to old mode timers | 262 | hrtimer_cancel(&state->timer); //stop listening to old mode timers |
302 | TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock()); | 263 | TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock()); |
303 | raw_spin_lock_irqsave(&state->lock, flags); | 264 | raw_spin_lock_irqsave(&state->lock, flags); |
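In sys_enact_mode(), CPU 0 performs the actual mode switch and then increments its per-CPU mode_counter; every other CPU increments its own counter and spins (udelay) until CPU 0's counter catches up, so no CPU schedules the next hyperperiod before the mode decision is stable. A userspace analogue of that counter handshake (threads stand in for CPUs; this is not the kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_ulong cpu0_counter;    /* published by the "CPU 0" thread */

    static void *other_cpu(void *arg)
    {
        unsigned long my_val = 1;         /* this follower's counter this round */
        (void)arg;
        while (atomic_load(&cpu0_counter) < my_val)
            usleep(1);                    /* kernel code uses udelay(1) */
        puts("follower observed the stabilized mode");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, other_cpu, NULL);
        usleep(1000);                        /* "CPU 0" does the mode-change work */
        atomic_fetch_add(&cpu0_counter, 1);  /* publish: decision is stable */
        pthread_join(t, NULL);
        return 0;
    }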
@@ -319,24 +280,13 @@ asmlinkage long sys_enact_mode(void) | |||
319 | 280 | ||
320 | raw_spin_lock(&state->lock); | 281 | raw_spin_lock(&state->lock); |
321 | mc2_update_timer_and_unlock(state); | 282 | mc2_update_timer_and_unlock(state); |
322 | //local_irq_restore(flags); | ||
323 | 283 | ||
324 | } | 284 | } |
325 | //state->spin_flag = !state->spin_flag; | ||
326 | } | 285 | } |
327 | else { | 286 | else { |
328 | //TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed); | 287 | TRACE("CPU%d exits sys_enact_mode(). No cpu_0_task_exist.%d\n",state->cpu, mode_changed); |
329 | return 0; | 288 | return 0; |
330 | } | 289 | } |
331 | TRACE("CPU%d enact syscall ends m_c? %d new_mode %d\n",state->cpu, mode_changed, mode); | ||
332 | //if mode didn't change this has no effect on what's being scheduled | ||
333 | //raw_spin_lock(&state->lock); | ||
334 | //state->sup_env = &state->sup_env_modes[mode]; | ||
335 | //raw_spin_unlock(&state->lock); | ||
336 | //sup_update_time(state->sup_env, litmus_clock()); | ||
337 | //raw_spin_lock(&state->lock); | ||
338 | //mc2_update_timer_and_unlock(state); | ||
339 | TRACE("is timer active? %d remaining %llu\n",hrtimer_active(&state->timer), hrtimer_get_remaining(&state->timer)); | ||
340 | 290 | ||
341 | return 0; | 291 | return 0; |
342 | } | 292 | } |
@@ -426,21 +376,6 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk) | |||
426 | return mp->crit; | 376 | return mp->crit; |
427 | } | 377 | } |
428 | 378 | ||
429 | static int is_init_finished(struct task_struct *tsk) | ||
430 | { | ||
431 | struct mc2_task *mp; | ||
432 | |||
433 | if (!tsk || !is_realtime(tsk)) | ||
434 | return 0; | ||
435 | |||
436 | mp = tsk_rt(tsk)->mc2_data; | ||
437 | |||
438 | if (!mp) | ||
439 | return 0; | ||
440 | else | ||
441 | return mp->init_finished; | ||
442 | } | ||
443 | |||
444 | /* task_depart - remove a task from its reservation | 379 | /* task_depart - remove a task from its reservation |
445 | * If the job has remaining budget, convert it to a ghost job | 380 | * If the job has remaining budget, convert it to a ghost job |
446 | * and update crit_entries[] | 381 | * and update crit_entries[] |
@@ -450,7 +385,6 @@ static int is_init_finished(struct task_struct *tsk) | |||
450 | static void task_departs(struct task_struct *tsk, int job_complete) | 385 | static void task_departs(struct task_struct *tsk, int job_complete) |
451 | { | 386 | { |
452 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 387 | struct mc2_task_state* tinfo = get_mc2_state(tsk); |
453 | //struct mc2_cpu_state* state = local_cpu_state(); | ||
454 | struct reservation* res = NULL; | 388 | struct reservation* res = NULL; |
455 | struct reservation_client *client = NULL; | 389 | struct reservation_client *client = NULL; |
456 | int i; | 390 | int i; |
@@ -470,14 +404,6 @@ static void task_departs(struct task_struct *tsk, int job_complete) | |||
470 | res->ops->client_departs(res, client, job_complete); | 404 | res->ops->client_departs(res, client, job_complete); |
471 | } | 405 | } |
472 | 406 | ||
473 | /* 9/18/2015 fix start - no ghost job handling, empty remaining budget */ | ||
474 | /* | ||
475 | if (job_complete) { | ||
476 | //res->cur_budget = 0; | ||
477 | } | ||
478 | */ | ||
479 | /* fix end */ | ||
480 | |||
481 | tinfo->has_departed = true; | 407 | tinfo->has_departed = true; |
482 | TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock()); | 408 | TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock()); |
483 | } | 409 | } |
@@ -557,12 +483,7 @@ static int get_lowest_prio_cpu(lt_t priority) | |||
557 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | 483 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; |
558 | /* If a CPU will call schedule() in the near future, we don't | 484 | /* If a CPU will call schedule() in the near future, we don't |
559 | return that CPU. */ | 485 | return that CPU. */ |
560 | /* | 486 | |
561 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, | ||
562 | ce->scheduled ? (ce->scheduled)->comm : "null", | ||
563 | ce->scheduled ? (ce->scheduled)->pid : 0, | ||
564 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); | ||
565 | */ | ||
566 | if (!ce->will_schedule) { | 487 | if (!ce->will_schedule) { |
567 | if (!ce->scheduled) { | 488 | if (!ce->scheduled) { |
568 | /* Idle cpu, return this. */ | 489 | /* Idle cpu, return this. */ |
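get_lowest_prio_cpu() picks a CPU to preempt for a newly eligible level-C job: CPUs about to call schedule() are skipped and an idle CPU is returned immediately; otherwise (beyond what this hunk shows, so the comparison below is an assumption) the CPU running the lowest-priority reservation is chosen. A simplified sketch:

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 4
    #define NO_CPU  (-1)

    struct cpu_entry_sk {
        bool will_schedule;        /* about to reschedule: skip it */
        bool idle;                 /* nothing scheduled */
        unsigned long long prio;   /* priority of the scheduled task
                                      (larger value = lower priority, assumed) */
    };

    static int lowest_prio_cpu(const struct cpu_entry_sk ce[], unsigned long long prio)
    {
        int best = NO_CPU;
        unsigned long long worst = prio;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (ce[cpu].will_schedule)
                continue;
            if (ce[cpu].idle)
                return cpu;             /* idle CPU wins immediately */
            if (ce[cpu].prio > worst) { /* running something less urgent */
                worst = ce[cpu].prio;
                best = cpu;
            }
        }
        return best;
    }

    int main(void)
    {
        struct cpu_entry_sk ce[NR_CPUS] = {
            { .will_schedule = true,  .idle = false, .prio = 10 },
            { .will_schedule = false, .idle = false, .prio = 90 },
            { .will_schedule = false, .idle = false, .prio = 40 },
            { .will_schedule = false, .idle = false, .prio = 70 },
        };
        printf("preempt CPU %d\n", lowest_prio_cpu(ce, 50));
        return 0;
    }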
@@ -599,7 +520,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
599 | { | 520 | { |
600 | int local, cpus; | 521 | int local, cpus; |
601 | lt_t update, now; | 522 | lt_t update, now; |
602 | //enum crit_level lv = get_task_crit_level(state->scheduled); | ||
603 | struct next_timer_event *event, *next; | 523 | struct next_timer_event *event, *next; |
604 | int reschedule[NR_CPUS]; | 524 | int reschedule[NR_CPUS]; |
605 | unsigned long flags; | 525 | unsigned long flags; |
@@ -618,7 +538,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
618 | */ | 538 | */ |
619 | local = local_cpu_state() == state; | 539 | local = local_cpu_state() == state; |
620 | 540 | ||
621 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
622 | raw_spin_lock(&global_lock); | 541 | raw_spin_lock(&global_lock); |
623 | 542 | ||
624 | list_for_each_entry_safe(event, next, &_global_env->next_events, list) { | 543 | list_for_each_entry_safe(event, next, &_global_env->next_events, list) { |
@@ -632,18 +551,11 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
632 | if (event->timer_armed_on == NO_CPU) { | 551 | if (event->timer_armed_on == NO_CPU) { |
633 | struct reservation *res = gmp_find_by_id(_global_env, event->id); | 552 | struct reservation *res = gmp_find_by_id(_global_env, event->id); |
634 | int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); | 553 | int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); |
635 | //TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); | ||
636 | list_del(&event->list); | 554 | list_del(&event->list); |
637 | kfree(event); | 555 | kfree(event); |
638 | if (cpu != NO_CPU) { | 556 | if (cpu != NO_CPU) { |
639 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
640 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 557 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
641 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | 558 | reschedule[cpu] = 1; |
642 | |||
643 | //if (cpu == local_cpu_state()->cpu) | ||
644 | // litmus_reschedule_local(); | ||
645 | //else | ||
646 | reschedule[cpu] = 1; | ||
647 | } | 559 | } |
648 | } | 560 | } |
649 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { | 561 | } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { |
@@ -655,7 +567,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
655 | 567 | ||
656 | /* Must drop state lock before calling into hrtimer_start(), which | 568 | /* Must drop state lock before calling into hrtimer_start(), which |
657 | * may raise a softirq, which in turn may wake ksoftirqd. */ | 569 | * may raise a softirq, which in turn may wake ksoftirqd. */ |
658 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | 570 | |
659 | raw_spin_unlock(&global_lock); | 571 | raw_spin_unlock(&global_lock); |
660 | local_irq_restore(flags); | 572 | local_irq_restore(flags); |
661 | raw_spin_unlock(&state->lock); | 573 | raw_spin_unlock(&state->lock); |
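The comment retained above is the key constraint in mc2_update_timer_and_unlock(): hrtimer_start() can raise a softirq and wake ksoftirqd, so it must never be called while the per-CPU state lock is held. Schematically, the expiry is decided under the lock, the lock is dropped, and only then is the timer programmed; a userspace analogue (a pthread mutex standing in for the raw spinlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stands in for hrtimer_start(): may wake other threads, so it must
     * not be called with state_lock held. */
    static void program_timer(long long expires_ns)
    {
        printf("arm timer for %lld ns\n", expires_ns);
    }

    int main(void)
    {
        long long update;

        pthread_mutex_lock(&state_lock);
        update = 1000000;                /* decide the next expiry under the lock */
        pthread_mutex_unlock(&state_lock);

        program_timer(update);           /* call out only after dropping the lock */
        return 0;
    }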
@@ -663,11 +575,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
663 | if ((update <= now) || reschedule[state->cpu]) { | 575 | if ((update <= now) || reschedule[state->cpu]) { |
664 | reschedule[state->cpu] = 0; | 576 | reschedule[state->cpu] = 0; |
665 | litmus_reschedule(state->cpu); | 577 | litmus_reschedule(state->cpu); |
666 | /* | ||
667 | raw_spin_lock(&state->lock); | ||
668 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
669 | raw_spin_unlock(&state->lock); | ||
670 | */ | ||
671 | } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { | 578 | } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { |
672 | /* Reprogram only if not already set correctly. */ | 579 | /* Reprogram only if not already set correctly. */ |
673 | if (!hrtimer_active(&state->timer) || | 580 | if (!hrtimer_active(&state->timer) || |
@@ -708,22 +615,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | |||
708 | state->cpu, | 615 | state->cpu, |
709 | hrtimer_active(&state->timer), | 616 | hrtimer_active(&state->timer), |
710 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | 617 | ktime_to_ns(hrtimer_get_expires(&state->timer))); |
711 | //litmus_reschedule(state->cpu); | ||
712 | /* | ||
713 | raw_spin_lock(&state->lock); | ||
714 | preempt_if_preemptable(state->scheduled, state->cpu); | ||
715 | raw_spin_unlock(&state->lock); | ||
716 | reschedule[state->cpu] = 0; | ||
717 | */ | ||
718 | } | ||
719 | } | ||
720 | /* | ||
721 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | ||
722 | if (reschedule[cpus]) { | ||
723 | litmus_reschedule(cpus); | ||
724 | } | 618 | } |
725 | } | 619 | } |
726 | */ | ||
727 | } | 620 | } |
728 | 621 | ||
729 | /* update_cpu_prio - Update cpu's priority | 622 | /* update_cpu_prio - Update cpu's priority |
@@ -762,7 +655,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
762 | struct mc2_cpu_state *state; | 655 | struct mc2_cpu_state *state; |
763 | lt_t update, now; | 656 | lt_t update, now; |
764 | int global_schedule_now; | 657 | int global_schedule_now; |
765 | //lt_t remain_budget; // no ghost jobs | ||
766 | int reschedule[NR_CPUS]; | 658 | int reschedule[NR_CPUS]; |
767 | int cpus; | 659 | int cpus; |
768 | 660 | ||
@@ -786,18 +678,12 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
786 | 678 | ||
787 | TRACE("Timer fired at %llu\n", litmus_clock()); | 679 | TRACE("Timer fired at %llu\n", litmus_clock()); |
788 | raw_spin_lock_irqsave(&state->lock, flags); | 680 | raw_spin_lock_irqsave(&state->lock, flags); |
789 | //raw_spin_lock(&state->lock); | ||
790 | //local_irq_save(flags); | ||
791 | now = litmus_clock(); | 681 | now = litmus_clock(); |
792 | sup_update_time(state->sup_env, now); | 682 | sup_update_time(state->sup_env, now); |
793 | 683 | ||
794 | /* 9/20/2015 fix - no ghost job | ||
795 | remain_budget = mc2_update_ghost_state(state); | ||
796 | */ | ||
797 | update = state->sup_env->next_scheduler_update; | 684 | update = state->sup_env->next_scheduler_update; |
798 | now = state->sup_env->env.current_time; | 685 | now = state->sup_env->env.current_time; |
799 | 686 | ||
800 | |||
801 | if (update <= now) { | 687 | if (update <= now) { |
802 | litmus_reschedule_local(); | 688 | litmus_reschedule_local(); |
803 | } else if (update != SUP_NO_SCHEDULER_UPDATE) { | 689 | } else if (update != SUP_NO_SCHEDULER_UPDATE) { |
@@ -805,7 +691,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
805 | restart = HRTIMER_RESTART; | 691 | restart = HRTIMER_RESTART; |
806 | } | 692 | } |
807 | 693 | ||
808 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
809 | raw_spin_lock(&global_lock); | 694 | raw_spin_lock(&global_lock); |
810 | global_schedule_now = gmp_update_time(_global_env, litmus_clock()); | 695 | global_schedule_now = gmp_update_time(_global_env, litmus_clock()); |
811 | BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); | 696 | BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); |
@@ -814,39 +699,24 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | |||
814 | while (global_schedule_now--) { | 699 | while (global_schedule_now--) { |
815 | int cpu = get_lowest_prio_cpu(0); | 700 | int cpu = get_lowest_prio_cpu(0); |
816 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | 701 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { |
817 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
818 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 702 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
819 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
820 | TRACE("LOWEST CPU = P%d\n", cpu); | ||
821 | if (cpu == state->cpu && update > now) | 703 | if (cpu == state->cpu && update > now) |
822 | ;//litmus_reschedule_local(); | 704 | ; |
823 | else | 705 | else |
824 | reschedule[cpu] = 1; | 706 | reschedule[cpu] = 1; |
825 | } | 707 | } |
826 | } | 708 | } |
827 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
828 | raw_spin_unlock(&global_lock); | 709 | raw_spin_unlock(&global_lock); |
829 | raw_spin_unlock_irqrestore(&state->lock, flags); | 710 | raw_spin_unlock_irqrestore(&state->lock, flags); |
830 | //raw_spin_unlock(&state->lock); | ||
831 | //local_irq_restore(flags); | ||
832 | 711 | ||
833 | TS_ISR_END; | 712 | TS_ISR_END; |
834 | 713 | ||
835 | for (cpus = 0; cpus<NR_CPUS; cpus++) { | 714 | for (cpus = 0; cpus<NR_CPUS; cpus++) { |
836 | if (reschedule[cpus]) { | 715 | if (reschedule[cpus]) { |
837 | litmus_reschedule(cpus); | 716 | litmus_reschedule(cpus); |
838 | /* | ||
839 | struct mc2_cpu_state *remote_state; | ||
840 | |||
841 | remote_state = cpu_state_for(cpus); | ||
842 | raw_spin_lock(&remote_state->lock); | ||
843 | preempt_if_preemptable(remote_state->scheduled, remote_state->cpu); | ||
844 | raw_spin_unlock(&remote_state->lock); | ||
845 | */ | ||
846 | } | 717 | } |
847 | } | 718 | } |
848 | 719 | ||
849 | |||
850 | return restart; | 720 | return restart; |
851 | } | 721 | } |
852 | 722 | ||
@@ -898,7 +768,6 @@ static long mc2_complete_job(void) | |||
898 | int i; | 768 | int i; |
899 | state = local_cpu_state(); | 769 | state = local_cpu_state(); |
900 | raw_spin_lock(&state->lock); | 770 | raw_spin_lock(&state->lock); |
901 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
902 | raw_spin_lock(&global_lock); | 771 | raw_spin_lock(&global_lock); |
903 | for (i = 0; i < NR_MODES; i++) { | 772 | for (i = 0; i < NR_MODES; i++) { |
904 | if (in_mode(current,i) || i == 0) { | 773 | if (in_mode(current,i) || i == 0) { |
@@ -956,10 +825,6 @@ static long mc2_complete_job(void) | |||
956 | res_reported--; | 825 | res_reported--; |
957 | TRACE_CUR("RES_REPORTED = %d\n", res_reported); | 826 | TRACE_CUR("RES_REPORTED = %d\n", res_reported); |
958 | res->reported = 1; | 827 | res->reported = 1; |
959 | //Current task doesn't exist in new mode | ||
960 | //if ( !in_mode(current, requested_mode) ){ | ||
961 | // litmus_reschedule_local(); | ||
962 | //} | ||
963 | } | 828 | } |
964 | raw_spin_unlock(&mode_lock); | 829 | raw_spin_unlock(&mode_lock); |
965 | } | 830 | } |
@@ -993,17 +858,8 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st | |||
993 | { | 858 | { |
994 | struct reservation *res, *next; | 859 | struct reservation *res, *next; |
995 | struct task_struct *tsk = NULL; | 860 | struct task_struct *tsk = NULL; |
996 | //struct crit_entry *ce; | ||
997 | enum crit_level lv; | 861 | enum crit_level lv; |
998 | lt_t time_slice; | 862 | lt_t time_slice; |
999 | |||
1000 | |||
1001 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
1002 | if (res->state == RESERVATION_ACTIVE) { | ||
1003 | struct task_struct *t = res->ops->dispatch_client(res, &time_slice); | ||
1004 | TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t)); | ||
1005 | } | ||
1006 | } | ||
1007 | 863 | ||
1008 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | 864 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { |
1009 | if (res->state == RESERVATION_ACTIVE) { | 865 | if (res->state == RESERVATION_ACTIVE) { |
@@ -1014,22 +870,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st | |||
1014 | sup_scheduler_update_after(sup_env, res->cur_budget); | 870 | sup_scheduler_update_after(sup_env, res->cur_budget); |
1015 | return tsk; | 871 | return tsk; |
1016 | } else { | 872 | } else { |
1017 | //if (!is_init_finished(tsk)) { | ||
1018 | // TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode); | ||
1019 | // if (num_sync_released != 0 && mode == 0) { | ||
1020 | //ce = &state->crit_entries[lv]; | ||
1021 | sup_scheduler_update_after(sup_env, res->cur_budget); | 873 | sup_scheduler_update_after(sup_env, res->cur_budget); |
1022 | res->blocked_by_ghost = 0; | 874 | res->blocked_by_ghost = 0; |
1023 | res->is_ghost = NO_CPU; | 875 | res->is_ghost = NO_CPU; |
1024 | return tsk; | 876 | return tsk; |
1025 | /* | ||
1026 | } else if (res->mode == mode) { | ||
1027 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
1028 | res->blocked_by_ghost = 0; | ||
1029 | res->is_ghost = NO_CPU; | ||
1030 | return tsk; | ||
1031 | } | ||
1032 | */ | ||
1033 | } | 877 | } |
1034 | } | 878 | } |
1035 | } | 879 | } |
@@ -1042,19 +886,10 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | |||
1042 | { | 886 | { |
1043 | struct reservation *res, *next; | 887 | struct reservation *res, *next; |
1044 | struct task_struct *tsk = NULL; | 888 | struct task_struct *tsk = NULL; |
1045 | //struct crit_entry *ce; | ||
1046 | enum crit_level lv; | 889 | enum crit_level lv; |
1047 | lt_t time_slice; | 890 | lt_t time_slice; |
1048 | 891 | ||
1049 | list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { | 892 | list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { |
1050 | if (res->state == RESERVATION_ACTIVE) { | ||
1051 | struct task_struct *t = res->ops->dispatch_client(res, &time_slice); | ||
1052 | TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t)); | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | //raw_spin_lock(&mode_lock); | ||
1057 | list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { | ||
1058 | BUG_ON(!res); | 893 | BUG_ON(!res); |
1059 | if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { | 894 | if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { |
1060 | tsk = res->ops->dispatch_client(res, &time_slice); | 895 | tsk = res->ops->dispatch_client(res, &time_slice); |
@@ -1074,12 +909,10 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) | |||
1074 | res->is_ghost = NO_CPU; | 909 | res->is_ghost = NO_CPU; |
1075 | TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu); | 910 | TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu); |
1076 | res->scheduled_on = state->cpu; | 911 | res->scheduled_on = state->cpu; |
1077 | //raw_spin_unlock(&mode_lock); | ||
1078 | return tsk; | 912 | return tsk; |
1079 | } | 913 | } |
1080 | } | 914 | } |
1081 | } | 915 | } |
1082 | //raw_spin_unlock(&mode_lock); | ||
1083 | return NULL; | 916 | return NULL; |
1084 | } | 917 | } |
1085 | 918 | ||
@@ -1126,7 +959,7 @@ static inline void post_schedule(struct task_struct *next, int cpu) | |||
1126 | */ | 959 | */ |
1127 | static struct task_struct* mc2_schedule(struct task_struct * prev) | 960 | static struct task_struct* mc2_schedule(struct task_struct * prev) |
1128 | { | 961 | { |
1129 | int np, blocks, exists, to_schedule; | 962 | int np, blocks, exists; |
1130 | /* next == NULL means "schedule background work". */ | 963 | /* next == NULL means "schedule background work". */ |
1131 | lt_t now = litmus_clock(); | 964 | lt_t now = litmus_clock(); |
1132 | struct mc2_cpu_state *state = local_cpu_state(); | 965 | struct mc2_cpu_state *state = local_cpu_state(); |
@@ -1138,11 +971,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1138 | BUG_ON(state->scheduled && state->scheduled != prev); | 971 | BUG_ON(state->scheduled && state->scheduled != prev); |
1139 | BUG_ON(state->scheduled && !is_realtime(prev)); | 972 | BUG_ON(state->scheduled && !is_realtime(prev)); |
1140 | 973 | ||
1141 | //if (state->scheduled && state->scheduled != prev) | ||
1142 | // printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); | ||
1143 | //if (state->scheduled && !is_realtime(prev)) | ||
1144 | // printk(KERN_ALERT "BUG2!!!!!!!! \n"); | ||
1145 | |||
1146 | /* (0) Determine state */ | 974 | /* (0) Determine state */ |
1147 | exists = state->scheduled != NULL; | 975 | exists = state->scheduled != NULL; |
1148 | blocks = exists && !is_current_running(); | 976 | blocks = exists && !is_current_running(); |
@@ -1151,32 +979,13 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1151 | /* update time */ | 979 | /* update time */ |
1152 | state->sup_env->will_schedule = true; | 980 | state->sup_env->will_schedule = true; |
1153 | sup_update_time(state->sup_env, now); | 981 | sup_update_time(state->sup_env, now); |
1154 | /* 9/20/2015 fix */ | 982 | |
1155 | //raw_spin_lock(&_global_env.lock); | ||
1156 | //to_schedule = gmp_update_time(&_global_env, now); | ||
1157 | //raw_spin_unlock(&_global_env.lock); | ||
1158 | |||
1159 | /* 9/20/2015 fix | ||
1160 | mc2_update_ghost_state(state); | ||
1161 | */ | ||
1162 | |||
1163 | /* remove task from reservation if it blocks */ | ||
1164 | /* | ||
1165 | if (is_realtime(prev) && !is_running(prev)) { | ||
1166 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
1167 | raw_spin_lock(&_global_env.lock); | ||
1168 | task_departs(prev, is_completed(prev)); | ||
1169 | if (get_task_crit_level(prev) == CRIT_LEVEL_C) | ||
1170 | raw_spin_unlock(&_global_env.lock); | ||
1171 | }*/ | ||
1172 | if (is_realtime(current) && blocks) { | 983 | if (is_realtime(current) && blocks) { |
1173 | if (get_task_crit_level(current) == CRIT_LEVEL_C){ | 984 | if (get_task_crit_level(current) == CRIT_LEVEL_C){ |
1174 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1175 | raw_spin_lock(&global_lock); | 985 | raw_spin_lock(&global_lock); |
1176 | } | 986 | } |
1177 | task_departs(current, is_completed(current)); | 987 | task_departs(current, is_completed(current)); |
1178 | if (get_task_crit_level(current) == CRIT_LEVEL_C){ | 988 | if (get_task_crit_level(current) == CRIT_LEVEL_C){ |
1179 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1180 | raw_spin_unlock(&global_lock); | 989 | raw_spin_unlock(&global_lock); |
1181 | } | 990 | } |
1182 | } | 991 | } |
@@ -1186,7 +995,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1186 | state->scheduled = mc2_dispatch(state->sup_env, state); | 995 | state->scheduled = mc2_dispatch(state->sup_env, state); |
1187 | 996 | ||
1188 | if (!state->scheduled) { | 997 | if (!state->scheduled) { |
1189 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1190 | raw_spin_lock(&global_lock); | 998 | raw_spin_lock(&global_lock); |
1191 | if (is_realtime(prev)) | 999 | if (is_realtime(prev)) |
1192 | gmp_update_time(_global_env, now); | 1000 | gmp_update_time(_global_env, now); |
@@ -1200,26 +1008,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1200 | update_cpu_prio(state); | 1008 | update_cpu_prio(state); |
1201 | raw_spin_unlock(&global_lock); | 1009 | raw_spin_unlock(&global_lock); |
1202 | } | 1010 | } |
1203 | /* | ||
1204 | if (!state->scheduled) { | ||
1205 | TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); | ||
1206 | //to_schedule = gmp_update_time(_global_env, now); | ||
1207 | state->scheduled = mc2_global_dispatch(state); | ||
1208 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1209 | update_cpu_prio(state); | ||
1210 | TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); | ||
1211 | } else { | ||
1212 | TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); | ||
1213 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1214 | update_cpu_prio(state); | ||
1215 | TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); | ||
1216 | } | ||
1217 | */ | ||
1218 | |||
1219 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
1220 | //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
1221 | //update_cpu_prio(state); | ||
1222 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
1223 | 1011 | ||
1224 | /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ | 1012 | /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ |
1225 | sched_state_task_picked(); | 1013 | sched_state_task_picked(); |
@@ -1235,41 +1023,22 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1235 | struct mc2_task_state* tinfo = get_mc2_state(prev); | 1023 | struct mc2_task_state* tinfo = get_mc2_state(prev); |
1236 | struct reservation* res = tinfo->res_info[mode].client.reservation; | 1024 | struct reservation* res = tinfo->res_info[mode].client.reservation; |
1237 | if (res) { | 1025 | if (res) { |
1238 | TRACE_TASK(prev, "PREV JOB of mode %d was scheduled_on = P%d\n", mode, res->scheduled_on); | ||
1239 | res->scheduled_on = NO_CPU; | 1026 | res->scheduled_on = NO_CPU; |
1240 | } | 1027 | } |
1241 | TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock()); | 1028 | TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock()); |
1242 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ | 1029 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ |
1243 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { | 1030 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { |
1244 | int cpu; | 1031 | int cpu; |
1245 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1246 | raw_spin_lock(&global_lock); | 1032 | raw_spin_lock(&global_lock); |
1247 | cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); | 1033 | cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); |
1248 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | 1034 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { |
1249 | //raw_spin_lock(&_lowest_prio_cpu.lock); | ||
1250 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | 1035 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; |
1251 | resched_cpu[cpu] = 1; | 1036 | resched_cpu[cpu] = 1; |
1252 | //raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
1253 | TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); | ||
1254 | } | 1037 | } |
1255 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1256 | raw_spin_unlock(&global_lock); | 1038 | raw_spin_unlock(&global_lock); |
1257 | } | 1039 | } |
1258 | } | 1040 | } |
1259 | 1041 | ||
1260 | /* | ||
1261 | if (to_schedule != 0) { | ||
1262 | TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); | ||
1263 | while (to_schedule--) { | ||
1264 | int cpu = get_lowest_prio_cpu(0); | ||
1265 | if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { | ||
1266 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
1267 | resched_cpu[cpu] = 1; | ||
1268 | } | ||
1269 | } | ||
1270 | TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); | ||
1271 | } | ||
1272 | */ | ||
1273 | post_schedule(state->scheduled, state->cpu); | 1042 | post_schedule(state->scheduled, state->cpu); |
1274 | 1043 | ||
1275 | raw_spin_unlock(&state->lock); | 1044 | raw_spin_unlock(&state->lock); |
@@ -1277,7 +1046,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) | |||
1277 | TRACE_TASK(state->scheduled, "scheduled.\n"); | 1046 | TRACE_TASK(state->scheduled, "scheduled.\n"); |
1278 | } | 1047 | } |
1279 | 1048 | ||
1280 | |||
1281 | return state->scheduled; | 1049 | return state->scheduled; |
1282 | } | 1050 | } |
1283 | 1051 | ||
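When a level-C task is preempted on the local CPU, mc2_schedule() offers it to the CPU currently running the lowest-priority work; the will_schedule flag keeps a CPU from being poked twice. A hedged sketch of that hand-off, using the names from the hunk above (the wrapper name poke_lowest_prio_cpu_sketch is hypothetical, and get_lowest_prio_cpu() is assumed to return NO_CPU when no CPU should be preempted):

	/* Sketch only: hand a preempted level-C task to the lowest-priority CPU. */
	static void poke_lowest_prio_cpu_sketch(struct reservation *res)
	{
		int cpu;

		raw_spin_lock(&global_lock);
		cpu = get_lowest_prio_cpu(res ? res->priority : LITMUS_NO_PRIORITY);
		if (cpu != NO_CPU && !_lowest_prio_cpu.cpu_entries[cpu].will_schedule) {
			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;	/* avoid double-poking */
			resched_cpu[cpu] = 1;	/* mark that CPU for rescheduling */
		}
		raw_spin_unlock(&global_lock);
	}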
@@ -1308,7 +1076,6 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
1308 | 1076 | ||
1309 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | 1077 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); |
1310 | 1078 | ||
1311 | //local_irq_save(flags); | ||
1312 | preempt_disable(); | 1079 | preempt_disable(); |
1313 | tinfo = get_mc2_state(tsk); | 1080 | tinfo = get_mc2_state(tsk); |
1314 | if (tinfo->cpu != -1) | 1081 | if (tinfo->cpu != -1) |
@@ -1317,41 +1084,36 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
1317 | state = local_cpu_state(); | 1084 | state = local_cpu_state(); |
1318 | preempt_enable(); | 1085 | preempt_enable(); |
1319 | 1086 | ||
1320 | /* 9/20/2015 fix | ||
1321 | raw_spin_lock(&_global_env.lock); | ||
1322 | */ | ||
1323 | /* Requeue only if self-suspension was already processed. */ | 1087 | /* Requeue only if self-suspension was already processed. */ |
1324 | if (tinfo->has_departed) | 1088 | if (tinfo->has_departed) |
1325 | { | 1089 | { |
1326 | /* We don't want to consider jobs before synchronous releases */ | 1090 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE |
1091 | switch(get_task_crit_level(tsk)) { | ||
1092 | case CRIT_LEVEL_A: | ||
1093 | TS_RELEASE_LATENCY_A(get_release(tsk)); | ||
1094 | break; | ||
1095 | case CRIT_LEVEL_B: | ||
1096 | TS_RELEASE_LATENCY_B(get_release(tsk)); | ||
1097 | break; | ||
1098 | case CRIT_LEVEL_C: | ||
1099 | TS_RELEASE_LATENCY_C(get_release(tsk)); | ||
1100 | break; | ||
1101 | default: | ||
1102 | break; | ||
1103 | } | ||
1104 | #endif | ||
1105 | /* We don't want to consider jobs in the initialization mode */ | ||
1327 | if (tsk_rt(tsk)->job_params.job_no == 2) { | 1106 | if (tsk_rt(tsk)->job_params.job_no == 2) { |
1328 | /* | ||
1329 | switch(get_task_crit_level(tsk)) { | ||
1330 | case CRIT_LEVEL_A: | ||
1331 | TS_RELEASE_LATENCY_A(get_release(tsk)); | ||
1332 | break; | ||
1333 | case CRIT_LEVEL_B: | ||
1334 | TS_RELEASE_LATENCY_B(get_release(tsk)); | ||
1335 | break; | ||
1336 | case CRIT_LEVEL_C: | ||
1337 | TS_RELEASE_LATENCY_C(get_release(tsk)); | ||
1338 | break; | ||
1339 | default: | ||
1340 | break; | ||
1341 | } | ||
1342 | */ | ||
1343 | TRACE_TASK(tsk, "INIT_FINISHED is SET\n"); | ||
1344 | tsk_mc2_data(tsk)->init_finished = 1; | 1107 | tsk_mc2_data(tsk)->init_finished = 1; |
1345 | atomic_dec(&num_sync_released); | 1108 | atomic_dec(&num_sync_released); |
1346 | 1109 | ||
1347 | if (atomic_read(&num_sync_released) == 0) { | 1110 | if (atomic_read(&num_sync_released) == 0) { |
1348 | lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no); | 1111 | lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no); |
1349 | TRACE("INIT_PHASE FINISHED. CHANGE TO MODE 1\n"); | ||
1350 | sys_request_mode(1); | 1112 | sys_request_mode(1); |
1351 | sched_trace_sys_start(&start); | 1113 | sched_trace_sys_start(&start); |
1352 | } | 1114 | } |
1353 | TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released)); | ||
1354 | } | 1115 | } |
1116 | |||
1355 | raw_spin_lock_irqsave(&state->lock, flags); | 1117 | raw_spin_lock_irqsave(&state->lock, flags); |
1356 | 1118 | ||
1357 | /* Assumption: litmus_clock() is synchronized across cores, | 1119 | /* Assumption: litmus_clock() is synchronized across cores, |
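The resume path now records release latency per criticality level under CONFIG_SCHED_OVERHEAD_TRACE and uses the atomic counter num_sync_released to detect when every task has finished its initialization jobs. As a side note, the decrement-then-read pair in the hunk could equivalently use the standard kernel helper atomic_dec_and_test(), which makes the zero check atomic; a sketch under that assumption (not the committed code):

	/* Sketch only: init-phase countdown. atomic_dec_and_test() returns true for
	 * exactly one caller (the one that drops the counter to zero), so only one
	 * task requests the switch out of the initialization mode. */
	tsk_mc2_data(tsk)->init_finished = 1;
	if (atomic_dec_and_test(&num_sync_released)) {
		lt_t start = tsk_rt(tsk)->sporadic_release_time
			+ INIT_PHASE_LENGTH_NS * tsk_rt(tsk)->job_params.job_no;
		sys_request_mode(1);		/* leave mode 0 (initialization) */
		sched_trace_sys_start(&start);
	}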
@@ -1361,18 +1123,12 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
1361 | sup_update_time(state->sup_env, litmus_clock()); | 1123 | sup_update_time(state->sup_env, litmus_clock()); |
1362 | task_arrives(state, tsk); | 1124 | task_arrives(state, tsk); |
1363 | } else { | 1125 | } else { |
1364 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1365 | raw_spin_lock(&global_lock); | 1126 | raw_spin_lock(&global_lock); |
1366 | gmp_update_time(_global_env, litmus_clock()); | 1127 | gmp_update_time(_global_env, litmus_clock()); |
1367 | task_arrives(state, tsk); | 1128 | task_arrives(state, tsk); |
1368 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1369 | raw_spin_unlock(&global_lock); | 1129 | raw_spin_unlock(&global_lock); |
1370 | } | 1130 | } |
1371 | 1131 | ||
1372 | /* 9/20/2015 fix | ||
1373 | mc2_update_ghost_state(state); | ||
1374 | */ | ||
1375 | //task_arrives(state, tsk); | ||
1376 | /* NOTE: drops state->lock */ | 1132 | /* NOTE: drops state->lock */ |
1377 | TRACE_TASK(tsk, "mc2_resume()\n"); | 1133 | TRACE_TASK(tsk, "mc2_resume()\n"); |
1378 | raw_spin_unlock_irqrestore(&state->lock, flags); | 1134 | raw_spin_unlock_irqrestore(&state->lock, flags); |
@@ -1381,12 +1137,8 @@ static void mc2_task_resume(struct task_struct *tsk) | |||
1381 | mc2_update_timer_and_unlock(state); | 1137 | mc2_update_timer_and_unlock(state); |
1382 | } else { | 1138 | } else { |
1383 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); | 1139 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); |
1384 | //raw_spin_unlock(&_global_env.lock); | ||
1385 | } | 1140 | } |
1386 | 1141 | ||
1387 | //local_irq_restore(flags); | ||
1388 | |||
1389 | //gmp_free_passed_event(); | ||
1390 | resume_legacy_task_model_updates(tsk); | 1142 | resume_legacy_task_model_updates(tsk); |
1391 | } | 1143 | } |
1392 | 1144 | ||
@@ -1417,8 +1169,6 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1417 | 1169 | ||
1418 | if (lv < CRIT_LEVEL_C) { | 1170 | if (lv < CRIT_LEVEL_C) { |
1419 | state = cpu_state_for(task_cpu(tsk)); | 1171 | state = cpu_state_for(task_cpu(tsk)); |
1420 | //local_irq_save(flags); | ||
1421 | //raw_spin_lock(&state->lock); | ||
1422 | raw_spin_lock_irqsave(&state->lock, flags); | 1172 | raw_spin_lock_irqsave(&state->lock, flags); |
1423 | 1173 | ||
1424 | tinfo->mc2_param.crit = mp->crit; | 1174 | tinfo->mc2_param.crit = mp->crit; |
@@ -1427,15 +1177,11 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1427 | tinfo->mc2_param.res_id = mp->res_id; | 1177 | tinfo->mc2_param.res_id = mp->res_id; |
1428 | tinfo->mc2_param.mode_mask = mp->mode_mask; | 1178 | tinfo->mc2_param.mode_mask = mp->mode_mask; |
1429 | tinfo->mc2_param.init_finished = 0; | 1179 | tinfo->mc2_param.init_finished = 0; |
1430 | // TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); | 1180 | |
1431 | |||
1432 | // TRACE_TASK(tsk, "Mode 0\n"); | ||
1433 | res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id); | 1181 | res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id); |
1434 | 1182 | ||
1435 | /* found the appropriate reservation */ | 1183 | /* found the appropriate reservation */ |
1436 | if (res) { | 1184 | if (res) { |
1437 | // TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n"); | ||
1438 | |||
1439 | /* initial values */ | 1185 | /* initial values */ |
1440 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); | 1186 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); |
1441 | } | 1187 | } |
@@ -1446,16 +1192,13 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1446 | 1192 | ||
1447 | for(i = 1; i < NR_MODES; i++){ | 1193 | for(i = 1; i < NR_MODES; i++){ |
1448 | if (!in_mode(tsk, i)){ | 1194 | if (!in_mode(tsk, i)){ |
1449 | //task not present in mode | 1195 | // task not present in mode |
1450 | continue; | 1196 | continue; |
1451 | } | 1197 | } |
1452 | // TRACE_TASK(tsk, "Mode %d\n",i); | ||
1453 | res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id); | 1198 | res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id); |
1454 | 1199 | ||
1455 | /* found the appropriate reservation */ | 1200 | /* found the appropriate reservation */ |
1456 | if (res) { | 1201 | if (res) { |
1457 | // TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i); | ||
1458 | |||
1459 | /* initial values */ | 1202 | /* initial values */ |
1460 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); | 1203 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); |
1461 | } | 1204 | } |
@@ -1475,18 +1218,10 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1475 | cpu_0_task_exist = true; | 1218 | cpu_0_task_exist = true; |
1476 | } | 1219 | } |
1477 | atomic_inc(&num_sync_released); | 1220 | atomic_inc(&num_sync_released); |
1478 | //raw_spin_unlock(&state->lock); | ||
1479 | //local_irq_restore(flags); | ||
1480 | raw_spin_unlock_irqrestore(&state->lock, flags); | 1221 | raw_spin_unlock_irqrestore(&state->lock, flags); |
1481 | } else if (lv == CRIT_LEVEL_C) { | 1222 | } else if (lv == CRIT_LEVEL_C) { |
1482 | // TRACE_TASK(tsk, "Task being admitted is Level C\n"); | ||
1483 | state = local_cpu_state(); | 1223 | state = local_cpu_state(); |
1484 | raw_spin_lock_irqsave(&state->lock, flags); | 1224 | raw_spin_lock_irqsave(&state->lock, flags); |
1485 | //local_irq_save(flags); | ||
1486 | //raw_spin_lock(&state->lock); | ||
1487 | //state = local_cpu_state(); | ||
1488 | |||
1489 | //raw_spin_lock(&state->lock); | ||
1490 | 1225 | ||
1491 | tinfo->mc2_param.crit = mp->crit; | 1226 | tinfo->mc2_param.crit = mp->crit; |
1492 | tinfo->cpu = -1; | 1227 | tinfo->cpu = -1; |
@@ -1495,18 +1230,11 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1495 | tinfo->mc2_param.mode_mask = mp->mode_mask; | 1230 | tinfo->mc2_param.mode_mask = mp->mode_mask; |
1496 | tinfo->mc2_param.init_finished = 0; | 1231 | tinfo->mc2_param.init_finished = 0; |
1497 | 1232 | ||
1498 | // TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); | ||
1499 | |||
1500 | // TRACE_TASK(tsk, "Mode 0\n"); | ||
1501 | |||
1502 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1503 | raw_spin_lock(&global_lock); | 1233 | raw_spin_lock(&global_lock); |
1504 | res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id); | 1234 | res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id); |
1505 | 1235 | ||
1506 | /* found the appropriate reservation */ | 1236 | /* found the appropriate reservation */ |
1507 | if (res) { | 1237 | if (res) { |
1508 | // TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n"); | ||
1509 | |||
1510 | /* initial values */ | 1238 | /* initial values */ |
1511 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); | 1239 | err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); |
1512 | } | 1240 | } |
@@ -1522,8 +1250,6 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1522 | 1250 | ||
1523 | /* found the appropriate reservation (or vCPU) */ | 1251 | /* found the appropriate reservation (or vCPU) */ |
1524 | if (res) { | 1252 | if (res) { |
1525 | TRACE_TASK(tsk, "GMP FOUND RES ID in mode %d\n", i); | ||
1526 | |||
1527 | /* initial values */ | 1253 | /* initial values */ |
1528 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); | 1254 | err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); |
1529 | 1255 | ||
@@ -1548,10 +1274,7 @@ static long mc2_admit_task(struct task_struct *tsk) | |||
1548 | 1274 | ||
1549 | atomic_inc(&num_sync_released); | 1275 | atomic_inc(&num_sync_released); |
1550 | raw_spin_unlock_irqrestore(&state->lock, flags); | 1276 | raw_spin_unlock_irqrestore(&state->lock, flags); |
1551 | //raw_spin_unlock(&state->lock); | ||
1552 | //local_irq_restore(flags); | ||
1553 | } | 1277 | } |
1554 | |||
1555 | 1278 | ||
1556 | if (err) | 1279 | if (err) |
1557 | kfree(tinfo); | 1280 | kfree(tinfo); |
@@ -1568,7 +1291,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
1568 | { | 1291 | { |
1569 | unsigned long flags; | 1292 | unsigned long flags; |
1570 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 1293 | struct mc2_task_state* tinfo = get_mc2_state(tsk); |
1571 | struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); | 1294 | struct mc2_cpu_state *state; |
1572 | struct reservation *res; | 1295 | struct reservation *res; |
1573 | enum crit_level lv = get_task_crit_level(tsk); | 1296 | enum crit_level lv = get_task_crit_level(tsk); |
1574 | lt_t release = 0; | 1297 | lt_t release = 0; |
@@ -1584,54 +1307,42 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
1584 | state = cpu_state_for(tinfo->cpu); | 1307 | state = cpu_state_for(tinfo->cpu); |
1585 | 1308 | ||
1586 | 1309 | ||
1587 | /* acquire the lock protecting the state and disable interrupts */ | ||
1588 | //raw_spin_lock(&_global_env.lock); | ||
1589 | //raw_spin_lock(&state->lock); | ||
1590 | if (is_running) { | 1310 | if (is_running) { |
1591 | state->scheduled = tsk; | 1311 | state->scheduled = tsk; |
1592 | /* make sure this task should actually be running */ | 1312 | /* make sure this task should actually be running */ |
1593 | litmus_reschedule_local(); | 1313 | litmus_reschedule_local(); |
1594 | } | 1314 | } |
1595 | 1315 | ||
1316 | /* acquire the lock protecting the state and disable interrupts */ | ||
1596 | local_irq_save(flags); | 1317 | local_irq_save(flags); |
1597 | raw_spin_lock(&state->lock); | 1318 | raw_spin_lock(&state->lock); |
1598 | 1319 | ||
1599 | if (lv == CRIT_LEVEL_C) { | 1320 | if (lv == CRIT_LEVEL_C) { |
1600 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1601 | raw_spin_lock(&global_lock); | 1321 | raw_spin_lock(&global_lock); |
1602 | res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id); | 1322 | res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id); |
1603 | } | 1323 | } |
1604 | else { | 1324 | else { |
1605 | res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id); | 1325 | res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id); |
1606 | } | 1326 | } |
1607 | 1327 | BUG_ON(!res); | |
1608 | //BUG_ON(!res); | 1328 | |
1609 | // the current mode doesn't have this task. | 1329 | // the current mode doesn't have this task. |
1610 | // do not update timer and set the next release time. | 1330 | // do not update timer and set the next release time. |
1611 | 1331 | ||
1612 | //res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
1613 | BUG_ON(!res); | ||
1614 | |||
1615 | if (on_runqueue || is_running) { | 1332 | if (on_runqueue || is_running) { |
1616 | /* Assumption: litmus_clock() is synchronized across cores | 1333 | /* Assumption: litmus_clock() is synchronized across cores |
1617 | * [see comment in pres_task_resume()] */ | 1334 | * [see comment in pres_task_resume()] */ |
1618 | if (lv == CRIT_LEVEL_C) { | 1335 | if (lv == CRIT_LEVEL_C) { |
1619 | gmp_update_time(_global_env, litmus_clock()); | 1336 | gmp_update_time(_global_env, litmus_clock()); |
1620 | //raw_spin_unlock(&_global_env.lock); | ||
1621 | } | 1337 | } |
1622 | else | 1338 | else |
1623 | sup_update_time(state->sup_env, litmus_clock()); | 1339 | sup_update_time(state->sup_env, litmus_clock()); |
1624 | //mc2_update_time(lv, state, litmus_clock()); | 1340 | |
1625 | /* 9/20/2015 fix | ||
1626 | mc2_update_ghost_state(state); | ||
1627 | */ | ||
1628 | task_arrives(state, tsk); | 1341 | task_arrives(state, tsk); |
1629 | if (lv == CRIT_LEVEL_C){ | 1342 | if (lv == CRIT_LEVEL_C){ |
1630 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1631 | raw_spin_unlock(&global_lock); | 1343 | raw_spin_unlock(&global_lock); |
1632 | } | 1344 | } |
1633 | /* NOTE: drops state->lock */ | 1345 | /* NOTE: drops state->lock */ |
1634 | TRACE("mc2_new()\n"); | ||
1635 | raw_spin_unlock(&state->lock); | 1346 | raw_spin_unlock(&state->lock); |
1636 | local_irq_restore(flags); | 1347 | local_irq_restore(flags); |
1637 | 1348 | ||
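mc2_task_new() also shows the lock nesting used throughout the file: interrupts are disabled first, then the per-CPU state lock is taken, and global_lock is taken innermost and only for level-C (globally scheduled) work. A minimal sketch of that ordering, with the body between lock and unlock elided:

	/* Sketch of the locking discipline: irqs off -> state->lock -> global_lock. */
	local_irq_save(flags);
	raw_spin_lock(&state->lock);
	if (lv == CRIT_LEVEL_C)
		raw_spin_lock(&global_lock);

	/* ... look up the reservation and record the task's arrival ... */

	if (lv == CRIT_LEVEL_C)
		raw_spin_unlock(&global_lock);
	raw_spin_unlock(&state->lock);
	local_irq_restore(flags);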
@@ -1639,7 +1350,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
1639 | mc2_update_timer_and_unlock(state); | 1350 | mc2_update_timer_and_unlock(state); |
1640 | } else { | 1351 | } else { |
1641 | if (lv == CRIT_LEVEL_C){ | 1352 | if (lv == CRIT_LEVEL_C){ |
1642 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1643 | raw_spin_unlock(&global_lock); | 1353 | raw_spin_unlock(&global_lock); |
1644 | } | 1354 | } |
1645 | raw_spin_unlock(&state->lock); | 1355 | raw_spin_unlock(&state->lock); |
@@ -1647,14 +1357,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | |||
1647 | } | 1357 | } |
1648 | release = res->next_replenishment; | 1358 | release = res->next_replenishment; |
1649 | 1359 | ||
1650 | //local_irq_restore(flags); | ||
1651 | |||
1652 | if (!release) { | 1360 | if (!release) { |
1653 | /*TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); | ||
1654 | release = res->next_replenishment; | ||
1655 | TRACE_TASK(tsk, "mc2_task_new() next_release SET! = %llu\n", release); | ||
1656 | release_at(tsk, release); | ||
1657 | */ | ||
1658 | BUG(); | 1361 | BUG(); |
1659 | } | 1362 | } |
1660 | else | 1363 | else |
@@ -1670,22 +1373,17 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1670 | struct reservation *res = NULL, *next; | 1373 | struct reservation *res = NULL, *next; |
1671 | struct sup_reservation_environment *sup_env; | 1374 | struct sup_reservation_environment *sup_env; |
1672 | int found = 0; | 1375 | int found = 0; |
1673 | //enum crit_level lv = get_task_crit_level(current); | ||
1674 | unsigned long flags; | 1376 | unsigned long flags; |
1675 | int i; | 1377 | int i; |
1676 | 1378 | ||
1677 | if (cpu == -1) { | 1379 | if (cpu == -1) { |
1678 | struct next_timer_event *event, *e_next; | 1380 | struct next_timer_event *event, *e_next; |
1679 | local_irq_save(flags); | 1381 | local_irq_save(flags); |
1680 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1681 | raw_spin_lock(&global_lock); | 1382 | raw_spin_lock(&global_lock); |
1682 | 1383 | ||
1683 | /* if the reservation is global reservation */ | 1384 | /* if the reservation is global reservation, |
1684 | //state = local_cpu_state(); | 1385 | * delete reservation id in all modes */ |
1685 | //delete reservation id in all modes | ||
1686 | for(i = 0; i < NR_MODES; i++) { | 1386 | for(i = 0; i < NR_MODES; i++) { |
1687 | //raw_spin_lock(&state->lock); | ||
1688 | |||
1689 | list_for_each_entry_safe(res, next, &_global_env_modes[i].depleted_reservations, list) { | 1387 | list_for_each_entry_safe(res, next, &_global_env_modes[i].depleted_reservations, list) { |
1690 | if (res->id == reservation_id) { | 1388 | if (res->id == reservation_id) { |
1691 | list_del(&res->list); | 1389 | list_del(&res->list); |
@@ -1715,17 +1413,14 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1715 | } | 1413 | } |
1716 | } | 1414 | } |
1717 | 1415 | ||
1718 | //raw_spin_unlock(&state->lock); | ||
1719 | list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) { | 1416 | list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) { |
1720 | if (event->id == reservation_id) { | 1417 | if (event->id == reservation_id) { |
1721 | list_del(&event->list); | 1418 | list_del(&event->list); |
1722 | TRACE("EVENT id %d deleted\n", event->id); | ||
1723 | kfree(event); | 1419 | kfree(event); |
1724 | } | 1420 | } |
1725 | } | 1421 | } |
1726 | } | 1422 | } |
1727 | 1423 | ||
1728 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1729 | raw_spin_unlock(&global_lock); | 1424 | raw_spin_unlock(&global_lock); |
1730 | local_irq_restore(flags); | 1425 | local_irq_restore(flags); |
1731 | } else { | 1426 | } else { |
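Reservation teardown searches every mode's lists and frees matching entries while iterating, which is why the _safe list iterator is used throughout. A condensed sketch of the pattern from the hunk above, shown here for the timer-event list only (the committed code applies the same idiom to the active and depleted reservation lists as well):

	/* Sketch only: list_for_each_entry_safe() caches the next pointer, so the
	 * matching entry can be unlinked and freed while the list is walked. */
	for (i = 0; i < NR_MODES; i++) {
		list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) {
			if (event->id == reservation_id) {
				list_del(&event->list);
				kfree(event);
			}
		}
	}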
@@ -1734,8 +1429,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1734 | for (i = 0; i < NR_MODES; i++){ | 1429 | for (i = 0; i < NR_MODES; i++){ |
1735 | local_irq_save(flags); | 1430 | local_irq_save(flags); |
1736 | raw_spin_lock(&state->lock); | 1431 | raw_spin_lock(&state->lock); |
1737 | |||
1738 | // res = sup_find_by_id(state->sup_env, reservation_id); | ||
1739 | sup_env = &(state->sup_env_modes[i]); | 1432 | sup_env = &(state->sup_env_modes[i]); |
1740 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | 1433 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { |
1741 | if (res->id == reservation_id) { | 1434 | if (res->id == reservation_id) { |
@@ -1750,7 +1443,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1750 | kfree(res); | 1443 | kfree(res); |
1751 | found = 1; | 1444 | found = 1; |
1752 | ret = 0; | 1445 | ret = 0; |
1753 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1754 | } | 1446 | } |
1755 | } | 1447 | } |
1756 | if (!found) { | 1448 | if (!found) { |
@@ -1766,7 +1458,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1766 | kfree(res); | 1458 | kfree(res); |
1767 | found = 1; | 1459 | found = 1; |
1768 | ret = 0; | 1460 | ret = 0; |
1769 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1770 | } | 1461 | } |
1771 | } | 1462 | } |
1772 | } | 1463 | } |
@@ -1783,7 +1474,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | |||
1783 | kfree(res); | 1474 | kfree(res); |
1784 | found = 1; | 1475 | found = 1; |
1785 | ret = 0; | 1476 | ret = 0; |
1786 | TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); | ||
1787 | } | 1477 | } |
1788 | } | 1478 | } |
1789 | } | 1479 | } |
@@ -1805,9 +1495,7 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1805 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | 1495 | struct mc2_task_state* tinfo = get_mc2_state(tsk); |
1806 | struct mc2_cpu_state *state; | 1496 | struct mc2_cpu_state *state; |
1807 | enum crit_level lv = tinfo->mc2_param.crit; | 1497 | enum crit_level lv = tinfo->mc2_param.crit; |
1808 | //struct crit_entry* ce; | 1498 | int cpu, i; |
1809 | int cpu; | ||
1810 | int i; | ||
1811 | 1499 | ||
1812 | local_irq_save(flags); | 1500 | local_irq_save(flags); |
1813 | if (tinfo->cpu != -1) | 1501 | if (tinfo->cpu != -1) |
@@ -1819,10 +1507,6 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1819 | 1507 | ||
1820 | if (state->scheduled == tsk) | 1508 | if (state->scheduled == tsk) |
1821 | state->scheduled = NULL; | 1509 | state->scheduled = NULL; |
1822 | |||
1823 | //ce = &state->crit_entries[lv]; | ||
1824 | //if (ce->running == tsk) | ||
1825 | // ce->running = NULL; | ||
1826 | 1510 | ||
1827 | /* remove from queues */ | 1511 | /* remove from queues */ |
1828 | if (is_running(tsk)) { | 1512 | if (is_running(tsk)) { |
@@ -1832,37 +1516,24 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1832 | /* update both global and partitioned */ | 1516 | /* update both global and partitioned */ |
1833 | if (lv < CRIT_LEVEL_C) { | 1517 | if (lv < CRIT_LEVEL_C) { |
1834 | sup_update_time(state->sup_env, litmus_clock()); | 1518 | sup_update_time(state->sup_env, litmus_clock()); |
1835 | /* TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); | 1519 | } |
1836 | gmp_update_time(_global_env, litmus_clock()); | ||
1837 | TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); | ||
1838 | */ } | ||
1839 | else if (lv == CRIT_LEVEL_C) { | 1520 | else if (lv == CRIT_LEVEL_C) { |
1840 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
1841 | raw_spin_lock(&global_lock); | 1521 | raw_spin_lock(&global_lock); |
1842 | gmp_update_time(_global_env, litmus_clock()); | 1522 | gmp_update_time(_global_env, litmus_clock()); |
1843 | //raw_spin_unlock(&_global_env.lock); | ||
1844 | } | 1523 | } |
1845 | /* 9/20/2015 fix | ||
1846 | mc2_update_ghost_state(state); | ||
1847 | */ | ||
1848 | task_departs(tsk, 0); | 1524 | task_departs(tsk, 0); |
1849 | if (lv == CRIT_LEVEL_C){ | 1525 | if (lv == CRIT_LEVEL_C){ |
1850 | //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); | ||
1851 | raw_spin_unlock(&global_lock); | 1526 | raw_spin_unlock(&global_lock); |
1852 | } | 1527 | } |
1853 | /* NOTE: drops state->lock */ | ||
1854 | TRACE("mc2_exit()\n"); | ||
1855 | |||
1856 | atomic_dec(&num_sync_released); | 1528 | atomic_dec(&num_sync_released); |
1857 | 1529 | ||
1530 | /* NOTE: drops state->lock */ | ||
1858 | mc2_update_timer_and_unlock(state); | 1531 | mc2_update_timer_and_unlock(state); |
1859 | } else { | 1532 | } else { |
1860 | raw_spin_unlock(&state->lock); | 1533 | raw_spin_unlock(&state->lock); |
1861 | |||
1862 | } | 1534 | } |
1863 | 1535 | ||
1864 | if (lv == CRIT_LEVEL_C) { | 1536 | if (lv == CRIT_LEVEL_C) { |
1865 | //raw_spin_lock(&global_lock); | ||
1866 | raw_spin_lock(&mode_lock); | 1537 | raw_spin_lock(&mode_lock); |
1867 | for(i = 1; i < NR_MODES; i++){ | 1538 | for(i = 1; i < NR_MODES; i++){ |
1868 | if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) ) | 1539 | if ( !(tsk_mc2_data(tsk)->mode_mask & (1<<i)) ) |
@@ -1871,7 +1542,6 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1871 | } | 1542 | } |
1872 | mode_sizes[0]--; | 1543 | mode_sizes[0]--; |
1873 | raw_spin_unlock(&mode_lock); | 1544 | raw_spin_unlock(&mode_lock); |
1874 | //raw_spin_unlock(&global_lock); | ||
1875 | 1545 | ||
1876 | for_each_online_cpu(cpu) { | 1546 | for_each_online_cpu(cpu) { |
1877 | state = cpu_state_for(cpu); | 1547 | state = cpu_state_for(cpu); |
@@ -1881,20 +1551,14 @@ static void mc2_task_exit(struct task_struct *tsk) | |||
1881 | 1551 | ||
1882 | if (state->scheduled == tsk) | 1552 | if (state->scheduled == tsk) |
1883 | state->scheduled = NULL; | 1553 | state->scheduled = NULL; |
1884 | 1554 | ||
1885 | //ce = &state->crit_entries[lv]; | ||
1886 | //if (ce->running == tsk) | ||
1887 | // ce->running = NULL; | ||
1888 | |||
1889 | raw_spin_unlock(&state->lock); | 1555 | raw_spin_unlock(&state->lock); |
1890 | } | 1556 | } |
1891 | } | 1557 | } |
1892 | 1558 | ||
1893 | local_irq_restore(flags); | 1559 | local_irq_restore(flags); |
1894 | 1560 | ||
1895 | if (is_mode_poll_task(tsk)){// && (tinfo->cpu == 0)) { | 1561 | if (is_mode_poll_task(tsk)) { |
1896 | //cpu_0_spin_flag = !cpu_0_spin_flag; // release other cpu before exit. | ||
1897 | //cpu_0_task_exist = false; | ||
1898 | mode_poll_exited = true; | 1562 | mode_poll_exited = true; |
1899 | } | 1563 | } |
1900 | 1564 | ||
@@ -1911,7 +1575,6 @@ static long create_polling_reservation( | |||
1911 | struct reservation_config *config) | 1575 | struct reservation_config *config) |
1912 | { | 1576 | { |
1913 | struct mc2_cpu_state *state = NULL; | 1577 | struct mc2_cpu_state *state = NULL; |
1914 | //struct reservation* res = NULL; | ||
1915 | struct polling_reservation *pres; | 1578 | struct polling_reservation *pres; |
1916 | unsigned long flags; | 1579 | unsigned long flags; |
1917 | int use_edf = config->priority == LITMUS_NO_PRIORITY; | 1580 | int use_edf = config->priority == LITMUS_NO_PRIORITY; |
@@ -1955,7 +1618,7 @@ static long create_polling_reservation( | |||
1955 | TRACE("CREATE_POLLING_RESERVATION id %d mode %d\n", config->id, config->mode); | 1618 | TRACE("CREATE_POLLING_RESERVATION id %d mode %d\n", config->id, config->mode); |
1956 | if (config->cpu != -1) { | 1619 | if (config->cpu != -1) { |
1957 | int i, is_exist = 0; | 1620 | int i, is_exist = 0; |
1958 | //raw_spin_lock_irqsave(&_global_env.lock, flags); | 1621 | |
1959 | state = cpu_state_for(config->cpu); | 1622 | state = cpu_state_for(config->cpu); |
1960 | raw_spin_lock_irqsave(&state->lock, flags); | 1623 | raw_spin_lock_irqsave(&state->lock, flags); |
1961 | 1624 | ||
@@ -2007,10 +1670,6 @@ static long create_polling_reservation( | |||
2007 | pres->res.blocked_by_ghost = 0; | 1670 | pres->res.blocked_by_ghost = 0; |
2008 | pres->res.is_ghost = NO_CPU; | 1671 | pres->res.is_ghost = NO_CPU; |
2009 | pres->res.mode = config->mode; | 1672 | pres->res.mode = config->mode; |
2010 | /*if (config->priority == LITMUS_MAX_PRIORITY) { | ||
2011 | level_a_priorities[config->cpu]++; | ||
2012 | pres->res.priority = level_a_priorities[config->cpu]; | ||
2013 | }*/ | ||
2014 | if (!use_edf) | 1673 | if (!use_edf) |
2015 | pres->res.priority = config->priority; | 1674 | pres->res.priority = config->priority; |
2016 | sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res); | 1675 | sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res); |
@@ -2021,7 +1680,6 @@ static long create_polling_reservation( | |||
2021 | } | 1680 | } |
2022 | 1681 | ||
2023 | raw_spin_unlock_irqrestore(&state->lock, flags); | 1682 | raw_spin_unlock_irqrestore(&state->lock, flags); |
2024 | //raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
2025 | 1683 | ||
2026 | } else { | 1684 | } else { |
2027 | int i, is_exist = 0; | 1685 | int i, is_exist = 0; |
@@ -2062,7 +1720,6 @@ static long create_polling_reservation( | |||
2062 | pres_0->res.tsk = current; | 1720 | pres_0->res.tsk = current; |
2063 | } | 1721 | } |
2064 | 1722 | ||
2065 | |||
2066 | // force ids to be unique within the desired mode | 1723 | // force ids to be unique within the desired mode |
2067 | if (gmp_find_by_id(&(_global_env_modes[config->mode]), config->id)){ | 1724 | if (gmp_find_by_id(&(_global_env_modes[config->mode]), config->id)){ |
2068 | resExist = true; | 1725 | resExist = true; |
@@ -2293,18 +1950,16 @@ static void mc2_setup_domain_proc(void) | |||
2293 | 1950 | ||
2294 | static long mc2_activate_plugin(void) | 1951 | static long mc2_activate_plugin(void) |
2295 | { | 1952 | { |
2296 | int cpu;//, lv; | 1953 | int cpu; |
2297 | struct mc2_cpu_state *state; | 1954 | struct mc2_cpu_state *state; |
2298 | struct cpu_entry *ce; | 1955 | struct cpu_entry *ce; |
2299 | int i; | 1956 | int i; |
2300 | //unsigned long *cpu_counter; | ||
2301 | 1957 | ||
2302 | for(i = 0; i < NR_MODES; i++){ | 1958 | for(i = 0; i < NR_MODES; i++){ |
2303 | gmp_init(&(_global_env_modes[i])); | 1959 | gmp_init(&(_global_env_modes[i])); |
2304 | } | 1960 | } |
2305 | _global_env = &_global_env_modes[0]; | 1961 | _global_env = &_global_env_modes[0]; |
2306 | 1962 | ||
2307 | //raw_spin_lock_init(&_lowest_prio_cpu.lock); | ||
2308 | raw_spin_lock_init(&mode_lock); | 1963 | raw_spin_lock_init(&mode_lock); |
2309 | raw_spin_lock_init(&global_lock); | 1964 | raw_spin_lock_init(&global_lock); |
2310 | 1965 | ||
@@ -2314,7 +1969,6 @@ static long mc2_activate_plugin(void) | |||
2314 | TRACE("Initializing CPU%d...\n", cpu); | 1969 | TRACE("Initializing CPU%d...\n", cpu); |
2315 | 1970 | ||
2316 | resched_cpu[cpu] = 0; | 1971 | resched_cpu[cpu] = 0; |
2317 | //level_a_priorities[cpu] = 0; | ||
2318 | this_cpu_write(mode_counter, 0); | 1972 | this_cpu_write(mode_counter, 0); |
2319 | 1973 | ||
2320 | state = cpu_state_for(cpu); | 1974 | state = cpu_state_for(cpu); |
@@ -2330,11 +1984,7 @@ static long mc2_activate_plugin(void) | |||
2330 | printk(KERN_ALERT "CPU%d state->lock %p\n", cpu, &state->lock); | 1984 | printk(KERN_ALERT "CPU%d state->lock %p\n", cpu, &state->lock); |
2331 | state->cpu = cpu; | 1985 | state->cpu = cpu; |
2332 | state->scheduled = NULL; | 1986 | state->scheduled = NULL; |
2333 | //for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | 1987 | |
2334 | // struct crit_entry *cr_entry = &state->crit_entries[lv]; | ||
2335 | // cr_entry->level = lv; | ||
2336 | // cr_entry->running = NULL; | ||
2337 | //} | ||
2338 | for(i = 0; i < NR_MODES; i++){ | 1988 | for(i = 0; i < NR_MODES; i++){ |
2339 | sup_init(&(state->sup_env_modes[i])); | 1989 | sup_init(&(state->sup_env_modes[i])); |
2340 | } | 1990 | } |
@@ -2342,7 +1992,6 @@ static long mc2_activate_plugin(void) | |||
2342 | 1992 | ||
2343 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | 1993 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); |
2344 | state->timer.function = on_scheduling_timer; | 1994 | state->timer.function = on_scheduling_timer; |
2345 | //state->spin_flag = false; | ||
2346 | this_cpu_write(mode_counter, 0); | 1995 | this_cpu_write(mode_counter, 0); |
2347 | } | 1996 | } |
2348 | 1997 | ||
@@ -2357,8 +2006,6 @@ static long mc2_activate_plugin(void) | |||
2357 | mode_sizes[i] = 0; | 2006 | mode_sizes[i] = 0; |
2358 | } | 2007 | } |
2359 | res_reported = 0; | 2008 | res_reported = 0; |
2360 | //cpu_0_spin_flag = false; | ||
2361 | //cpu_0_task_exist = false; | ||
2362 | 2009 | ||
2363 | return 0; | 2010 | return 0; |
2364 | } | 2011 | } |
@@ -2436,7 +2083,6 @@ static long mc2_deactivate_plugin(void) | |||
2436 | raw_spin_unlock(&state->lock); | 2083 | raw_spin_unlock(&state->lock); |
2437 | } | 2084 | } |
2438 | 2085 | ||
2439 | //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); | ||
2440 | raw_spin_lock(&global_lock); | 2086 | raw_spin_lock(&global_lock); |
2441 | for(i = 0; i < NR_MODES; i++){ | 2087 | for(i = 0; i < NR_MODES; i++){ |
2442 | _global_env = &_global_env_modes[i]; | 2088 | _global_env = &_global_env_modes[i]; |