 litmus/bank_proc.c            |  19
 litmus/cache_proc.c           |  52
 litmus/litmus.c               | 110
 litmus/polling_reservations.c |   3
 litmus/reservation.c          |   3
 litmus/sched_mc2.c            | 362
 mm/migrate.c                  |  12
 mm/rmap.c                     |   4
 8 files changed, 53 insertions(+), 512 deletions(-)
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index df9f5730ed05..2284f4fd5816 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -247,8 +247,8 @@ static int do_add_pages(void)
 		counter[color]++;
 		// printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
 		//show_nr_pages();
-		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
 		//if ( PAGES_PER_COLOR && color>=16*2) {
 			add_page_to_color_list(page);
 			// printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
@@ -304,7 +304,7 @@ out:
 * This function should not be accessed by others directly.
 *
 */
-static struct page *new_alloc_page_color( unsigned long color)
+static struct page *new_alloc_page_color( unsigned long color, int do_refill)
 {
 	// printk("allocate new page color = %d\n", color);
 	struct color_group *cgroup;
@@ -333,18 +333,19 @@ static struct page *new_alloc_page_color( unsigned long color)
 out_unlock:
 	spin_unlock(&cgroup->lock);
 out:
-	if( smallest_nr_pages() == 0)
+	if( smallest_nr_pages() == 0 && do_refill == 1)
 	{
 		do_add_pages();
 		// printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");

 	}
+
 	return rPage;
 }

 struct page* get_colored_page(unsigned long color)
 {
-	return new_alloc_page_color(color);
+	return new_alloc_page_color(color, 1);
 }

 /*
@@ -368,12 +369,12 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 	unsigned int color;


 	unsigned int idx = 0;
+	do {
 	idx += num_by_bitmask_index(set_partition[node], set_index[node]);
 	idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
-	//printk("node = %d, idx = %d\n", node, idx);
-
-	rPage = new_alloc_page_color(idx);
+	rPage = new_alloc_page_color(idx, 0);
+	} while (rPage == NULL);


 	set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
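Note: new_alloc_page() is the allocation callback used on the migration path, so it now passes do_refill == 0 and retries instead of refilling inline; since idx is not reset inside the loop, each retry accumulates another bitmask step and effectively probes a further color/bank index. A condensed sketch of the two entry points after this hunk (the helper inside the loop is a hypothetical stand-in for the two idx += lines):

	/* Condensed sketch, not the full kernel code: get_colored_page()
	 * may refill the pools synchronously (do_refill == 1), while the
	 * migration callback must not, so it spins until a page appears. */
	struct page *get_colored_page(unsigned long color)
	{
		return new_alloc_page_color(color, /* do_refill = */ 1);
	}

	struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
	{
		struct page *rPage;
		unsigned int idx = 0;

		do {
			idx += next_partition_step(node);	/* hypothetical helper */
			rPage = new_alloc_page_color(idx, /* do_refill = */ 0);
		} while (rPage == NULL);

		return rPage;
	}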
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 87077d4366dc..15c1b0145be2 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -1006,59 +1006,9 @@ int setup_flusher_array(void)
 		}

 		flusher_pages[way] = flusher_color_arr;
-		/* This is ugly. */
 		for (color = 0; color < MAX_NR_COLORS; color++) {
 			int node;
-			switch (color) {
-			case 0:
-				node = 48;
-				break;
-			case 1:
-				node = 49;
-				break;
-			case 2:
-				node = 50;
-				break;
-			case 3:
-				node = 51;
-				break;
-			case 4:
-				node = 68;
-				break;
-			case 5:
-				node = 69;
-				break;
-			case 6:
-				node = 86;
-				break;
-			case 7:
-				node = 87;
-				break;
-			case 8:
-				node = 88;
-				break;
-			case 9:
-				node = 105;
-				break;
-			case 10:
-				node = 106;
-				break;
-			case 11:
-				node = 107;
-				break;
-			case 12:
-				node = 108;
-				break;
-			case 13:
-				node = 125;
-				break;
-			case 14:
-				node = 126;
-				break;
-			case 15:
-				node = 127;
-				break;
-			}
+			node = color + 112; // populate from bank 7
 			page = get_colored_page(node);
 			if (!page) {
 				printk(KERN_WARNING "no more colored pages\n");
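Note: the literal table collapses to arithmetic because the node index appears to encode idx = color + number_cachecolors * bank, the formula in bank_proc.c's new_alloc_page(); with 16 colors per bank, bank 7 spans indices 112..127. The old table scattered the flusher pages across banks 3 to 7 (48 is bank 3 color 0, 68 is bank 4 color 4, and so on); the new code takes all 16 colors from bank 7. A sketch of the assumed encoding:

	/* Sketch of the assumed node encoding; the 16-colors-per-bank
	 * value mirrors the idx computation in bank_proc.c and is an
	 * assumption here. */
	static inline int flusher_node(int bank, int color)
	{
		return bank * 16 + color;
	}
	/* flusher_node(7, 0) == 112 ... flusher_node(7, 15) == 127 */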
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 1f5e49114b2c..7fbabcee64d5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -385,7 +385,7 @@ asmlinkage long sys_set_page_color(int cpu)
 	put_task_struct(current);

 	down_read(&mm->mmap_sem);
-	TRACE_TASK(current, "SYSCALL set_page_color\n");
+
 	vma_itr = mm->mmap;
 	/* Iterate all vm_area_struct */
 	while (vma_itr != NULL) {
@@ -439,10 +439,6 @@ asmlinkage long sys_set_page_color(int cpu)
 				lib_page->r_pfn[cpu_i] = INVALID_PFN;
 			}
 			list_add_tail(&lib_page->list, &shared_lib_pages);
-			TRACE_TASK(current, "NEW PAGE %05lx ADDED.\n", lib_page->master_pfn);
-		}
-		else {
-			TRACE_TASK(current, "FOUND PAGE %05lx in the list.\n", lib_page->master_pfn);
 		}

 		/* add to task_shared_pagelist */
@@ -475,15 +471,11 @@ asmlinkage long sys_set_page_color(int cpu)
 	}

 	ret = 0;
-	if (!is_realtime(current))
+	lv = tsk_rt(current)->mc2_data->crit;
+	if (cpu == -1)
 		node = 8;
-	else {
-		lv = tsk_rt(current)->mc2_data->crit;
-		if (cpu == -1)
-			node = 8;
-		else
-			node = cpu*2 + lv;
-	}
+	else
+		node = cpu*2 + lv;

 	/* Migrate private pages */
 	if (!list_empty(&pagelist)) {
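Note: the !is_realtime() special case is gone, so every caller now derives the target node from its MC2 criticality (a non-real-time caller would now reach tsk_rt(current)->mc2_data, so the syscall presumably assumes an MC2 task). Under the bank/color encoding above, cpu*2 + lv gives each (CPU, level) pair its own partition, with node 8 as the shared pool. A sketch of the assumed mapping (the 4-CPU, two-level layout is inferred, not stated):

	/* Sketch of the node layout behind "node = cpu*2 + lv":
	 * nodes 0..7 = per-CPU, per-level partitions; node 8 = shared
	 * pool for cpu == -1 (global / Level-C tasks). */
	static int page_color_node(int cpu, enum crit_level lv)
	{
		if (cpu == -1)
			return 8;
		return cpu * 2 + lv;	/* e.g. CPU 2, level B -> node 5 */
	}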
@@ -511,60 +503,11 @@ asmlinkage long sys_set_page_color(int cpu)
 	printk(KERN_INFO "node = %ld, nr_private_pages = %d, nr_shared_pages = %d, nr_failed_to_isolate_lru = %d, nr_not_migrated = %d\n", node, nr_pages, nr_shared_pages, nr_failed, nr_not_migrated);

 	flush_cache(1);
-
-	/* for debug START */
-	TRACE_TASK(current, "PSL PAGES\n");
-	{
-		struct shared_lib_page *lpage;
-
-		rcu_read_lock();
-		list_for_each_entry(lpage, &shared_lib_pages, list)
-		{
-			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3], lpage->r_pfn[4]);
-		}
-		rcu_read_unlock();
-	}
-#if 0
-	TRACE_TASK(current, "AFTER migration\n");
-	down_read(&mm->mmap_sem);
-	vma_itr = mm->mmap;
-	while (vma_itr != NULL) {
-		unsigned int num_pages = 0, i;
-		struct page *old_page = NULL;
-
-		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
-		for (i = 0; i < num_pages; i++) {
-			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
-			if (IS_ERR(old_page))
-				continue;
-			if (!old_page)
-				continue;
-
-			if (PageReserved(old_page)) {
-				TRACE("Reserved Page!\n");
-				put_page(old_page);
-				continue;
-			}
-
-			if (page_count(old_page) - page_mapcount(old_page) == 1) {
-				put_page(old_page);
-				continue;
-			}
-
-			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page));
-			put_page(old_page);
-		}
-
-		vma_itr = vma_itr->vm_next;
-	}
-	up_read(&mm->mmap_sem);
-	/* for debug FIN. */
-#endif

 	return nr_not_migrated;
 }

-/* sys_test_call() is a test system call for developing */
+/* sys_test_call() is a test system call for debugging */
 asmlinkage long sys_test_call(unsigned int param)
 {
 	long ret = 0;
@@ -572,6 +515,12 @@ asmlinkage long sys_test_call(unsigned int param)

 	TRACE_CUR("test_call param = %d\n", param);

+	/* if param == 0,
+	 * show vm regions and the page frame numbers
+	 * associated with the vm region.
+	 * if param == 1,
+	 * print the master list.
+	 */
 	if (param == 0) {
 		down_read(&current->mm->mmap_sem);
 		vma_itr = current->mm->mmap;
@@ -584,16 +533,7 @@ asmlinkage long sys_test_call(unsigned int param)
 			TRACE_TASK(current, "vm_flags : %lx\n", vma_itr->vm_flags);
 			TRACE_TASK(current, "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
 			TRACE_TASK(current, "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
-/*			if (vma_itr->vm_file) {
-				struct file *fp = vma_itr->vm_file;
-				unsigned long fcount = atomic_long_read(&(fp->f_count));
-				printk(KERN_INFO "f_count : %ld\n", fcount);
-				if (fcount > 1) {
-					vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot);
-				}
-			}
-			printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
-*/
+
 			num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
 			for (i = 0; i < num_pages; i++) {
 				old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
@@ -616,20 +556,20 @@ asmlinkage long sys_test_call(unsigned int param)
 			}
 			TRACE_TASK(current, "------------------------------------------------------\n");
 			up_read(&current->mm->mmap_sem);
-	}
-	else if (param == 1) {
-		int i;
-		flush_cache(1);
-		for (i = 0; i < 4; i++) {
-			lock_cache(i, 0x00003fff);
-		}
-	}
-	else if (param == 2) {
-		int i;
-		for (i = 0; i < 4; i++) {
-			lock_cache(i, 0xffffffff);
-		}
+	} else if (param == 1) {
+		TRACE_TASK(current, "Shared pages and replicas.\n");
+		{
+			struct shared_lib_page *lpage;
+
+			rcu_read_lock();
+			list_for_each_entry(lpage, &shared_lib_pages, list)
+			{
+				TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3], lpage->r_pfn[4]);
+			}
+			rcu_read_unlock();
+		}
 	}
+
 	return ret;
 }

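Note: the cache lock/unlock experiments behind param 1 and 2 are dropped, and param 1 now prints the PSL master list, the same dump that used to run inline in sys_set_page_color(). From the fields this patch touches, the record looks roughly like the sketch below (the replica count is an assumption read off the five r_pfn values in the trace line):

	/* Rough shape of a PSL entry as used in this patch: one master
	 * frame plus per-CPU/node replicas. Array length is assumed. */
	#define NR_REPLICAS 5				/* assumption */

	struct shared_lib_page {
		struct list_head list;			/* on shared_lib_pages */
		unsigned long master_pfn;		/* original page frame */
		struct page *r_page[NR_REPLICAS];	/* replica pages, or NULL */
		unsigned long r_pfn[NR_REPLICAS];	/* replica PFNs, INVALID_PFN if unset */
	};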
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 06bc1f5b9267..d44a403a60b7 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -4,9 +4,6 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>

-#define TRACE(fmt, args...) do {} while (false)
-#define TRACE_TASK(fmt, args...) do {} while (false)
-
 static void periodic_polling_client_arrives(
 	struct reservation* res,
 	struct reservation_client *client
diff --git a/litmus/reservation.c b/litmus/reservation.c
index cdda89d4208f..5eee01ae3647 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -4,9 +4,6 @@
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>

-#define TRACE(fmt, args...) do {} while (false)
-#define TRACE_TASK(fmt, args...) do {} while (false)
-
 #define BUDGET_ENFORCEMENT_AT_C 0

 void reservation_init(struct reservation *res)
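Note: polling_reservations.c, reservation.c, and sched_mc2.c below all drop the same pair of file-local stubs. Each stub is a variadic no-op that shadowed the real LITMUS^RT trace macros, so every TRACE()/TRACE_TASK() call in these files compiled to nothing; deleting the stubs re-enables normal trace output. The removed pattern, for reference:

	/* The removed pattern: a do-nothing variadic macro shadowing the
	 * real TRACE()/TRACE_TASK() from the litmus debug headers. A call
	 * such as TRACE("Timer fired at %llu\n", litmus_clock()) expands
	 * to "do {} while (false);": the arguments are discarded without
	 * being evaluated, and no trace record is emitted. */
	#define TRACE(fmt, args...) do {} while (false)
	#define TRACE_TASK(fmt, args...) do {} while (false)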
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 588f78e2107f..6c02a56959b5 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -3,7 +3,7 @@
 *
 * Implementation of the Mixed-Criticality on MultiCore scheduler
 *
- * Thus plugin implements a scheduling algorithm proposed in
+ * This plugin implements a scheduling algorithm proposed in
 * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper.
 */

@@ -27,9 +27,6 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>

-#define TRACE(fmt, args...) do {} while (false)
-#define TRACE_TASK(fmt, args...) do {} while (false)
-
 #define BUDGET_ENFORCEMENT_AT_C 0

 extern void do_partition(enum crit_level lv, int cpu);
@@ -133,7 +130,7 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk)
 static void task_departs(struct task_struct *tsk, int job_complete)
 {
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
-	//struct mc2_cpu_state* state = local_cpu_state();
+
 	struct reservation* res = NULL;
 	struct reservation_client *client = NULL;

@@ -144,33 +141,15 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 	BUG_ON(!res);
 	BUG_ON(!client);

-	/* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
+	/* No ghost job handling, empty remaining budget */
 	if (job_complete) {
 		res->cur_budget = 0;
 		sched_trace_task_completion(tsk, 0);
 	}
-	/* fix end */

 	res->ops->client_departs(res, client, job_complete);
 	tinfo->has_departed = true;
-	TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock());
-	/* 9/18/2015 fix start - no remaining budget
-	 *
-	if (job_complete && res->cur_budget) {
-		struct crit_entry* ce;
-		enum crit_level lv = tinfo->mc2_param.crit;
-
-		ce = &state->crit_entries[lv];
-		ce->running = tsk;
-		res->is_ghost = state->cpu;
-#if BUDGET_ENFORCEMENT_AT_C
-		gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-#endif
-		TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
-	}
-	* fix -end
-	*/
-
+	TRACE_TASK(tsk, "Client departs with budget %llu at %llu\n", res->cur_budget, litmus_clock());
 }

 /* task_arrive - put a task into its reservation
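Note: with the ghost-job branch deleted outright (it had been commented out since the 9/18/2015 fix), task_departs() and task_arrives() reduce to the reservation-client handshake: completion forfeits leftover budget, and the crit_entry bookkeeping only clears a stale ce->running pointer. The client hooks driven here have roughly the shape below; this is a reduced sketch, and the real reservation_ops in this tree carries more members (budget drain, replenishment, dispatch):

	/* Reduced sketch of the client hooks used by task_arrives() and
	 * task_departs(); anything beyond what this patch shows is an
	 * assumption. */
	struct reservation_ops_sketch {
		/* enqueue a client; the reservation may start consuming budget */
		void (*client_arrives)(struct reservation *res,
				       struct reservation_client *client);
		/* dequeue a client; job_complete == 1 means cur_budget was
		 * already forfeited by the caller */
		void (*client_departs)(struct reservation *res,
				       struct reservation_client *client,
				       int job_complete);
	};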
@@ -188,20 +167,8 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)

 	tinfo->has_departed = false;

-	switch(lv) {
-	case CRIT_LEVEL_A:
-	case CRIT_LEVEL_B:
-		TS_RELEASE_START;
-		break;
-	case CRIT_LEVEL_C:
-		TS_RELEASE_C_START;
-		break;
-	default:
-		break;
-	}
-
 	res->ops->client_arrives(res, client);
-	TRACE_TASK(tsk, "CLIENT ARRIVES at %llu\n", litmus_clock());
+	TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock());

 	if (lv != NUM_CRIT_LEVELS) {
 		struct crit_entry *ce;
@@ -210,22 +177,6 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)

 		if (ce->running == tsk)
 			ce->running = NULL;
 	}
-	/* do we need this??
-	if (resched_cpu[state->cpu])
-		litmus_reschedule(state->cpu);
-	*/
-
-	switch(lv) {
-	case CRIT_LEVEL_A:
-	case CRIT_LEVEL_B:
-		TS_RELEASE_END;
-		break;
-	case CRIT_LEVEL_C:
-		TS_RELEASE_C_END;
-		break;
-	default:
-		break;
-	}
 }

 /* get_lowest_prio_cpu - return the lowest priority cpu
@@ -239,10 +190,8 @@ static int get_lowest_prio_cpu(lt_t priority)
 	int cpu, ret = NO_CPU;
 	lt_t latest_deadline = 0;

-	//raw_spin_lock(&_lowest_prio_cpu.lock);
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
-		//raw_spin_unlock(&_lowest_prio_cpu.lock);
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
 		return ce->cpu;
 	} else {
@@ -260,7 +209,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 		if (!ce->will_schedule) {
 			if (!ce->scheduled) {
 				/* Idle cpu, return this. */
-				//raw_spin_unlock(&_lowest_prio_cpu.lock);
 				TRACE("CPU %d is the lowest!\n", ce->cpu);
 				return ce->cpu;
 			} else if (ce->lv == CRIT_LEVEL_C &&
@@ -270,8 +218,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 			}
 		}
 	}
-
-	//raw_spin_unlock(&_lowest_prio_cpu.lock);

 	if (priority >= latest_deadline)
 		ret = NO_CPU;
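Note: the commented-out _lowest_prio_cpu lock pairs disappear here and at every other site in this patch; all remaining call sites hold _global_env.lock, which now serializes the cpu_entries array on its own. The rule that remains: prefer an idle CPU, otherwise the CPU whose Level-C job has the latest deadline, and return NO_CPU if even that deadline beats the incoming priority. Schematically (the entry type name and ->deadline field are assumptions for illustration):

	/* Sketch of the selection rule: earlier deadline == higher
	 * priority, so the "lowest" CPU has the latest Level-C deadline;
	 * NO_CPU means nothing is worth preempting. */
	static int sketch_lowest_prio_cpu(lt_t priority)
	{
		int cpu, lowest = NO_CPU;
		lt_t latest_deadline = 0;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[cpu];

			if (ce->will_schedule)
				continue;		/* already claimed */
			if (!ce->scheduled)
				return ce->cpu;		/* idle CPU wins outright */
			if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) {
				latest_deadline = ce->deadline;
				lowest = ce->cpu;
			}
		}
		return (priority >= latest_deadline) ? NO_CPU : lowest;
	}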
@@ -281,36 +227,6 @@ static int get_lowest_prio_cpu(lt_t priority)
 	return ret;
 }

-/* mc2_update_time - update time for a given criticality level.
- *	caller must hold a proper lock
- *	(cpu_state lock or global lock)
- */
-/* 9/24/2015 temporally not using
-static void mc2_update_time(enum crit_level lv,
-		struct mc2_cpu_state *state, lt_t time)
-{
-	int global_schedule_now;
-
-	if (lv < CRIT_LEVEL_C)
-		sup_update_time(&state->sup_env, time);
-	else if (lv == CRIT_LEVEL_C) {
-		global_schedule_now = gmp_update_time(&_global_env, time);
-		while (global_schedule_now--) {
-			int cpu = get_lowest_prio_cpu(0);
-			if (cpu != NO_CPU) {
-				raw_spin_lock(&_lowest_prio_cpu.lock);
-				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-				raw_spin_unlock(&_lowest_prio_cpu.lock);
-				TRACE("LOWEST CPU = P%d\n", cpu);
-				litmus_reschedule(cpu);
-			}
-		}
-	}
-	else
-		TRACE("update_time(): Criticality level error!!!!\n");
-}
-*/
-
 /* NOTE: drops state->lock */
 /* mc2_update_timer_and_unlock - set a timer and g_timer and unlock
 * Whenever res_env.current_time is updated,
@@ -324,7 +240,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
 	int local, cpus;
 	lt_t update, now;
-	//enum crit_level lv = get_task_crit_level(state->scheduled);
 	struct next_timer_event *event, *next;
 	int reschedule[NR_CPUS];

@@ -357,9 +272,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			list_del(&event->list);
 			kfree(event);
 			if (cpu != NO_CPU) {
-				//raw_spin_lock(&_lowest_prio_cpu.lock);
 				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-				//raw_spin_unlock(&_lowest_prio_cpu.lock);
 				if (cpu == local_cpu_state()->cpu)
 					litmus_reschedule_local();
 				else
@@ -381,11 +294,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	if (update <= now || reschedule[state->cpu]) {
 		reschedule[state->cpu] = 0;
 		litmus_reschedule(state->cpu);
-		/*
-		raw_spin_lock(&state->lock);
-		preempt_if_preemptable(state->scheduled, state->cpu);
-		raw_spin_unlock(&state->lock);
-		*/
 	} else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
 		/* Reprogram only if not already set correctly. */
 		if (!hrtimer_active(&state->timer) ||
@@ -428,7 +336,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			state->cpu,
 			hrtimer_active(&state->timer),
 			ktime_to_ns(hrtimer_get_expires(&state->timer)));
-		//litmus_reschedule(state->cpu);
 		raw_spin_lock(&state->lock);
 		preempt_if_preemptable(state->scheduled, state->cpu);
 		raw_spin_unlock(&state->lock);
@@ -438,85 +345,10 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
 			litmus_reschedule(cpus);
-			/*
-			struct mc2_cpu_state *remote_state;
-			remote_state = cpu_state_for(cpus);
-			raw_spin_lock(&remote_state->lock);
-			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
-			raw_spin_unlock(&remote_state->lock);
-			*/
 		}
 	}
 }

-/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
- *	If the budget of a ghost is exhausted,
- *	clear is_ghost and reschedule
- */
-/*
-static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
-{
-	int lv = 0;
-	struct crit_entry* ce;
-	struct reservation *res;
-	struct mc2_task_state *tinfo;
-	lt_t ret = ULLONG_MAX;
-
-	BUG_ON(!state);
-
-	for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
-		ce = &state->crit_entries[lv];
-		if (ce->running != NULL) {
-			//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
-			tinfo = get_mc2_state(ce->running);
-			if (!tinfo)
-				continue;
-
-			res = res_find_by_id(state, tinfo->mc2_param.res_id);
-			//BUG_ON(!res);
-			if (!res) {
-				printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id);
-				return 0;
-			}
-
-			TRACE("LV %d running id %d budget %llu\n",
-				lv, tinfo->mc2_param.res_id, res->cur_budget);
-			// If the budget is exhausted, clear is_ghost and reschedule
-			if (!res->cur_budget) {
-				struct sup_reservation_environment* sup_env = &state->sup_env;
-
-				TRACE("GHOST FINISH id %d at %llu\n",
-					tinfo->mc2_param.res_id, litmus_clock());
-				ce->running = NULL;
-				res->is_ghost = NO_CPU;
-
-				if (lv < CRIT_LEVEL_C) {
-					res = list_first_entry_or_null(
-						&sup_env->active_reservations,
-						struct reservation, list);
-					if (res)
-						litmus_reschedule_local();
-				} else if (lv == CRIT_LEVEL_C) {
-					res = list_first_entry_or_null(
-						&_global_env.active_reservations,
-						struct reservation, list);
-					if (res)
-						litmus_reschedule(state->cpu);
-				}
-			} else {
-				//TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
-				//gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-				if (ret > res->cur_budget) {
-					ret = res->cur_budget;
-				}
-			}
-		}
-	}
-
-	return ret;
-}
-*/
-
 /* update_cpu_prio - Update cpu's priority
 * When a cpu picks a new task, call this function
 * to update cpu priorities.
@@ -553,7 +385,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	struct mc2_cpu_state *state;
 	lt_t update, now;
 	int global_schedule_now;
-	//lt_t remain_budget; // no ghost jobs
 	int reschedule[NR_CPUS];
 	int cpus;

@@ -573,25 +404,13 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	TS_ISR_START;

 	TRACE("Timer fired at %llu\n", litmus_clock());
-	//raw_spin_lock_irqsave(&_global_env.lock, flags);
 	raw_spin_lock_irqsave(&state->lock, flags);
 	now = litmus_clock();
 	sup_update_time(&state->sup_env, now);

-	/* 9/20/2015 fix - no ghost job
-	remain_budget = mc2_update_ghost_state(state);
-	*/
 	update = state->sup_env.next_scheduler_update;
 	now = state->sup_env.env.current_time;

-	/* 9/20/2015 fix - no ghost job
-	if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
-		update = now + remain_budget;
-	}
-
-	TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget);
-	*/
-
 	if (update <= now) {
 		litmus_reschedule_local();
 	} else if (update != SUP_NO_SCHEDULER_UPDATE) {
@@ -608,9 +427,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	while (global_schedule_now--) {
 		int cpu = get_lowest_prio_cpu(0);
 		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
-			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-			//raw_spin_unlock(&_lowest_prio_cpu.lock);
 			TRACE("LOWEST CPU = P%d\n", cpu);
 			if (cpu == state->cpu && update > now)
 				litmus_reschedule_local();
@@ -621,21 +438,12 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	raw_spin_unlock(&_global_env.lock);

 	raw_spin_unlock_irqrestore(&state->lock, flags);
-	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);

 	TS_ISR_END;

 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
 			litmus_reschedule(cpus);
-			/*
-			struct mc2_cpu_state *remote_state;
-
-			remote_state = cpu_state_for(cpus);
-			raw_spin_lock(&remote_state->lock);
-			preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
-			raw_spin_unlock(&remote_state->lock);
-			*/
 		}
 	}

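Note: the timer ISR keeps the same split as the rest of the file: Level A/B time advances under the per-CPU state->lock, Level C under _global_env.lock, and will_schedule acts as a claim flag so each CPU picked by get_lowest_prio_cpu() is poked at most once per pass, with remote pokes deferred until all locks are dropped. The protocol, schematically (names from this patch):

	/* Schematic of the wake-up protocol in the ISR and in
	 * mc2_update_timer_and_unlock(): claim under the global lock,
	 * poke remote CPUs only after unlocking. */
	while (global_schedule_now--) {		/* Level-C jobs needing a CPU */
		int cpu = get_lowest_prio_cpu(0);

		if (cpu != NO_CPU &&
		    !_lowest_prio_cpu.cpu_entries[cpu].will_schedule) {
			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
			if (cpu == local_cpu_state()->cpu)
				litmus_reschedule_local();
			else
				reschedule[cpu] = 1;	/* poked after unlock */
		}
	}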
@@ -688,23 +496,9 @@ static long mc2_complete_job(void)
 		/* set next_replenishtime to synchronous release time */
 		BUG_ON(!res);
 		res->next_replenishment = tsk_rt(current)->sporadic_release_time;
-/*
-		if (get_task_crit_level(current) == CRIT_LEVEL_A) {
-			struct table_driven_reservation *tdres;
-			tdres = container_of(res, struct table_driven_reservation, res);
-			tdres->next_interval = 0;
-			tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
-			res->next_replenishment += tdres->intervals[0].start;
-		}
-*/
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);

-		//TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
-
-		//if (lv < CRIT_LEVEL_C)
-		//	raw_spin_unlock(&state->lock);
-		//else
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);

@@ -762,17 +556,6 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 					res->blocked_by_ghost = 0;
 					res->is_ghost = NO_CPU;
 					return tsk;
-/* no ghost jobs
-					if (likely(!ce->running)) {
-						sup_scheduler_update_after(sup_env, res->cur_budget);
-						res->blocked_by_ghost = 0;
-						res->is_ghost = NO_CPU;
-						return tsk;
-					} else {
-						res->blocked_by_ghost = 1;
-						TRACE_TASK(ce->running, " is GHOST\n");
-					}
-*/
 				}
 			}
 		}
@@ -785,19 +568,10 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 {
 	struct reservation *res, *next;
 	struct task_struct *tsk = NULL;
-	//struct crit_entry *ce;
+
 	enum crit_level lv;
 	lt_t time_slice;

-	/* no eligible level A or B tasks exists */
-	/* check the ghost job */
-	/*
-	ce = &state->crit_entries[CRIT_LEVEL_C];
-	if (ce->running) {
-		TRACE_TASK(ce->running," is GHOST\n");
-		return NULL;
-	}
-	*/
 	list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
 		BUG_ON(!res);
 		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
@@ -814,8 +588,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 				res->scheduled_on = state->cpu;
 				return tsk;
 			} else if (lv == CRIT_LEVEL_C) {
-				//ce = &state->crit_entries[lv];
-				//if (likely(!ce->running)) {
 #if BUDGET_ENFORCEMENT_AT_C
 				gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 #endif
@@ -824,11 +596,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 				res->is_ghost = NO_CPU;
 				res->scheduled_on = state->cpu;
 				return tsk;
-				//} else {
-				//	res->blocked_by_ghost = 1;
-				//	TRACE_TASK(ce->running, " is GHOST\n");
-				//	return NULL;
-				//}
 			} else {
 				BUG();
 			}
@@ -884,13 +651,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)

 	pre_schedule(prev, state->cpu);

-	/* 9/20/2015 fix
-	raw_spin_lock(&_global_env.lock);
-	*/
 	raw_spin_lock(&state->lock);

-	//BUG_ON(state->scheduled && state->scheduled != prev);
-	//BUG_ON(state->scheduled && !is_realtime(prev));
 	if (state->scheduled && state->scheduled != prev)
 		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
 	if (state->scheduled && !is_realtime(prev))
@@ -911,24 +673,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)

 	now = litmus_clock();
 	sup_update_time(&state->sup_env, now);
-	/* 9/20/2015 fix */
-	//raw_spin_lock(&_global_env.lock);
-	//to_schedule = gmp_update_time(&_global_env, now);
-	//raw_spin_unlock(&_global_env.lock);
-
-	/* 9/20/2015 fix
-	mc2_update_ghost_state(state);
-	*/
-
-	/* remove task from reservation if it blocks */
-	/*
-	if (is_realtime(prev) && !is_running(prev)) {
-		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
-			raw_spin_lock(&_global_env.lock);
-		task_departs(prev, is_completed(prev));
-		if (get_task_crit_level(prev) == CRIT_LEVEL_C)
-			raw_spin_unlock(&_global_env.lock);
-	}*/
+
 	if (is_realtime(current) && blocks) {
 		if (get_task_crit_level(current) == CRIT_LEVEL_C)
 			raw_spin_lock(&_global_env.lock);
@@ -955,11 +700,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 			raw_spin_unlock(&_global_env.lock);
 	}

-	//raw_spin_lock(&_lowest_prio_cpu.lock);
-	//_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
-	//update_cpu_prio(state);
-	//raw_spin_unlock(&_lowest_prio_cpu.lock);
-
 	/* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
 	sched_state_task_picked();

@@ -982,10 +722,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		cpu = get_lowest_prio_cpu(res?res->priority:0);
 		TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 		if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
-			//raw_spin_lock(&_lowest_prio_cpu.lock);
 			_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 			resched_cpu[cpu] = 1;
-			//raw_spin_unlock(&_lowest_prio_cpu.lock);
 		}
 		raw_spin_unlock(&_global_env.lock);
 	}
@@ -1012,23 +750,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	return state->scheduled;
 }

-static void resume_legacy_task_model_updates(struct task_struct *tsk)
-{
-	lt_t now;
-	if (is_sporadic(tsk)) {
-		/* If this sporadic task was gone for a "long" time and woke up past
-		 * its deadline, then give it a new budget by triggering a job
-		 * release. This is purely cosmetic and has no effect on the
-		 * MC2 scheduler. */
-
-		now = litmus_clock();
-		if (is_tardy(tsk, now)) {
-			//release_at(tsk, now);
-			//sched_trace_task_release(tsk);
-		}
-	}
-}
-
 /* mc2_task_resume - Called when the state of tsk changes back to
 *	TASK_RUNNING. We need to requeue the task.
 */
@@ -1046,9 +767,6 @@ static void mc2_task_resume(struct task_struct *tsk)
 	else
 		state = local_cpu_state();

-	/* 9/20/2015 fix
-	raw_spin_lock(&_global_env.lock);
-	*/
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
 	{
@@ -1083,22 +801,14 @@ static void mc2_task_resume(struct task_struct *tsk)
 			raw_spin_unlock(&_global_env.lock);
 		}

-		/* 9/20/2015 fix
-		mc2_update_ghost_state(state);
-		*/
-		//task_arrives(state, tsk);
 		/* NOTE: drops state->lock */
 		TRACE_TASK(tsk, "mc2_resume()\n");
 		mc2_update_timer_and_unlock(state);
 	} else {
 		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
-		//raw_spin_unlock(&_global_env.lock);
 	}

 	local_irq_restore(flags);
-
-	//gmp_free_passed_event();
-	resume_legacy_task_model_updates(tsk);
 }


@@ -1152,9 +862,6 @@ static long mc2_admit_task(struct task_struct *tsk)
 		state = local_cpu_state();
 		raw_spin_lock_irqsave(&state->lock, flags);
 		raw_spin_lock(&_global_env.lock);
-		//state = local_cpu_state();
-
-		//raw_spin_lock(&state->lock);

 		res = gmp_find_by_id(&_global_env, mp->res_id);

@@ -1206,18 +913,16 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		state = local_cpu_state();
 	else
 		state = cpu_state_for(tinfo->cpu);
+

-	local_irq_save(flags);
-
-	/* acquire the lock protecting the state and disable interrupts */
-	//raw_spin_lock(&_global_env.lock);
-	//raw_spin_lock(&state->lock);
 	if (is_running) {
 		state->scheduled = tsk;
 		/* make sure this task should actually be running */
 		litmus_reschedule_local();
 	}

+	/* acquire the lock protecting the state and disable interrupts */
+	local_irq_save(flags);
 	raw_spin_lock(&state->lock);

 	if (lv == CRIT_LEVEL_C) {
@@ -1227,7 +932,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	else {
 		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 	}
-	//res = res_find_by_id(state, tinfo->mc2_param.res_id);
 	release = res->next_replenishment;

 	if (on_runqueue || is_running) {
@@ -1235,14 +939,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		 * [see comment in pres_task_resume()] */
 		if (lv == CRIT_LEVEL_C) {
 			gmp_update_time(&_global_env, litmus_clock());
-			//raw_spin_unlock(&_global_env.lock);
 		}
 		else
 			sup_update_time(&state->sup_env, litmus_clock());
-		//mc2_update_time(lv, state, litmus_clock());
-		/* 9/20/2015 fix
-		mc2_update_ghost_state(state);
-		*/
 		task_arrives(state, tsk);
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);
@@ -1254,13 +953,11 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);
 		raw_spin_unlock(&state->lock);
-		//raw_spin_unlock(&_global_env.lock);
 	}
 	local_irq_restore(flags);

 	if (!release) {
 		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
-		//release_at(tsk, release);
 	}
 	else
 		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
@@ -1275,15 +972,12 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	struct reservation *res = NULL, *next;
 	struct sup_reservation_environment *sup_env;
 	int found = 0;
-	//enum crit_level lv = get_task_crit_level(current);
 	unsigned long flags;

 	if (cpu == -1) {
 		/* if the reservation is global reservation */
 		local_irq_save(flags);
-		//state = local_cpu_state();
 		raw_spin_lock(&_global_env.lock);
-		//raw_spin_lock(&state->lock);

 		list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
 			if (res->id == reservation_id) {
@@ -1314,7 +1008,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 			}
 		}

-		//raw_spin_unlock(&state->lock);
 		raw_spin_unlock(&_global_env.lock);
 		local_irq_restore(flags);
 	} else {
@@ -1323,17 +1016,9 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 		local_irq_save(flags);
 		raw_spin_lock(&state->lock);

-		// res = sup_find_by_id(&state->sup_env, reservation_id);
 		sup_env = &state->sup_env;
 		list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
 			if (res->id == reservation_id) {
-/*
-				if (lv == CRIT_LEVEL_A) {
-					struct table_driven_reservation *tdres;
-					tdres = container_of(res, struct table_driven_reservation, res);
-					kfree(tdres->intervals);
-				}
-*/
 				list_del(&res->list);
 				kfree(res);
 				found = 1;
@@ -1343,12 +1028,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 		if (!found) {
 			list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
 				if (res->id == reservation_id) {
-/*					if (lv == CRIT_LEVEL_A) {
-						struct table_driven_reservation *tdres;
-						tdres = container_of(res, struct table_driven_reservation, res);
-						kfree(tdres->intervals);
-					}
-*/
 					list_del(&res->list);
 					kfree(res);
 					found = 1;
@@ -1359,12 +1038,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 		if (!found) {
 			list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
 				if (res->id == reservation_id) {
-/*					if (lv == CRIT_LEVEL_A) {
-						struct table_driven_reservation *tdres;
-						tdres = container_of(res, struct table_driven_reservation, res);
-						kfree(tdres->intervals);
-					}
-*/
 					list_del(&res->list);
 					kfree(res);
 					found = 1;
@@ -1419,11 +1092,7 @@ static void mc2_task_exit(struct task_struct *tsk)
 	else if (lv == CRIT_LEVEL_C) {
 		raw_spin_lock(&_global_env.lock);
 		gmp_update_time(&_global_env, litmus_clock());
-		//raw_spin_unlock(&_global_env.lock);
 	}
-	/* 9/20/2015 fix
-	mc2_update_ghost_state(state);
-	*/
 	task_departs(tsk, 0);
 	if (lv == CRIT_LEVEL_C)
 		raw_spin_unlock(&_global_env.lock);
@@ -1506,8 +1175,6 @@ static long create_polling_reservation(
 		return -ENOMEM;

 	if (config->cpu != -1) {
-
-		//raw_spin_lock_irqsave(&_global_env.lock, flags);
 		state = cpu_state_for(config->cpu);
 		raw_spin_lock_irqsave(&state->lock, flags);

@@ -1521,10 +1188,6 @@ static long create_polling_reservation(
 			pres->res.id = config->id;
 			pres->res.blocked_by_ghost = 0;
 			pres->res.is_ghost = NO_CPU;
-			/*if (config->priority == LITMUS_MAX_PRIORITY) {
-				level_a_priorities[config->cpu]++;
-				pres->res.priority = level_a_priorities[config->cpu];
-			}*/
 			if (!use_edf)
 				pres->res.priority = config->priority;
 			sup_add_new_reservation(&state->sup_env, &pres->res);
@@ -1535,7 +1198,6 @@ static long create_polling_reservation(
 		}

 		raw_spin_unlock_irqrestore(&state->lock, flags);
-		//raw_spin_unlock_irqrestore(&_global_env.lock, flags);

 	} else {
 		raw_spin_lock_irqsave(&_global_env.lock, flags);
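Note: with the stale commented lock calls gone, the locking rule of create_polling_reservation() reads clearly: a partitioned reservation (config->cpu != -1) is inserted under its CPU's state->lock, a global one under _global_env.lock, and neither path takes both. In outline (the global-side add function is assumed to mirror the sup_ one; allocation and setup elided):

	/* Outline of the lock discipline: exactly one environment owns
	 * a reservation, so exactly one lock guards its insertion. */
	if (config->cpu != -1) {
		state = cpu_state_for(config->cpu);
		raw_spin_lock_irqsave(&state->lock, flags);
		sup_add_new_reservation(&state->sup_env, &pres->res);
		raw_spin_unlock_irqrestore(&state->lock, flags);
	} else {
		raw_spin_lock_irqsave(&_global_env.lock, flags);
		gmp_add_new_reservation(&_global_env, &pres->res);	/* assumed name */
		raw_spin_unlock_irqrestore(&_global_env.lock, flags);
	}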
diff --git a/mm/migrate.c b/mm/migrate.c
index a2e9cad083d5..8dd685be20d8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -409,7 +409,7 @@ int replicate_page_move_mapping(struct address_space *mapping,
 	void **pslot;

 	BUG_ON(!mapping);
-	TRACE_TASK(current, "page has mapping.\n");
+
 	spin_lock_irq(&mapping->tree_lock);

 	pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
@@ -933,7 +933,8 @@ static int copy_to_new_page(struct page *newpage, struct page *page,

 	mapping = page_mapping(page);
 	if (!mapping) {
-		rc = migrate_page(mapping, newpage, page, mode);
+		/* a shared library page must have a mapping. */
+		BUG();
 	}
 	else if (mapping->a_ops->migratepage) {
 		rc = replicate_page(mapping, newpage, page, mode, has_replica);
@@ -1296,10 +1297,7 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 		}
 	}
 	rcu_read_unlock();

-	if (master_exist_in_psl)
-		TRACE_TASK(current, "Page %05lx exists in PSL list\n", lib_page->master_pfn);
-
 	if (lib_page->r_page[cpu] == NULL) {
 		newpage = get_new_page(page, private, &result);
 		if (!newpage)
@@ -1588,9 +1586,7 @@ int replicate_pages(struct list_head *from, new_page_t get_new_page,
 		list_for_each_entry_safe(page, page2, from, lru) {
 			cond_resched();

-			TRACE_TASK(current, "PageAnon=%d\n", PageAnon(page));
 			rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode);
-			TRACE_TASK(current, "rc = %d\n", rc);

 			switch(rc) {
 			case -ENOMEM:
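Note: with the tracing stripped, the replica policy in unmap_and_copy() stands out: a copy is made only on the first migration of a page toward a given CPU, after which lib_page->r_page[cpu] short-circuits the work. Schematically (error paths, the actual copy, and locking elided):

	/* Schematic of the lazy replica path; field names are from this
	 * patch, the bookkeeping shown is a simplification. */
	if (lib_page->r_page[cpu] == NULL) {
		/* first migration to this cpu/node: allocate a colored
		 * page (get_new_page is new_alloc_page here) */
		newpage = get_new_page(page, private, &result);
		lib_page->r_page[cpu] = newpage;
		lib_page->r_pfn[cpu] = page_to_pfn(newpage);
	} else {
		/* later migrations reuse the cached replica */
		newpage = lib_page->r_page[cpu];
	}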
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1423,8 +1423,7 @@ static int try_to_unmap_one_entry(struct page *page, struct vm_area_struct *vma,
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
-		//printk(KERN_ERR "established migration entry for page %05lx PTE_WRITE = %d\n", page_to_pfn(page), pte_write(pteval));
 	} else
 		dec_mm_counter(mm, MM_FILEPAGES);

@@ -1467,7 +1466,6 @@ int try_to_unmap_one_only(struct page *page, struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mm_struct *current_mm;
-	//int ret = SWAP_AGAIN;

 	rcu_read_lock();
 	get_task_struct(current);