author     Namhoon Kim <namhoonk@cs.unc.edu>    2016-10-12 17:10:34 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>    2016-10-12 17:10:34 -0400
commit     2bed3116318647479e14aa22ff762bed16c066b4 (patch)
tree       a411ed894da2447c08d1134950049911a9edcaf9 /litmus
parent     4172fff1a3870af7d65675e05eb0b7aba3804ea8 (diff)
RTAS 2017 with debug information.
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/bank_proc.c             |   7
-rw-r--r--  litmus/cache_proc.c            |  26
-rw-r--r--  litmus/litmus.c                | 103
-rw-r--r--  litmus/polling_reservations.c  |   2
-rw-r--r--  litmus/reservation.c           |   4
-rw-r--r--  litmus/sched_mc2.c             |   4
6 files changed, 50 insertions, 96 deletions
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 6103611211ce..df9f5730ed05 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -24,10 +24,12 @@
 // This Address Decoding is used in imx6-sabredsd platform
 #define BANK_MASK 0x38000000
 #define BANK_SHIFT 27
+
 #define CACHE_MASK 0x0000f000
 #define CACHE_SHIFT 12
 
-#define PAGES_PER_COLOR 1024
+#define PAGES_PER_COLOR 2000
+#define PAGES_PER_COLOR_HALF 1000
 unsigned int NUM_PAGE_LIST; //8*16
 
 unsigned int number_banks;
@@ -245,7 +247,8 @@ static int do_add_pages(void)
 		counter[color]++;
//		printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
 		//show_nr_pages();
-		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		//if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
+		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
 		//if ( PAGES_PER_COLOR && color>=16*2) {
 			add_page_to_color_list(page);
//			printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
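
Note on the masks above: BANK_MASK/BANK_SHIFT select the DRAM-bank bits (29:27) and CACHE_MASK/CACHE_SHIFT the L2 set-index ("color") bits (15:12) of a physical address on the i.MX6 board named in the comment, which matches the 8*16 page-list layout. Below is a minimal sketch of how a page's bank and color indices could be derived from those masks; the helper names are hypothetical (the in-tree page_bank()/page_color() referenced in the printk comments may be defined differently).

	#include <linux/mm.h>	/* struct page, page_to_phys() */

	/* Hypothetical helpers; the masks and shifts are the ones defined above. */
	static inline unsigned int sketch_page_bank(struct page *page)
	{
		return (page_to_phys(page) & BANK_MASK) >> BANK_SHIFT;   /* 0..7  */
	}

	static inline unsigned int sketch_page_color(struct page *page)
	{
		return (page_to_phys(page) & CACHE_MASK) >> CACHE_SHIFT; /* 0..15 */
	}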
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 49e98f6ed86a..87077d4366dc 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -357,17 +357,9 @@ int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
 			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
				       i * L2X0_LOCKDOWN_STRIDE);
 		}
-/*
-		for (i = 0; i < nr_lockregs; i++) {
-			barrier();
-			mem_lock(UNLOCK_ALL, i);
-			barrier();
-			//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
-			//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
-		}
-*/
+
 	}
-	printk("LOCK_ALL HANDLER\n");
+
 	local_irq_save(flags);
 	print_lockdown_registers(smp_processor_id());
 	l2c310_flush_all();
@@ -379,9 +371,6 @@ out:
 
 void cache_lockdown(u32 lock_val, int cpu)
 {
-	//unsigned long flags;
-	//raw_spin_lock_irqsave(&cache_lock, flags);
-
 	__asm__ __volatile__ (
"	str	%[lockval], [%[dcachereg]]\n"
"	str	%[lockval], [%[icachereg]]\n"
@@ -390,8 +379,6 @@ void cache_lockdown(u32 lock_val, int cpu)
	  [icachereg] "r" (ld_i_reg(cpu)),
	  [lockval] "r" (lock_val)
	: "cc");
-
-	//raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
 void do_partition(enum crit_level lv, int cpu)
@@ -421,14 +408,12 @@ void do_partition(enum crit_level lv, int cpu)
 
 	}
 	barrier();
-	//cache_lockdown(regs, cpu);
+
 	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_D_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
 	writel_relaxed(regs, cache_base + L2X0_LOCKDOWN_WAY_I_BASE + cpu * L2X0_LOCKDOWN_STRIDE);
 	barrier();
 
 	raw_spin_unlock_irqrestore(&cache_lock, flags);
-
-	flush_cache(0);
 }
 
 void lock_cache(int cpu, u32 val)
@@ -544,7 +529,6 @@ void inline enter_irq_mode(void)
 
	if (os_isolation == 0)
		return;
-
	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
 
@@ -1011,7 +995,6 @@ int setup_flusher_array(void)
		ret = -EINVAL;
		goto out;
	}
-
	for (way = 0; way < MAX_NR_WAYS; way++) {
		void **flusher_color_arr;
		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
@@ -1023,7 +1006,7 @@ int setup_flusher_array(void)
		}
 
		flusher_pages[way] = flusher_color_arr;
-
+		/* This is ugly. */
		for (color = 0; color < MAX_NR_COLORS; color++) {
			int node;
			switch (color) {
@@ -1090,6 +1073,7 @@ int setup_flusher_array(void)
			}
		}
	}
+
out:
	return ret;
out_free:
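
For context, the writes that remain in do_partition() follow the PL310 per-CPU way-lockdown pattern: a set bit in a CPU's D/I lockdown register prevents that CPU from allocating into the corresponding way. A minimal sketch of that access pattern is shown below; the helper name is hypothetical, while cache_base and the L2X0_* offsets are the ones already used in this file.

	/* Lock the ways whose bits are set in 'locked_ways' for one CPU,
	 * in both the data and instruction lockdown registers. */
	static void sketch_lockdown_ways(void __iomem *cache_base, int cpu, u32 locked_ways)
	{
		writel_relaxed(locked_ways, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       cpu * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(locked_ways, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       cpu * L2X0_LOCKDOWN_STRIDE);
		barrier();	/* order the MMIO writes against later accesses */
	}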
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11e4c5da9c10..1f5e49114b2c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -352,28 +352,32 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_movable_page(struct page *page);
 extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
 
-DECLARE_PER_CPU(struct list_head, shared_lib_page_list);
 #define INVALID_PFN (0xffffffff)
 LIST_HEAD(shared_lib_pages);
-//struct list_head shared_lib_pages = LIST_HEAD_INIT(shared_lib_pages);
+
 EXPORT_SYMBOL(shared_lib_pages);
 
+/* Reallocate pages of a task
+ * Private pages - Migrate to a new page.
+ * Shared pages - Use a replica. Make a replica if necessary.
+ * @cpu : CPU id of the calling task
+ * returns the number of pages that is not moved.
+ */
 asmlinkage long sys_set_page_color(int cpu)
 {
	long ret = 0;
-	//struct page *page_itr = NULL;
	struct vm_area_struct *vma_itr = NULL;
	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0;
	unsigned long node;
	enum crit_level lv;
	struct mm_struct *mm;
-	//struct list_head *shared_pagelist = this_cpu_ptr(&shared_lib_page_list);
 
	LIST_HEAD(pagelist);
	LIST_HEAD(task_shared_pagelist);
 
	migrate_prep();
 
+	/* Find the current mm_struct */
	rcu_read_lock();
	get_task_struct(current);
	rcu_read_unlock();
@@ -383,16 +387,14 @@ asmlinkage long sys_set_page_color(int cpu)
	down_read(&mm->mmap_sem);
	TRACE_TASK(current, "SYSCALL set_page_color\n");
	vma_itr = mm->mmap;
+	/* Iterate all vm_area_struct */
	while (vma_itr != NULL) {
		unsigned int num_pages = 0, i;
		struct page *old_page = NULL;
		int pages_in_vma = 0;
 
		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
-		// print vma flags
-		//printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags);
-		//printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE);
-		//printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot);
+		/* Traverse all pages in vm_area_struct */
		for (i = 0; i < num_pages; i++) {
			old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
 
@@ -410,14 +412,13 @@ asmlinkage long sys_set_page_color(int cpu)
			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
			pages_in_vma++;
 
-			// for simple debug
+			/* Conditions for replicable pages */
			if (page_count(old_page) > 2 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
-			//if (page_count(old_page) < 10 && page_count(old_page) > 3 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
				struct shared_lib_page *lib_page;
				int is_exist = 0;
 
-				/* update PSL list */
-				/* check if this page is in the PSL list */
+				/* Update PSL (Per-core shared library (master)) list */
+				/* Check if this page is in the PSL list */
				rcu_read_lock();
				list_for_each_entry(lib_page, &shared_lib_pages, list)
				{
@@ -432,10 +433,8 @@ asmlinkage long sys_set_page_color(int cpu)
					int cpu_i;
					lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
					lib_page->master_page = old_page;
-					//lib_page->r_page = NULL;
					lib_page->master_pfn = page_to_pfn(old_page);
-					//lib_page->r_pfn = INVALID_PFN;
-					for (cpu_i = 0; cpu_i < NR_CPUS; cpu_i++) {
+					for (cpu_i = 0; cpu_i < NR_CPUS+1; cpu_i++) {
						lib_page->r_page[cpu_i] = NULL;
						lib_page->r_pfn[cpu_i] = INVALID_PFN;
					}
@@ -452,9 +451,8 @@ asmlinkage long sys_set_page_color(int cpu)
					list_add_tail(&old_page->lru, &task_shared_pagelist);
					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
					nr_shared_pages++;
-					TRACE_TASK(current, "SHARED isolate_lru_page success\n");
				} else {
-					TRACE_TASK(current, "SHARED isolate_lru_page failed\n");
+					TRACE_TASK(current, "isolate_lru_page for a shared page failed\n");
					nr_failed++;
				}
				put_page(old_page);
@@ -466,34 +464,28 @@ asmlinkage long sys_set_page_color(int cpu)
				inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
				nr_pages++;
			} else {
-				TRACE_TASK(current, "isolate_lru_page failed\n");
+				TRACE_TASK(current, "isolate_lru_page for a private page failed\n");
				nr_failed++;
			}
-			//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
			put_page(old_page);
-			//TRACE_TASK(current, "PRIVATE\n");
		}
	}
	TRACE_TASK(current, "PAGES_IN_VMA = %d size = %d KB\n", pages_in_vma, pages_in_vma*4);
	vma_itr = vma_itr->vm_next;
	}
-
-	//list_for_each_entry(page_itr, &pagelist, lru) {
-	//	printk(KERN_INFO "B _mapcount = %d, _count = %d\n", page_mapcount(page_itr), page_count(page_itr));
-	//	}
 
	ret = 0;
	if (!is_realtime(current))
-		lv = 1;
+		node = 8;
	else {
		lv = tsk_rt(current)->mc2_data->crit;
+		if (cpu == -1)
+			node = 8;
+		else
+			node = cpu*2 + lv;
	}
 
-	if (cpu == -1)
-		node = 8;
-	else
-		node = cpu*2 + lv;
-
+	/* Migrate private pages */
	if (!list_empty(&pagelist)) {
		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
@@ -502,52 +494,24 @@ asmlinkage long sys_set_page_color(int cpu)
			putback_movable_pages(&pagelist);
		}
	}
-/*
-	{
-		struct list_head *pos, *q;
-		list_for_each_safe(pos, q, &task_shared_pagelist) {
-			struct page *p_entry = NULL;
-			struct shared_lib_page *lib_desc = NULL;
-
-			p_entry = list_entry(pos, struct page, lru);
-			list_for_each_entry(lib_desc, &shared_lib_pages, list) {
-				if (p_entry == lib_desc->r_page) {
-					list_del(pos);
-				}
-			}
-		}
-	}
-*/
+
+	/* Replicate shared pages */
	if (!list_empty(&task_shared_pagelist)) {
-		if (node != 8)
-			ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
-		else
-			ret = nr_shared_pages;
+		ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
		TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
		nr_not_migrated += ret;
		if (ret) {
			putback_movable_pages(&task_shared_pagelist);
		}
	}
 
-	/* handle sigpage and litmus ctrl_page */
-/*	vma_itr = current->mm->mmap;
-	while (vma_itr != NULL) {
-		if (vma_itr->vm_start == tsk_rt(current)->addr_ctrl_page) {
-			TRACE("litmus ctrl_page = %08x\n", vma_itr->vm_start);
-			vma_itr->vm_page_prot = PAGE_SHARED;
-			break;
-		}
-		vma_itr = vma_itr->vm_next;
-	}
-*/
	up_read(&mm->mmap_sem);
 
-
-	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
+	TRACE_TASK(current, "nr_pages = %d nr_failed = %d nr_not_migrated = %d\n", nr_pages, nr_failed, nr_not_migrated);
	printk(KERN_INFO "node = %ld, nr_private_pages = %d, nr_shared_pages = %d, nr_failed_to_isolate_lru = %d, nr_not_migrated = %d\n", node, nr_pages, nr_shared_pages, nr_failed, nr_not_migrated);
 
	flush_cache(1);
+
	/* for debug START */
	TRACE_TASK(current, "PSL PAGES\n");
	{
@@ -556,11 +520,11 @@ asmlinkage long sys_set_page_color(int cpu)
		rcu_read_lock();
		list_for_each_entry(lpage, &shared_lib_pages, list)
		{
-			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3]);
+			TRACE_TASK(current, "master_PFN = %05lx r_PFN = %05lx, %05lx, %05lx, %05lx, %05lx\n", lpage->master_pfn, lpage->r_pfn[0], lpage->r_pfn[1], lpage->r_pfn[2], lpage->r_pfn[3], lpage->r_pfn[4]);
		}
		rcu_read_unlock();
	}
-
+#if 0
	TRACE_TASK(current, "AFTER migration\n");
	down_read(&mm->mmap_sem);
	vma_itr = mm->mmap;
@@ -595,8 +559,9 @@ asmlinkage long sys_set_page_color(int cpu)
	}
	up_read(&mm->mmap_sem);
	/* for debug FIN. */
+#endif
 
-	return ret;
+	return nr_not_migrated;
 }
 
 /* sys_test_call() is a test system call for developing */
@@ -644,7 +609,7 @@ asmlinkage long sys_test_call(unsigned int param)
				continue;
			}
 
-			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
+			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping));
			put_page(old_page);
		}
		vma_itr = vma_itr->vm_next;
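
The reworked node selection in sys_set_page_color() above folds the CPU id and criticality level into a single allocation-group index that is then passed to migrate_pages()/replicate_pages() via new_alloc_page(). A standalone sketch of that mapping follows; the formula and the use of 8 for non-real-time callers or cpu == -1 are taken directly from the diff, while the helper name and the reading of 8 as a fallback group are assumptions.

	/* Hypothetical mirror of the node computation in sys_set_page_color(). */
	static unsigned long sketch_pick_node(int cpu, bool is_rt, enum crit_level lv)
	{
		if (!is_rt || cpu == -1)
			return 8;		/* assumed fallback/shared group */
		return cpu * 2 + lv;		/* per-CPU, per-criticality group */
	}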
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
index 4a2fee575127..06bc1f5b9267 100644
--- a/litmus/polling_reservations.c
+++ b/litmus/polling_reservations.c
@@ -4,6 +4,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 static void periodic_polling_client_arrives(
	struct reservation* res,
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 07e38cb7d138..cdda89d4208f 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -4,8 +4,8 @@
 #include <litmus/litmus.h>
 #include <litmus/reservation.h>
 
-//#define TRACE(fmt, args...) do {} while (false)
-//#define TRACE_TASK(fmt, args...) do {} while (false)
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 5c88a36aacec..588f78e2107f 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -27,8 +27,8 @@
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
 
-//#define TRACE(fmt, args...) do {} while (false)
-//#define TRACE_TASK(fmt, args...) do {} while (false)
+#define TRACE(fmt, args...) do {} while (false)
+#define TRACE_TASK(fmt, args...) do {} while (false)
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
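
The TRACE()/TRACE_TASK() redefinitions above (and in reservation.c and polling_reservations.c) use the standard empty-statement idiom to compile LITMUS^RT's debug tracing out of these files while keeping every call site syntactically valid, even when the call is the sole statement of an un-braced if. A minimal sketch of the idiom; the surrounding function is illustrative only.

	#define TRACE(fmt, args...) do {} while (false)

	static int sketch_consume_budget(int budget)
	{
		if (budget <= 0)
			TRACE("budget exhausted (%d)\n", budget);	/* expands to a no-op statement */
		return budget;
	}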