diff options
author | Namhoon Kim <namhoonk@cs.unc.edu> | 2014-11-21 15:37:46 -0500 |
---|---|---|
committer | Namhoon Kim <namhoonk@cs.unc.edu> | 2014-11-21 15:37:46 -0500 |
commit | 07d5680c4c476a4b68bd3cff134d99ca996b2481 (patch) | |
tree | 9b836d6bcee1f57779aba3d66937633124e2820a | |
parent | d77654f3287edf9fa6aeda97825e9a972bdc8821 (diff) |
static linking coloring
-rw-r--r-- | litmus/litmus.c | 94 |
1 files changed, 73 insertions, 21 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c index cdffbc617d3a..88cc3e043b0b 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -14,13 +14,16 @@ | |||
14 | #include <linux/sched/rt.h> | 14 | #include <linux/sched/rt.h> |
15 | #include <linux/rwsem.h> | 15 | #include <linux/rwsem.h> |
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/migrate.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/memcontrol.h> | ||
17 | 20 | ||
18 | #include <litmus/litmus.h> | 21 | #include <litmus/litmus.h> |
19 | #include <litmus/bheap.h> | 22 | #include <litmus/bheap.h> |
20 | #include <litmus/trace.h> | 23 | #include <litmus/trace.h> |
21 | #include <litmus/rt_domain.h> | 24 | #include <litmus/rt_domain.h> |
22 | #include <litmus/litmus_proc.h> | ||
23 | #include <litmus/sched_trace.h> | 25 | #include <litmus/sched_trace.h> |
26 | #include <litmus/litmus_proc.h> | ||
24 | #include <litmus/clock.h> | 27 | #include <litmus/clock.h> |
25 | 28 | ||
26 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
@@ -342,6 +345,7 @@ static inline unsigned long page_color(struct page *page) | |||
342 | return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT); | 345 | return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT); |
343 | } | 346 | } |
344 | 347 | ||
348 | /* | ||
345 | static struct page *walk_page_table(unsigned long addr) | 349 | static struct page *walk_page_table(unsigned long addr) |
346 | { | 350 | { |
347 | pgd_t *pgd; | 351 | pgd_t *pgd; |
@@ -353,15 +357,18 @@ static struct page *walk_page_table(unsigned long addr) | |||
353 | struct mm_struct *mm = current->mm; | 357 | struct mm_struct *mm = current->mm; |
354 | 358 | ||
355 | pgd = pgd_offset(mm, addr); | 359 | pgd = pgd_offset(mm, addr); |
356 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | 360 | //if (pgd_none(*pgd) || pgd_bad(*pgd)) |
361 | if (pgd_none_or_clear_bad(pgd)) | ||
357 | goto out; | 362 | goto out; |
358 | 363 | ||
359 | pud = pud_offset(pgd, addr); | 364 | pud = pud_offset(pgd, addr); |
360 | if (pud_none(*pud) || pud_bad(*pud)) | 365 | //if (pud_none(*pud) || pud_bad(*pud)) |
366 | if (pud_none_or_clear_bad(pud)) | ||
361 | goto out; | 367 | goto out; |
362 | 368 | ||
363 | pmd = pmd_offset(pud, addr); | 369 | pmd = pmd_offset(pud, addr); |
364 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | 370 | //if (pmd_none(*pmd) || pmd_bad(*pmd)) |
371 | if (pmd_none_or_clear_bad(pmd)) | ||
365 | goto out; | 372 | goto out; |
366 | 373 | ||
367 | ptep = pte_offset_map(pmd, addr); | 374 | ptep = pte_offset_map(pmd, addr); |
@@ -370,65 +377,110 @@ static struct page *walk_page_table(unsigned long addr) | |||
370 | pte = *ptep; | 377 | pte = *ptep; |
371 | 378 | ||
372 | page = pte_page(pte); | 379 | page = pte_page(pte); |
373 | if (page) | 380 | if (pfn_valid(__page_to_pfn(page))) { |
374 | printk(KERN_INFO "page frame struct is @ %p\n", page); | 381 | ;//printk(KERN_INFO "page frame struct is @ %p\n", page); |
382 | //printk(KERN_INFO "pfn is %lu\n", __page_to_pfn(page)); | ||
383 | } | ||
375 | 384 | ||
376 | pte_unmap(ptep); | 385 | pte_unmap(ptep); |
377 | 386 | ||
378 | out: | 387 | out: |
379 | return page; | 388 | return page; |
380 | } | 389 | } |
390 | */ | ||
391 | |||
392 | extern int isolate_lru_page(struct page *page); | ||
393 | extern void putback_lru_page(struct page *page); | ||
394 | |||
395 | static struct page *new_alloc_page(struct page *page, unsigned long node, int **x) | ||
396 | { | ||
397 | return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); | ||
398 | } | ||
381 | 399 | ||
382 | asmlinkage long sys_set_page_color(int cpu) | 400 | asmlinkage long sys_set_page_color(int cpu) |
383 | { | 401 | { |
384 | long ret = 0; | 402 | long ret = 0; |
385 | struct task_page *task_page_itr = NULL; | 403 | //struct task_page *task_page_itr = NULL; |
386 | struct task_page *task_page_itr_next = NULL; | 404 | //struct task_page *task_page_itr_next = NULL; |
387 | struct vm_area_struct *vma_itr = NULL; | 405 | struct vm_area_struct *vma_itr = NULL; |
388 | struct task_page *entry = NULL; | 406 | //struct task_page *entry = NULL; |
407 | int nr_pages = 0; | ||
408 | LIST_HEAD(pagelist); | ||
389 | 409 | ||
390 | down_read(&current->mm->mmap_sem); | 410 | down_read(&current->mm->mmap_sem); |
391 | printk(KERN_INFO "SYSCALL set_page_color\n"); | 411 | printk(KERN_INFO "SYSCALL set_page_color\n"); |
392 | vma_itr = current->mm->mmap; | 412 | vma_itr = current->mm->mmap; |
393 | while (vma_itr != NULL) { | 413 | while (vma_itr != NULL) { |
394 | unsigned int num_pages = 0, i; | 414 | unsigned int num_pages = 0, i; |
395 | struct page *new_page = NULL; | 415 | struct page *new_page = NULL, *old_page = NULL; |
416 | /* | ||
396 | entry = kmalloc(sizeof(struct task_page), GFP_ATOMIC); | 417 | entry = kmalloc(sizeof(struct task_page), GFP_ATOMIC); |
397 | if (entry == NULL) { | 418 | if (entry == NULL) { |
398 | return -ENOSPC; | 419 | return -ENOSPC; |
399 | } | 420 | } |
400 | entry->vm_start = vma_itr->vm_start; | 421 | entry->vm_start = vma_itr->vm_start; |
401 | entry->vm_end = vma_itr->vm_end; | 422 | entry->vm_end = vma_itr->vm_end; |
402 | num_pages = (entry->vm_end - entry->vm_start) / PAGE_SIZE; | 423 | */ |
424 | num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; | ||
403 | // print vma flags | 425 | // print vma flags |
404 | printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags); | 426 | //printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags); |
405 | printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE); | 427 | //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE); |
406 | 428 | ||
407 | for (i = 0; i < num_pages; i++) { | 429 | for (i = 0; i < num_pages; i++) { |
408 | alloc: | 430 | /* |
409 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma_itr, vma_itr->vm_start); | 431 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma_itr, vma_itr->vm_start); |
410 | if (!new_page) | 432 | if (!new_page) |
411 | return -ENOSPC; | 433 | return -ENOSPC; |
412 | if ( (page_color(new_page)>>2) != cpu) { | 434 | printk(KERN_INFO "PAGE_COLOR: %lu\n", page_color(new_page)); |
413 | 435 | */ | |
436 | //old_page = walk_page_table(vma_itr->vm_start + PAGE_SIZE*i); | ||
437 | old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT); | ||
438 | |||
439 | if (IS_ERR(old_page)) | ||
440 | continue; | ||
441 | if (!old_page) | ||
442 | continue; | ||
443 | if (PageReserved(old_page)) | ||
444 | goto put_and_next; | ||
414 | 445 | ||
446 | ret = isolate_lru_page(old_page); | ||
447 | //if (pfn_valid(__page_to_pfn(old_page))) | ||
448 | if (!ret) { | ||
449 | //printk(KERN_INFO "page_mapcount = %d\n", page_mapcount(old_page)); | ||
450 | printk(KERN_INFO "addr: %lu, pfn: %lu mapcount: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page)); | ||
451 | list_add_tail(&old_page->lru, &pagelist); | ||
452 | inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); | ||
453 | nr_pages++; | ||
454 | } | ||
455 | put_and_next: | ||
456 | put_page(old_page); | ||
415 | } | 457 | } |
416 | //entry->page = walk_page_table(entry->vm_start); | ||
417 | 458 | ||
418 | INIT_LIST_HEAD(&entry->list); | 459 | //INIT_LIST_HEAD(&entry->list); |
419 | list_add(&entry->list, &task_page_list); | 460 | //list_add(&entry->list, &task_page_list); |
420 | 461 | ||
421 | vma_itr = vma_itr->vm_next; | 462 | vma_itr = vma_itr->vm_next; |
422 | } | 463 | } |
464 | |||
465 | ret = 0; | ||
466 | if (!list_empty(&pagelist)) { | ||
467 | ret = migrate_pages(&pagelist, new_alloc_page, 0, MIGRATE_ASYNC, MR_SYSCALL); | ||
468 | if (ret) { | ||
469 | printk(KERN_INFO "%ld pages not migrated.\n", ret); | ||
470 | putback_lru_pages(&pagelist); | ||
471 | } | ||
472 | } | ||
423 | 473 | ||
424 | up_read(&current->mm->mmap_sem); | 474 | up_read(&current->mm->mmap_sem); |
425 | 475 | ||
476 | /* | ||
426 | list_for_each_entry_safe(task_page_itr, task_page_itr_next, &task_page_list, list) { | 477 | list_for_each_entry_safe(task_page_itr, task_page_itr_next, &task_page_list, list) { |
427 | //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", task_page_itr->vm_start, task_page_itr->vm_end, (task_page_itr->vm_end - task_page_itr->vm_start)/PAGE_SIZE); | 478 | //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", task_page_itr->vm_start, task_page_itr->vm_end, (task_page_itr->vm_end - task_page_itr->vm_start)/PAGE_SIZE); |
428 | list_del(&task_page_itr->list); | 479 | list_del(&task_page_itr->list); |
429 | kfree(task_page_itr); | 480 | kfree(task_page_itr); |
430 | } | 481 | } |
431 | 482 | */ | |
483 | printk(KERN_INFO "nr_pages = %d\n", nr_pages); | ||
432 | return ret; | 484 | return ret; |
433 | } | 485 | } |
434 | 486 | ||