author | Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> | 2013-09-11 17:22:14 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 18:57:49 -0400 |
commit | 98094945785464c657d598291d714d11694c8cd9 (patch) | |
tree | f6b1e06e67a26577368e79b5323fdba2a14075bb /mm | |
parent | 86cdb465cf3a9d81058b517af05074157fa9dcdd (diff) |
mm/mempolicy: rename check_*range to queue_pages_*range
The function check_range() (and its family) is not well named: it does not merely check something, it also moves pages from one list to another to queue them for page migration. So queue_pages_*range is a more fitting name.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
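The rename is purely about naming, but the distinction it encodes is easy to see in miniature. The sketch below is a stand-alone user-space toy, not kernel code: `toy_page`, `toy_queue_page()` and `toy_queue_pages_range()` are invented stand-ins for the kernel's `struct page`, `migrate_page_add()` and `queue_pages_range()`. It only illustrates the point of the rename: the walk queues mismatching pages for migration rather than merely checking them.

```c
/* toy_queue_pages.c - NOT kernel code; every type and helper here is a
 * made-up stand-in used only to illustrate the naming argument above. */
#include <stdio.h>

struct toy_page {
	int nid;     /* node the page currently lives on */
	int queued;  /* set once the page is put on the migration list */
};

/* Stand-in for migrate_page_add(): "queue" the page for migration. */
static void toy_queue_page(struct toy_page *p)
{
	p->queued = 1;
}

/*
 * Stand-in for queue_pages_range(): the walk does not merely check node
 * placement, it moves mismatching pages onto the pagelist -- which is
 * why "queue_pages" describes the behaviour better than "check".
 */
static int toy_queue_pages_range(struct toy_page *pages, int n, int target_nid)
{
	for (int i = 0; i < n; i++)
		if (pages[i].nid != target_nid)
			toy_queue_page(&pages[i]);
	return 0;
}

int main(void)
{
	struct toy_page pages[] = { {0, 0}, {1, 0}, {0, 0}, {1, 0} };
	int n = sizeof(pages) / sizeof(pages[0]);

	toy_queue_pages_range(pages, n, 0);
	for (int i = 0; i < n; i++)
		printf("page %d: nid=%d queued=%d\n",
		       i, pages[i].nid, pages[i].queued);
	return 0;
}
```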
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mempolicy.c | 41 |
1 file changed, 23 insertions, 18 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c7c359213ae1..9d778637b088 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -476,8 +476,11 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);
 
-/* Scan through pages checking if pages follow certain conditions. */
-static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+/*
+ * Scan through pages checking if pages follow certain conditions,
+ * and move them to the pagelist if they do.
+ */
+static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -515,8 +518,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return addr != end;
 }
 
-static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
-		const nodemask_t *nodes, unsigned long flags,
+static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
+		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
 		void *private)
 {
 #ifdef CONFIG_HUGETLB_PAGE
@@ -539,7 +542,7 @@ unlock:
 #endif
 }
 
-static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -553,21 +556,21 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		if (!pmd_present(*pmd))
 			continue;
 		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
-			check_hugetlb_pmd_range(vma, pmd, nodes,
+			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
 						flags, private);
 			continue;
 		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
-		if (check_pte_range(vma, pmd, addr, next, nodes,
+		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -582,14 +585,14 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		if (check_pmd_range(vma, pud, addr, next, nodes,
+		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
 
-static inline int check_pgd_range(struct vm_area_struct *vma,
+static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
 		void *private)
@@ -602,7 +605,7 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		if (check_pud_range(vma, pgd, addr, next, nodes,
+		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
 				    flags, private))
 			return -EIO;
 	} while (pgd++, addr = next, addr != end);
@@ -640,12 +643,14 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
 
 /*
- * Check if all pages in a range are on a set of nodes.
- * If pagelist != NULL then isolate pages from the LRU and
- * put them on the pagelist.
+ * Walk through page tables and collect pages to be migrated.
+ *
+ * If pages found in a given range are on a set of nodes (determined by
+ * @nodes and @flags,) it's isolated and queued to the pagelist which is
+ * passed via @private.)
  */
 static struct vm_area_struct *
-check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
 	int err;
@@ -680,7 +685,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 		      vma_migratable(vma))) {
 
-			err = check_pgd_range(vma, start, endvma, nodes,
+			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
 			if (err) {
 				first = ERR_PTR(err);
@@ -1050,7 +1055,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
 	 */
 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
-	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
@@ -1288,7 +1293,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	vma = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
 
 	err = PTR_ERR(vma);	/* maybe ... */