author		Dave Hansen <dave@linux.vnet.ibm.com>	2011-03-22 19:32:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-22 20:44:04 -0400
commit		033193275b3ffcfe7f3fde7b569f3d207f6cd6a0 (patch)
tree		fc65fa02248f855f0f63e087f35a507b6abb5617
parent		278df9f451dc71dcd002246be48358a473504ad0 (diff)
pagewalk: only split huge pages when necessary
Right now, if an mm_walk has either ->pte_entry or ->pmd_entry set, it will
unconditionally split any transparent huge pages it runs into.  In
practice, that means that anyone doing a

	cat /proc/$pid/smaps

will unconditionally break down every huge page in the process and depend
on khugepaged to re-collapse it later.  This is fairly suboptimal.
This patch changes that behavior.  It teaches each ->pmd_entry handler
(there are five) that it must break down THPs itself.  Also, the
_generic_ code will never break down a THP unless a ->pte_entry handler is
actually set.

This means that ->pmd_entry handlers can now choose to deal with THPs
without breaking them down.
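
For illustration, here is a minimal sketch of the shape a ->pmd_entry
handler takes under the new contract.  The handler name is hypothetical
and locking is elided, but pmd_trans_huge() and split_huge_page_pmd()
are the interfaces the patch relies on:

	static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		if (pmd_trans_huge(*pmd)) {
			/* Deal with the whole huge pmd here without splitting... */
			return 0;
		}
		/* ...or fall back to the old behavior and split it. */
		split_huge_page_pmd(walk->mm, pmd);
		/* Then walk the individual ptes, e.g. via pte_offset_map_lock(). */
		return 0;
	}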
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/proc/task_mmu.c	|  6 ++++++
-rw-r--r--	include/linux/mm.h	|  3 +++
-rw-r--r--	mm/memcontrol.c		|  5 +++--
-rw-r--r--	mm/pagewalk.c		| 24 ++++++++++++++++++++----
4 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f81..78fd3621f565 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -343,6 +343,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	struct page *page;
 	int mapcount;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -467,6 +469,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -623,6 +627,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	int err = 0;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 901435e3a9a9..294104e0891d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -914,6 +914,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ *	       this handler is required to be able to handle
+ *	       pmd_trans_huge() pmds.  They may simply choose to
+ *	       split_huge_page() instead of handling it explicitly.
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9e0f05efd114..e1ee6ad9c971 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4763,7 +4763,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	VM_BUG_ON(pmd_trans_huge(*pmd));
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4925,8 +4926,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	split_huge_page_pmd(walk->mm, pmd);
 retry:
-	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7cfa6ae02303..c3450d533611 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -33,19 +33,35 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+again:
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(walk->mm, pmd);
-		if (pmd_none_or_clear_bad(pmd)) {
+		if (pmd_none(*pmd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
 			if (err)
 				break;
 			continue;
 		}
+		/*
+		 * This implies that each ->pmd_entry() handler
+		 * needs to know about pmd_trans_huge() pmds
+		 */
 		if (walk->pmd_entry)
 			err = walk->pmd_entry(pmd, addr, next, walk);
-		if (!err && walk->pte_entry)
-			err = walk_pte_range(pmd, addr, next, walk);
+		if (err)
+			break;
+
+		/*
+		 * Check this here so we only break down trans_huge
+		 * pages when we _need_ to
+		 */
+		if (!walk->pte_entry)
+			continue;
+
+		split_huge_page_pmd(walk->mm, pmd);
+		if (pmd_none_or_clear_bad(pmd))
+			goto again;
+		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);
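
A hedged usage sketch of the resulting contract (function names are
hypothetical; walk_page_range() and struct mm_walk are as declared in
include/linux/mm.h at this point in the tree): a walker that sets only
->pmd_entry never triggers the split in walk_pmd_range() above, because
no ->pte_entry is set.

	static int example_walk(struct mm_struct *mm,
				unsigned long start, unsigned long end)
	{
		struct mm_walk walk = {
			.pmd_entry	= example_pmd_entry,	/* must cope with THP pmds */
			.mm		= mm,
		};

		/* Caller holds mm->mmap_sem at least for reading.  With no
		 * ->pte_entry, walk_pmd_range() skips split_huge_page_pmd(). */
		return walk_page_range(start, end, &walk);
	}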