Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--   mm/pagewalk.c   47
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7b47a57b6646..8b1a2ce21ee5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -80,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return err;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
+				       unsigned long end)
+{
+	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
+	return boundary < end ? boundary : end;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+			      unsigned long addr, unsigned long end,
+			      struct mm_walk *walk)
+{
+	struct hstate *h = hstate_vma(vma);
+	unsigned long next;
+	unsigned long hmask = huge_page_mask(h);
+	pte_t *pte;
+	int err = 0;
+
+	do {
+		next = hugetlb_entry_end(h, addr, end);
+		pte = huge_pte_offset(walk->mm, addr & hmask);
+		if (pte && walk->hugetlb_entry)
+			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
+		if (err)
+			return err;
+	} while (addr = next, addr != end);
+
+	return 0;
+}
+#endif
+
 /**
  * walk_page_range - walk a memory map's page tables with a callback
  * @mm: memory map to walk
@@ -128,20 +159,16 @@ int walk_page_range(unsigned long addr, unsigned long end,
 		vma = find_vma(walk->mm, addr);
 #ifdef CONFIG_HUGETLB_PAGE
 		if (vma && is_vm_hugetlb_page(vma)) {
-			pte_t *pte;
-			struct hstate *hs;
-
 			if (vma->vm_end < next)
 				next = vma->vm_end;
-			hs = hstate_vma(vma);
-			pte = huge_pte_offset(walk->mm,
-					      addr & huge_page_mask(hs));
-			if (pte && !huge_pte_none(huge_ptep_get(pte))
-			    && walk->hugetlb_entry)
-				err = walk->hugetlb_entry(pte, addr,
-							  next, walk);
+			/*
+			 * Hugepage is very tightly coupled with vma, so
+			 * walk through hugetlb entries within a given vma.
+			 */
+			err = walk_hugetlb_range(vma, addr, next, walk);
 			if (err)
 				break;
+			pgd = pgd_offset(walk->mm, next);
 			continue;
 		}
 #endif
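
For context (not part of the patch): after this change a walker's ->hugetlb_entry callback is invoked once per hugepage within the vma, receiving the huge page mask and the [addr, next) range, and the huge_pte_none() check that walk_page_range() used to perform now falls to the callback itself. A minimal sketch of a callback under the new five-argument signature follows; the names my_hugetlb_entry, nr_present, start and end are hypothetical and only illustrate how a user of this interface might look, they are not taken from this commit.

/* Illustrative sketch only; count present hugetlb entries in a range. */
static int my_hugetlb_entry(pte_t *pte, unsigned long hmask,
			    unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	/* The none-check moved out of walk_page_range(), so do it here. */
	if (!huge_pte_none(huge_ptep_get(pte)))
		(*nr_present)++;

	return 0;	/* a non-zero return aborts walk_page_range() */
}

A caller would wire this up through struct mm_walk as before, e.g.:

	unsigned long nr_present = 0;
	struct mm_walk my_walk = {
		.hugetlb_entry	= my_hugetlb_entry,
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &my_walk);
	up_read(&mm->mmap_sem);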
