path: root/mm/mincore.c
author	Johannes Weiner <hannes@cmpxchg.org>	2010-05-24 17:32:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:06:58 -0400
commit	25ef0e50cca790370ad7838e3ad74db6a6a2d829 (patch)
tree	9fef482df352beaeeb0ae1c6a176448cf795d045 /mm/mincore.c
parent	f488401076c5570130c018e573f450a9a6c43365 (diff)
mincore: pass ranges as start,end address pairs
Instead of passing a start address and a number of pages into the helper
functions, convert them to use a start and an end address.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
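The conversion is mechanical: a (start, nr) pair and a half-open [start, end)
pair describe the same run of pages, related by end = start + (nr << PAGE_SHIFT)
and nr = (end - start) >> PAGE_SHIFT, which is exactly the arithmetic the patch
uses in mincore_unmapped_range() and do_mincore() below. A minimal user-space
sketch of the two conventions (walk_nr and walk_range are illustrative names,
not functions from the patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Old convention: start address plus a page count. */
static unsigned long walk_nr(unsigned long addr, unsigned long nr)
{
	unsigned long visited = 0;

	while (nr--) {
		visited++;		/* per-page work would go here */
		addr += PAGE_SIZE;
	}
	return visited;
}

/* New convention: half-open [addr, end) range of page-aligned addresses. */
static unsigned long walk_range(unsigned long addr, unsigned long end)
{
	unsigned long visited = 0;

	while (addr != end) {
		visited++;		/* per-page work would go here */
		addr += PAGE_SIZE;
	}
	return visited;
}

int main(void)
{
	unsigned long addr = 0x10000, nr = 8;
	unsigned long end = addr + (nr << PAGE_SHIFT);	/* same range, other form */

	printf("nr form visits %lu pages, range form visits %lu\n",
	       walk_nr(addr, nr), walk_range(addr, end));
	return 0;
}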
Diffstat (limited to 'mm/mincore.c')
-rw-r--r--	mm/mincore.c	57
1 file changed, 27 insertions(+), 30 deletions(-)
diff --git a/mm/mincore.c b/mm/mincore.c
index a0c4c10bbab7..211604adc23c 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -20,14 +20,12 @@
 #include <asm/pgtable.h>
 
 static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long nr,
+				unsigned long addr, unsigned long end,
 				unsigned char *vec)
 {
 #ifdef CONFIG_HUGETLB_PAGE
 	struct hstate *h;
-	int i;
 
-	i = 0;
 	h = hstate_vma(vma);
 	while (1) {
 		unsigned char present;
@@ -40,10 +38,10 @@ static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
 				addr & huge_page_mask(h));
 		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
 		while (1) {
-			vec[i++] = present;
+			*vec = present;
+			vec++;
 			addr += PAGE_SIZE;
-			/* reach buffer limit */
-			if (i == nr)
+			if (addr == end)
 				return;
 			/* check hugepage border */
 			if (!(addr & ~huge_page_mask(h)))
@@ -86,9 +84,10 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 }
 
 static void mincore_unmapped_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long nr,
+				unsigned long addr, unsigned long end,
 				unsigned char *vec)
 {
+	unsigned long nr = (end - addr) >> PAGE_SHIFT;
 	int i;
 
 	if (vma->vm_file) {
@@ -104,42 +103,44 @@ static void mincore_unmapped_range(struct vm_area_struct *vma,
 }
 
 static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, unsigned long nr,
+			unsigned long addr, unsigned long end,
 			unsigned char *vec)
 {
+	unsigned long next;
 	spinlock_t *ptl;
 	pte_t *ptep;
-	int i;
 
 	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
+	do {
 		pte_t pte = *ptep;
 		pgoff_t pgoff;
 
+		next = addr + PAGE_SIZE;
 		if (pte_none(pte))
-			mincore_unmapped_range(vma, addr, 1, vec);
+			mincore_unmapped_range(vma, addr, next, vec);
 		else if (pte_present(pte))
-			vec[i] = 1;
+			*vec = 1;
 		else if (pte_file(pte)) {
 			pgoff = pte_to_pgoff(pte);
-			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+			*vec = mincore_page(vma->vm_file->f_mapping, pgoff);
 		} else { /* pte is a swap entry */
 			swp_entry_t entry = pte_to_swp_entry(pte);
 
 			if (is_migration_entry(entry)) {
 				/* migration entries are always uptodate */
-				vec[i] = 1;
+				*vec = 1;
 			} else {
 #ifdef CONFIG_SWAP
 				pgoff = entry.val;
-				vec[i] = mincore_page(&swapper_space, pgoff);
+				*vec = mincore_page(&swapper_space, pgoff);
#else
 				WARN_ON(1);
-				vec[i] = 1;
+				*vec = 1;
#endif
 			}
 		}
-	}
+		vec++;
+	} while (ptep++, addr = next, addr != end);
 	pte_unmap_unlock(ptep - 1, ptl);
 }
 
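The rewritten loop follows the page-table-walk idiom used elsewhere in mm/:
compute next up front, do the per-page work, then advance every cursor in the
while condition via comma expressions, terminating when addr reaches end. A
stripped-down user-space sketch of just that loop shape (walk_ptes and the
flat pte array are illustrative stand-ins, not kernel API):

#include <stdio.h>

#define PAGE_SIZE	4096UL

/* Illustrative stand-in: a "pte" is just a presence flag here. */
typedef unsigned char pte_t;

static void walk_ptes(const pte_t *ptep, unsigned long addr,
		      unsigned long end, unsigned char *vec)
{
	unsigned long next;

	do {
		next = addr + PAGE_SIZE;
		*vec = *ptep;	/* per-page work: record presence */
		vec++;
	} while (ptep++, addr = next, addr != end);
}

int main(void)
{
	pte_t ptes[4] = { 1, 0, 1, 1 };
	unsigned char vec[4];
	unsigned long start = 0x8000;

	walk_ptes(ptes, start, start + 4 * PAGE_SIZE, vec);
	for (int i = 0; i < 4; i++)
		printf("page %d: %d\n", i, vec[i]);
	return 0;
}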
@@ -153,25 +154,21 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	unsigned long nr;
 	struct vm_area_struct *vma;
+	unsigned long end;
 
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
 
-	nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
+	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
 
 	if (is_vm_hugetlb_page(vma)) {
-		mincore_hugetlb_page_range(vma, addr, nr, vec);
-		return nr;
+		mincore_hugetlb_page_range(vma, addr, end, vec);
+		return (end - addr) >> PAGE_SHIFT;
 	}
 
-	/*
-	 * Calculate how many pages there are left in the last level of the
-	 * PTE array for our address.
-	 */
-	nr = min(nr, PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)));
+	end = pmd_addr_end(addr, end);
 
 	pgd = pgd_offset(vma->vm_mm, addr);
 	if (pgd_none_or_clear_bad(pgd))
@@ -183,12 +180,12 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 	if (pmd_none_or_clear_bad(pmd))
 		goto none_mapped;
 
-	mincore_pte_range(vma, pmd, addr, nr, vec);
-	return nr;
+	mincore_pte_range(vma, pmd, addr, end, vec);
+	return (end - addr) >> PAGE_SHIFT;
 
 none_mapped:
-	mincore_unmapped_range(vma, addr, nr, vec);
-	return nr;
+	mincore_unmapped_range(vma, addr, end, vec);
+	return (end - addr) >> PAGE_SHIFT;
 }
 
 /*
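The deleted PTRS_PER_PTE arithmetic and the new pmd_addr_end() clamp do the
same job: restrict the range to the single PTE table that mincore_pte_range()
will walk under one pte_offset_map_lock(). The generic pmd_addr_end() rounds
addr up to the next pmd boundary and takes the smaller of that and end. A
user-space model of that clamp (a sketch: the PMD_SHIFT value assumes a 2 MiB
pmd span as on x86-64; the kernel's version is a macro in
include/asm-generic/pgtable.h):

#include <assert.h>

#define PMD_SHIFT	21			/* assumed: 2 MiB pmd span */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	/* the "- 1" keeps the comparison correct if end wrapped to 0 */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* A range crossing a pmd boundary is clamped to the boundary... */
	assert(pmd_addr_end(0x1ff000, 0x400000) == 0x200000);
	/* ...while a range inside one pmd is returned unchanged. */
	assert(pmd_addr_end(0x200000, 0x300000) == 0x300000);
	return 0;
}

This is why do_mincore() can return (end - addr) >> PAGE_SHIFT after the
clamp: the caller retries with the advanced start address until the user's
whole range is covered, one VMA- and pmd-bounded chunk at a time.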