author		Johannes Weiner <hannes@cmpxchg.org>	2010-05-24 17:32:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:06:58 -0400
commit		6a60f1b3588aef6ddceaa14192df475d430cce45
tree		4469beefed2e610eef9204b1a4d63cddc5d9aae4
parent		c0ff7453bb5c7c98e0885fb94279f2571946f280
mincore: cleanups
This fixes some minor issues that bugged me while going over the code:

o adjust argument order of do_mincore() to match the syscall
o simplify range length calculation
o drop superfluous shift in huge tlb calculation, address is page aligned
o drop dead nr_huge calculation
o check pte_none() before pte_present()
o comment and whitespace fixes

No semantic changes intended.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/mincore.c | 76 +++++++++++++++++++++++++++-------------------------------------------------------------------------
 1 file changed, 27 insertions(+), 49 deletions(-)
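For context from the user-space side: mincore(2) reports, one status byte per page, whether the pages of a mapping are resident; only the least significant bit of each byte is meaningful. The sketch below is just an illustration of the interface whose kernel implementation this patch cleans up; it is not part of the patch, and the file name "data.bin" is an arbitrary assumption.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(void)
{
	/* "data.bin" is a placeholder; any readable file works. */
	int fd = open("data.bin", O_RDONLY);
	if (fd < 0)
		return 1;

	struct stat st;
	if (fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = st.st_size;
	size_t pages = (len + pagesize - 1) / pagesize;

	void *addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	unsigned char *vec = malloc(pages);
	if (!vec)
		return 1;

	/* One status byte per page; bit 0 set means the page is in core. */
	if (mincore(addr, len, vec) == 0) {
		size_t resident = 0;
		for (size_t i = 0; i < pages; i++)
			resident += vec[i] & 1;
		printf("%zu of %zu pages resident\n", resident, pages);
	}

	free(vec);
	munmap(addr, len);
	close(fd);
	return 0;
}

On the kernel side, each do_mincore() call fills at most PAGE_SIZE of those status bytes, which is why the call site in the final hunk below clamps with min(pages, PAGE_SIZE).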
diff --git a/mm/mincore.c b/mm/mincore.c
index f77433c20279..1f6574c5167b 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -54,7 +54,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
  * all the arguments, we hold the mmap semaphore: we should
  * just return the amount of info we're asked for.
  */
-static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages)
+static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -64,35 +64,29 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
 	unsigned long nr;
 	int i;
 	pgoff_t pgoff;
-	struct vm_area_struct *vma = find_vma(current->mm, addr);
+	struct vm_area_struct *vma;
 
-	/*
-	 * find_vma() didn't find anything above us, or we're
-	 * in an unmapped hole in the address space: ENOMEM.
-	 */
+	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
 
+	nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
+
 #ifdef CONFIG_HUGETLB_PAGE
 	if (is_vm_hugetlb_page(vma)) {
 		struct hstate *h;
-		unsigned long nr_huge;
-		unsigned char present;
 
 		i = 0;
-		nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
 		h = hstate_vma(vma);
-		nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
-			  - (addr >> huge_page_shift(h)) + 1;
-		nr_huge = min(nr_huge,
-			      (vma->vm_end - addr) >> huge_page_shift(h));
 		while (1) {
-			/* hugepage always in RAM for now,
-			 * but generally it needs to be check */
+			unsigned char present;
+			/*
+			 * Huge pages are always in RAM for now, but
+			 * theoretically it needs to be checked.
+			 */
 			ptep = huge_pte_offset(current->mm,
 					       addr & huge_page_mask(h));
-			present = !!(ptep &&
-				     !huge_pte_none(huge_ptep_get(ptep)));
+			present = ptep && !huge_pte_none(huge_ptep_get(ptep));
 			while (1) {
 				vec[i++] = present;
 				addr += PAGE_SIZE;
@@ -100,8 +94,7 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
 				if (i == nr)
 					return nr;
 				/* check hugepage border */
-				if (!((addr & ~huge_page_mask(h))
-						>> PAGE_SHIFT))
+				if (!(addr & ~huge_page_mask(h)))
 					break;
 			}
 		}
@@ -113,17 +106,7 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
 	 * Calculate how many pages there are left in the last level of the
 	 * PTE array for our address.
 	 */
-	nr = PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1));
-
-	/*
-	 * Don't overrun this vma
-	 */
-	nr = min(nr, (vma->vm_end - addr) >> PAGE_SHIFT);
-
-	/*
-	 * Don't return more than the caller asked for
-	 */
-	nr = min(nr, pages);
+	nr = min(nr, PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)));
 
 	pgd = pgd_offset(vma->vm_mm, addr);
 	if (pgd_none_or_clear_bad(pgd))
@@ -137,43 +120,38 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
 
 	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
-		unsigned char present;
 		pte_t pte = *ptep;
 
-		if (pte_present(pte)) {
-			present = 1;
-
-		} else if (pte_none(pte)) {
+		if (pte_none(pte)) {
 			if (vma->vm_file) {
 				pgoff = linear_page_index(vma, addr);
-				present = mincore_page(vma->vm_file->f_mapping,
+				vec[i] = mincore_page(vma->vm_file->f_mapping,
 							pgoff);
 			} else
-				present = 0;
-
-		} else if (pte_file(pte)) {
+				vec[i] = 0;
+		} else if (pte_present(pte))
+			vec[i] = 1;
+		else if (pte_file(pte)) {
 			pgoff = pte_to_pgoff(pte);
-			present = mincore_page(vma->vm_file->f_mapping, pgoff);
-
+			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
 		} else { /* pte is a swap entry */
 			swp_entry_t entry = pte_to_swp_entry(pte);
+
 			if (is_migration_entry(entry)) {
 				/* migration entries are always uptodate */
-				present = 1;
+				vec[i] = 1;
 			} else {
 #ifdef CONFIG_SWAP
 				pgoff = entry.val;
-				present = mincore_page(&swapper_space, pgoff);
+				vec[i] = mincore_page(&swapper_space, pgoff);
 #else
 				WARN_ON(1);
-				present = 1;
+				vec[i] = 1;
 #endif
 			}
 		}
-
-		vec[i] = present;
 	}
-	pte_unmap_unlock(ptep-1, ptl);
+	pte_unmap_unlock(ptep - 1, ptl);
 
 	return nr;
 
@@ -247,7 +225,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
 		 * the temporary buffer size.
 		 */
 		down_read(&current->mm->mmap_sem);
-		retval = do_mincore(start, tmp, min(pages, PAGE_SIZE));
+		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
 		up_read(&current->mm->mmap_sem);
 
 		if (retval <= 0)
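For context (not part of this patch): the call site in the last hunk sits inside sys_mincore()'s chunking loop, which repeatedly asks do_mincore() for at most PAGE_SIZE entries and copies each batch out to user space before advancing. A rough sketch of that pattern, reconstructed from the hunk context and shown here only to illustrate why do_mincore() returns the number of entries it filled:

	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		/* retval is the number of status bytes just produced. */
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}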