author     Johannes Weiner <hannes@cmpxchg.org>            2010-05-24 17:32:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-25 11:06:58 -0400
commit     f488401076c5570130c018e573f450a9a6c43365 (patch)
tree       0e704c52a45e44ba8cbb0dac529e9ad251d9edbd /mm/mincore.c
parent     6a60f1b3588aef6ddceaa14192df475d430cce45 (diff)
mincore: break do_mincore() into logical pieces
Split out functions to handle hugetlb ranges, pte ranges and unmapped
ranges, to improve readability but also to prepare the file structure for
nested page table walks.

No semantic changes intended.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
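For orientation (not part of the patch itself): the "vec" buffer these helpers
fill is the per-page status array returned to userspace by mincore(2), one byte
per page with bit 0 set when the page is resident. A minimal userspace sketch of
that interface, using only standard libc and syscall interfaces:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 8 * page;
        unsigned char *vec = malloc(len / page);
        /* Anonymous mapping: pages are not resident until first touched. */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (!vec || buf == MAP_FAILED)
                return 1;

        memset(buf, 0, 4 * page);       /* fault in the first four pages */

        if (mincore(buf, len, vec))     /* kernel fills one byte per page */
                return 1;

        for (size_t i = 0; i < len / page; i++)
                printf("page %zu: %s\n", i,
                       (vec[i] & 1) ? "resident" : "not resident");

        munmap(buf, len);
        free(vec);
        return 0;
}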
Diffstat (limited to 'mm/mincore.c')
-rw-r--r--  mm/mincore.c  171
1 file changed, 97 insertions, 74 deletions
diff --git a/mm/mincore.c b/mm/mincore.c
index 1f6574c5167b..a0c4c10bbab7 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -19,6 +19,42 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
+static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
+                               unsigned long addr, unsigned long nr,
+                               unsigned char *vec)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       struct hstate *h;
+       int i;
+
+       i = 0;
+       h = hstate_vma(vma);
+       while (1) {
+               unsigned char present;
+               pte_t *ptep;
+               /*
+                * Huge pages are always in RAM for now, but
+                * theoretically it needs to be checked.
+                */
+               ptep = huge_pte_offset(current->mm,
+                                      addr & huge_page_mask(h));
+               present = ptep && !huge_pte_none(huge_ptep_get(ptep));
+               while (1) {
+                       vec[i++] = present;
+                       addr += PAGE_SIZE;
+                       /* reach buffer limit */
+                       if (i == nr)
+                               return;
+                       /* check hugepage border */
+                       if (!(addr & ~huge_page_mask(h)))
+                               break;
+               }
+       }
+#else
+       BUG();
+#endif
+}
+
 /*
  * Later we can get more picky about what "in core" means precisely.
  * For now, simply check to see if the page is in the page cache,
@@ -49,6 +85,64 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	return present;
 }
 
+static void mincore_unmapped_range(struct vm_area_struct *vma,
+                               unsigned long addr, unsigned long nr,
+                               unsigned char *vec)
+{
+       int i;
+
+       if (vma->vm_file) {
+               pgoff_t pgoff;
+
+               pgoff = linear_page_index(vma, addr);
+               for (i = 0; i < nr; i++, pgoff++)
+                       vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+       } else {
+               for (i = 0; i < nr; i++)
+                       vec[i] = 0;
+       }
+}
+
+static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                       unsigned long addr, unsigned long nr,
+                       unsigned char *vec)
+{
+       spinlock_t *ptl;
+       pte_t *ptep;
+       int i;
+
+       ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
+               pte_t pte = *ptep;
+               pgoff_t pgoff;
+
+               if (pte_none(pte))
+                       mincore_unmapped_range(vma, addr, 1, vec);
+               else if (pte_present(pte))
+                       vec[i] = 1;
+               else if (pte_file(pte)) {
+                       pgoff = pte_to_pgoff(pte);
+                       vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+               } else { /* pte is a swap entry */
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       if (is_migration_entry(entry)) {
+                               /* migration entries are always uptodate */
+                               vec[i] = 1;
+                       } else {
+#ifdef CONFIG_SWAP
+                               pgoff = entry.val;
+                               vec[i] = mincore_page(&swapper_space, pgoff);
+#else
+                               WARN_ON(1);
+                               vec[i] = 1;
+#endif
+                       }
+               }
+       }
+       pte_unmap_unlock(ptep - 1, ptl);
+}
+
 /*
  * Do a chunk of "sys_mincore()". We've already checked
  * all the arguments, we hold the mmap semaphore: we should
@@ -59,11 +153,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *ptep;
-	spinlock_t *ptl;
 	unsigned long nr;
-	int i;
-	pgoff_t pgoff;
 	struct vm_area_struct *vma;
 
 	vma = find_vma(current->mm, addr);
@@ -72,35 +162,10 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 
 	nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
 
-#ifdef CONFIG_HUGETLB_PAGE
 	if (is_vm_hugetlb_page(vma)) {
-		struct hstate *h;
-
-		i = 0;
-		h = hstate_vma(vma);
-		while (1) {
-			unsigned char present;
-			/*
-			 * Huge pages are always in RAM for now, but
-			 * theoretically it needs to be checked.
-			 */
-			ptep = huge_pte_offset(current->mm,
-					       addr & huge_page_mask(h));
-			present = ptep && !huge_pte_none(huge_ptep_get(ptep));
-			while (1) {
-				vec[i++] = present;
-				addr += PAGE_SIZE;
-				/* reach buffer limit */
-				if (i == nr)
-					return nr;
-				/* check hugepage border */
-				if (!(addr & ~huge_page_mask(h)))
-					break;
-			}
-		}
+		mincore_hugetlb_page_range(vma, addr, nr, vec);
 		return nr;
 	}
-#endif
 
 	/*
 	 * Calculate how many pages there are left in the last level of the
@@ -118,53 +183,11 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 	if (pmd_none_or_clear_bad(pmd))
 		goto none_mapped;
 
-	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
-		pte_t pte = *ptep;
-
-		if (pte_none(pte)) {
-			if (vma->vm_file) {
-				pgoff = linear_page_index(vma, addr);
-				vec[i] = mincore_page(vma->vm_file->f_mapping,
-							pgoff);
-			} else
-				vec[i] = 0;
-		} else if (pte_present(pte))
-			vec[i] = 1;
-		else if (pte_file(pte)) {
-			pgoff = pte_to_pgoff(pte);
-			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
-		} else { /* pte is a swap entry */
-			swp_entry_t entry = pte_to_swp_entry(pte);
-
-			if (is_migration_entry(entry)) {
-				/* migration entries are always uptodate */
-				vec[i] = 1;
-			} else {
-#ifdef CONFIG_SWAP
-				pgoff = entry.val;
-				vec[i] = mincore_page(&swapper_space, pgoff);
-#else
-				WARN_ON(1);
-				vec[i] = 1;
-#endif
-			}
-		}
-	}
-	pte_unmap_unlock(ptep - 1, ptl);
-
+	mincore_pte_range(vma, pmd, addr, nr, vec);
 	return nr;
 
 none_mapped:
-	if (vma->vm_file) {
-		pgoff = linear_page_index(vma, addr);
-		for (i = 0; i < nr; i++, pgoff++)
-			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
-	} else {
-		for (i = 0; i < nr; i++)
-			vec[i] = 0;
-	}
-
+	mincore_unmapped_range(vma, addr, nr, vec);
 	return nr;
 }
 
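The changelog's mention of nested page table walks refers to follow-up work;
this commit only moves code. Purely as a hedged sketch of the shape being
prepared for (the helper name and exact interface below are illustrative
assumptions, not the committed follow-up code), a pmd-level walker built on
the new helpers might look like:

/*
 * Hypothetical sketch only: not part of this patch or its follow-ups as
 * committed. It shows how a nested walk could dispatch each pmd entry to
 * mincore_pte_range() or mincore_unmapped_range().
 */
static void mincore_pmd_range_sketch(struct vm_area_struct *vma, pud_t *pud,
                                     unsigned long addr, unsigned long end,
                                     unsigned char *vec)
{
        unsigned long next;
        pmd_t *pmd;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        /* nothing mapped at this level: report page cache state */
                        mincore_unmapped_range(vma, addr,
                                               (next - addr) >> PAGE_SHIFT, vec);
                else
                        mincore_pte_range(vma, pmd, addr,
                                          (next - addr) >> PAGE_SHIFT, vec);
                vec += (next - addr) >> PAGE_SHIFT;
        } while (pmd++, addr = next, addr != end);
}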