author    Hugh Dickins <hugh@veritas.com>          2005-10-29 21:16:09 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>   2005-10-30 00:40:38 -0400
commit    147efea8ebb034b48aee806caae1da9a2ee41b38 (patch)
tree      6b5d03d6ab478552509c3ce813e2be1c475612de /arch/sh64
parent    7ee78232501ea9de2b6c8f10d32c9a0fee541357 (diff)
[PATCH] mm: sh64 hugetlbpage.c
The sh64 hugetlbpage.c seems to be erroneous, left over from a bygone age and clashing with the common hugetlb.c. Replace it with a copy of the sh hugetlbpage.c, except delete the mk_pte_huge macro, which neither uses.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/sh64')
-rw-r--r--  arch/sh64/mm/hugetlbpage.c | 188
1 file changed, 12 insertions(+), 176 deletions(-)
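
Editorial note: both loops that survive the diff below treat one huge page as (1 << HUGETLB_PAGE_ORDER) consecutive ordinary PTEs, bumping the physical address by PAGE_SIZE per slot. The following is a minimal standalone sketch of that fan-out, not kernel code: pte_t is reduced to its physical-address bits, and the order of 6 (64 x 4 KiB = 256 KiB per huge page) is only an assumed example, since the real value is configuration-dependent.

/*
 * Standalone model of the huge-PTE fan-out used by set_huge_pte_at()
 * and huge_ptep_get_and_clear() in the patch below.  All names and
 * constants here are simplified stand-ins, not kernel definitions.
 */
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define HUGETLB_PAGE_ORDER 6    /* assumed example; config-dependent in the kernel */

typedef unsigned long pte_t;    /* stand-in: just the physical-address bits */

/* Mirrors the patched loop: one ordinary PTE per base page, advancing the
 * physical address by PAGE_SIZE each step (the kernel does
 * set_pte_at(mm, addr, ptep, entry) and pte_val(entry) += PAGE_SIZE). */
static void set_huge_pte_at(pte_t *ptep, pte_t entry)
{
	int i;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		ptep[i] = entry;
		entry += PAGE_SIZE;
	}
}

int main(void)
{
	pte_t ptes[1 << HUGETLB_PAGE_ORDER];

	set_huge_pte_at(ptes, 0x80000000UL);
	printf("first %#lx last %#lx\n",
	       ptes[0], ptes[(1 << HUGETLB_PAGE_ORDER) - 1]);
	return 0;
}

Running this prints first 0x80000000 last 0x8003f000, i.e. 64 entries spanning one 256 KiB huge page.
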
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index dcd9c8a8baf8..ed6a505b3ee2 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -54,41 +54,31 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-			 struct page *page, pte_t * page_table, int write_access)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
 {
-	unsigned long i;
-	pte_t entry;
-
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-
-	if (write_access)
-		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-						       vma->vm_page_prot)));
-	else
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	entry = pte_mkyoung(entry);
-	mk_pte_huge(entry);
+	int i;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte(page_table, entry);
-		page_table++;
-
+		set_pte_at(mm, addr, ptep, entry);
+		ptep++;
+		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
 }
 
-pte_t huge_ptep_get_and_clear(pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
 {
 	pte_t entry;
+	int i;
 
 	entry = *ptep;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(pte);
-		pte++;
+		pte_clear(mm, addr, ptep);
+		addr += PAGE_SIZE;
+		ptep++;
 	}
 
 	return entry;
@@ -106,79 +96,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			    struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	int i;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto nomem;
-		src_pte = huge_pte_offset(src, addr);
-		BUG_ON(!src_pte || pte_none(*src_pte));
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-			set_pte(dst_pte, entry);
-			pte_val(entry) += PAGE_SIZE;
-			dst_pte++;
-		}
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		addr += HPAGE_SIZE;
-	}
-	return 0;
-
-nomem:
-	return -ENOMEM;
-}
-
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i)
-{
-	unsigned long vaddr = *position;
-	int remainder = *length;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-
-	while (vaddr < vma->vm_end && remainder) {
-		if (pages) {
-			pte_t *pte;
-			struct page *page;
-
-			pte = huge_pte_offset(mm, vaddr);
-
-			/* hugetlb should be locked, and hence, prefaulted */
-			BUG_ON(!pte || pte_none(*pte));
-
-			page = pte_page(*pte);
-
-			WARN_ON(!PageCompound(page));
-
-			get_page(page);
-			pages[i] = page;
-		}
-
-		if (vmas)
-			vmas[i] = vma;
-
-		vaddr += PAGE_SIZE;
-		--remainder;
-		++i;
-	}
-
-	*length = remainder;
-	*position = vaddr;
-
-	return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
@@ -195,84 +112,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	return NULL;
 }
-
-void unmap_hugepage_range(struct vm_area_struct *vma,
-			  unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
-	pte_t *pte;
-	struct page *page;
-	int i;
-
-	BUG_ON(start & (HPAGE_SIZE - 1));
-	BUG_ON(end & (HPAGE_SIZE - 1));
-
-	for (address = start; address < end; address += HPAGE_SIZE) {
-		pte = huge_pte_offset(mm, address);
-		BUG_ON(!pte);
-		if (pte_none(*pte))
-			continue;
-		page = pte_page(*pte);
-		put_page(page);
-		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-			pte_clear(mm, address+(i*PAGE_SIZE), pte);
-			pte++;
-		}
-	}
-	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-	flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
-	BUG_ON(vma->vm_start & ~HPAGE_MASK);
-	BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (!pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				free_huge_page(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
-}