| author | David Gibson <david@gibson.dropbear.id.au> | 2005-06-21 20:14:44 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-21 21:46:15 -0400 |
| commit | 63551ae0feaaa23807ebea60de1901564bbef32e (patch) | |
| tree | f6f97f60f83c3e9813bdfcc6039c499997b1ea10 /arch/ppc64 | |
| parent | 1e7e5a9048b30c57ba1ddaa6cdf59b21b65cde99 (diff) | |
[PATCH] Hugepage consolidation
A lot of the code in arch/*/mm/hugetlbpage.c is quite similar. This patch
consolidates much of that code across the architectures, putting the
combined version in mm/hugetlb.c. There are a couple of uglyish hacks needed
to convert all the hugepage architectures, but the result is a very large
reduction in the total amount of code. It also means things like hugepage
lazy allocation could be implemented in one place instead of six.
Tested, at least a little, on ppc64, i386 and x86_64.
Notes:
- this patch changes the meaning of set_huge_pte() to be more
  analogous to set_pte()
- does SH4 need a special huge_ptep_get_and_clear()?
Acked-by: William Lee Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
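A note on the set_huge_pte() change mentioned above: in the old per-arch version (the ppc64 copy removed by this patch, visible in the diff below), the helper both constructed the hugepage pte from a struct page and installed it. Making it "more analogous to set_pte()" suggests the consolidated code builds the entry first and the helper only writes a ready-made pte. The sketch below illustrates that contrast; the "before" body mirrors the removed ppc64 code, while make_huge_pte() and the new-style call are illustrative assumptions, not the exact mm/hugetlb.c code introduced by this commit.

```c
/*
 * Sketch only: contrasts the old and new set_huge_pte() conventions.
 * "Before" follows the ppc64 code removed by this patch; the "after"
 * helper names and signatures are assumed, for illustration.
 */

/* Before: the arch helper built the pte itself and then installed it. */
static void set_huge_pte_old(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long addr, struct page *page,
			     pte_t *ptep, int write_access)
{
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkhuge(pte_mkyoung(entry));

	set_pte_at(mm, addr, ptep, entry);
}

/*
 * After: generic code constructs the entry (hypothetical make_huge_pte()
 * shown here), and set_huge_pte() merely installs it, like set_pte().
 */
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
			   int write_access)
{
	pte_t entry;

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));

	return pte_mkhuge(pte_mkyoung(entry));
}

/* Hypothetical new-style call site, mirroring set_pte_at():
 *
 *	set_huge_pte(mm, addr, ptep,
 *		     make_huge_pte(vma, page, vma->vm_flags & VM_WRITE));
 */
```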
Diffstat (limited to 'arch/ppc64')
-rw-r--r-- | arch/ppc64/mm/hugetlbpage.c | 180
1 file changed, 2 insertions(+), 178 deletions(-)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index d3bf86a5c1ad..b4ab766f5980 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -121,7 +121,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr
 	return hugepte_offset(dir, addr);
 }
 
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pud_t *pud;
 
@@ -134,7 +134,7 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return hugepte_offset(pud, addr);
 }
 
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pud_t *pud;
 
@@ -147,25 +147,6 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepte_alloc(mm, pud, addr);
 }
 
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-			 unsigned long addr, struct page *page,
-			 pte_t *ptep, int write_access)
-{
-	pte_t entry;
-
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-	if (write_access) {
-		entry =
-		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-	} else {
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	}
-	entry = pte_mkyoung(entry);
-	entry = pte_mkhuge(entry);
-
-	set_pte_at(mm, addr, ptep, entry);
-}
-
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -259,80 +240,6 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	return -EINVAL;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			    struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	int err = -ENOMEM;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto out;
-
-		src_pte = huge_pte_offset(src, addr);
-		entry = *src_pte;
-
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		set_pte_at(dst, addr, dst_pte, entry);
-
-		addr += HPAGE_SIZE;
-	}
-
-	err = 0;
-out:
-	return err;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		    struct page **pages, struct vm_area_struct **vmas,
-		    unsigned long *position, int *length, int i)
-{
-	unsigned long vpfn, vaddr = *position;
-	int remainder = *length;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-
-	vpfn = vaddr/PAGE_SIZE;
-	while (vaddr < vma->vm_end && remainder) {
-		if (pages) {
-			pte_t *pte;
-			struct page *page;
-
-			pte = huge_pte_offset(mm, vaddr);
-
-			/* hugetlb should be locked, and hence, prefaulted */
-			WARN_ON(!pte || pte_none(*pte));
-
-			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
-			WARN_ON(!PageCompound(page));
-
-			get_page(page);
-			pages[i] = page;
-		}
-
-		if (vmas)
-			vmas[i] = vma;
-
-		vaddr += PAGE_SIZE;
-		++vpfn;
-		--remainder;
-		++i;
-	}
-
-	*length = remainder;
-	*position = vaddr;
-
-	return i;
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
@@ -363,89 +270,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma,
-			  unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr;
-	pte_t *ptep;
-	struct page *page;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-	BUG_ON((start % HPAGE_SIZE) != 0);
-	BUG_ON((end % HPAGE_SIZE) != 0);
-
-	for (addr = start; addr < end; addr += HPAGE_SIZE) {
-		pte_t pte;
-
-		ptep = huge_pte_offset(mm, addr);
-		if (!ptep || pte_none(*ptep))
-			continue;
-
-		pte = *ptep;
-		page = pte_page(pte);
-		pte_clear(mm, addr, ptep);
-
-		put_page(page);
-	}
-	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-	flush_tlb_pending();
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
-	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (! pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				free_huge_page(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
-}
-
 /* Because we have an exclusive hugepage region which lies within the
  * normal user address space, we have to take special measures to make
  * non-huge mmap()s evade the hugepage reserved regions. */