aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-01-13 18:46:47 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:41 -0500
commit47ad8475c000141eacb3ecda5e5ce4b43a9cd04d (patch)
tree78c29aaf2ae9340e314a25ea08e9724471cf4414 /mm/hugetlb.c
parent3f04f62f90d46a82dd73027c5fd7a15daed5c33d (diff)
thp: clear_copy_huge_page
Move the copy/clear_huge_page functions to common code to share between
hugetlb.c and huge_memory.c.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c70
1 file changed, 3 insertions(+), 67 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 85855240933d..7bf223d6677b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,71 +394,6 @@ static int vma_has_reserves(struct vm_area_struct *vma)
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-	struct page *p = page;
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
-	}
-}
-static void clear_huge_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-
-	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, sz);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++) {
-		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-	}
-}
-
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-			unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-static void copy_user_huge_page(struct page *dst, struct page *src,
-			unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-	}
-}
-
 static void copy_gigantic_page(struct page *dst, struct page *src)
 {
 	int i;
@@ -2454,7 +2389,8 @@ retry_avoidcopy:
 		return VM_FAULT_OOM;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma,
+			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
 	/*
@@ -2558,7 +2494,7 @@ retry:
 		ret = -PTR_ERR(page);
 		goto out;
 	}
-	clear_huge_page(page, address, huge_page_size(h));
+	clear_huge_page(page, address, pages_per_huge_page(h));
 	__SetPageUptodate(page);
 
 	if (vma->vm_flags & VM_MAYSHARE) {