diff options
author | Andrea Arcangeli <aarcange@redhat.com> | 2011-01-13 18:46:47 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:41 -0500 |
commit | 47ad8475c000141eacb3ecda5e5ce4b43a9cd04d (patch) | |
tree | 78c29aaf2ae9340e314a25ea08e9724471cf4414 /mm/memory.c | |
parent | 3f04f62f90d46a82dd73027c5fd7a15daed5c33d (diff) |
thp: clear_copy_huge_page
Move the copy/clear_huge_page functions to common code to share between
hugetlb.c and huge_memory.c.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 71 |
1 file changed, 71 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index 567bca80ea53..60e1c68d8218 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3645,3 +3645,74 @@ void might_fault(void) | |||
3645 | } | 3645 | } |
3646 | EXPORT_SYMBOL(might_fault); | 3646 | EXPORT_SYMBOL(might_fault); |
3647 | #endif | 3647 | #endif |
3648 | |||
3649 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) | ||
3650 | static void clear_gigantic_page(struct page *page, | ||
3651 | unsigned long addr, | ||
3652 | unsigned int pages_per_huge_page) | ||
3653 | { | ||
3654 | int i; | ||
3655 | struct page *p = page; | ||
3656 | |||
3657 | might_sleep(); | ||
3658 | for (i = 0; i < pages_per_huge_page; | ||
3659 | i++, p = mem_map_next(p, page, i)) { | ||
3660 | cond_resched(); | ||
3661 | clear_user_highpage(p, addr + i * PAGE_SIZE); | ||
3662 | } | ||
3663 | } | ||
3664 | void clear_huge_page(struct page *page, | ||
3665 | unsigned long addr, unsigned int pages_per_huge_page) | ||
3666 | { | ||
3667 | int i; | ||
3668 | |||
3669 | if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { | ||
3670 | clear_gigantic_page(page, addr, pages_per_huge_page); | ||
3671 | return; | ||
3672 | } | ||
3673 | |||
3674 | might_sleep(); | ||
3675 | for (i = 0; i < pages_per_huge_page; i++) { | ||
3676 | cond_resched(); | ||
3677 | clear_user_highpage(page + i, addr + i * PAGE_SIZE); | ||
3678 | } | ||
3679 | } | ||
3680 | |||
3681 | static void copy_user_gigantic_page(struct page *dst, struct page *src, | ||
3682 | unsigned long addr, | ||
3683 | struct vm_area_struct *vma, | ||
3684 | unsigned int pages_per_huge_page) | ||
3685 | { | ||
3686 | int i; | ||
3687 | struct page *dst_base = dst; | ||
3688 | struct page *src_base = src; | ||
3689 | |||
3690 | for (i = 0; i < pages_per_huge_page; ) { | ||
3691 | cond_resched(); | ||
3692 | copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); | ||
3693 | |||
3694 | i++; | ||
3695 | dst = mem_map_next(dst, dst_base, i); | ||
3696 | src = mem_map_next(src, src_base, i); | ||
3697 | } | ||
3698 | } | ||
3699 | |||
3700 | void copy_user_huge_page(struct page *dst, struct page *src, | ||
3701 | unsigned long addr, struct vm_area_struct *vma, | ||
3702 | unsigned int pages_per_huge_page) | ||
3703 | { | ||
3704 | int i; | ||
3705 | |||
3706 | if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { | ||
3707 | copy_user_gigantic_page(dst, src, addr, vma, | ||
3708 | pages_per_huge_page); | ||
3709 | return; | ||
3710 | } | ||
3711 | |||
3712 | might_sleep(); | ||
3713 | for (i = 0; i < pages_per_huge_page; i++) { | ||
3714 | cond_resched(); | ||
3715 | copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); | ||
3716 | } | ||
3717 | } | ||
3718 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | ||