 mm/hugetlb.c | 33 ++++++++++++++++++++++++++-------
 1 file changed, 26 insertions(+), 7 deletions(-)
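In brief, before the raw diff: the open-coded loops that clear and copy a hugepage are factored into two helpers, clear_huge_page() and copy_huge_page(), each of which calls cond_resched() between base pages. A hugepage spans HPAGE_SIZE/PAGE_SIZE base pages (512 for a 2MB hugepage with 4KB base pages), so doing the whole clear or copy without yielding can hold the CPU for a long stretch on a non-preemptible kernel. The clear also moves out of alloc_huge_page() into the fault path that populates a missing hugepage, which avoids pointlessly zeroing a page that the copy-on-write path in hugetlb_cow() is about to overwrite anyway. An annotated restatement of the new helpers follows the diff.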
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 783098f6cf8e..41b1038f76da 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,29 @@ static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 
+static void clear_huge_page(struct page *page, unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr);
+	}
+}
+
+static void copy_huge_page(struct page *dst, struct page *src,
+			   unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+	}
+}
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
@@ -98,7 +121,6 @@ void free_huge_page(struct page *page)
 struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *page;
-	int i;
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page(vma, addr);
@@ -108,8 +130,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_refcounted(page);
-	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-		clear_user_highpage(&page[i], addr);
 	return page;
 }
 
@@ -367,7 +387,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte)
 {
 	struct page *old_page, *new_page;
-	int i, avoidcopy;
+	int avoidcopy;
 
 	old_page = pte_page(pte);
 
@@ -388,9 +408,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	spin_unlock(&mm->page_table_lock);
-	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
-		copy_user_highpage(new_page + i, old_page + i,
-				address + i*PAGE_SIZE);
+	copy_huge_page(new_page, old_page, address);
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -435,6 +453,7 @@ retry:
 		ret = VM_FAULT_OOM;
 		goto out;
 	}
+	clear_huge_page(page, address);
 
 	if (vma->vm_flags & VM_SHARED) {
 		int err;
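For readability, the two new helpers are restated below with explanatory comments. The code itself is exactly what the patch adds; only the comments and the #include hints are supplied here (in mm/hugetlb.c these declarations arrive through the file's existing includes, and HPAGE_SIZE is the architecture's hugepage size from <asm/page.h>).

#include <linux/mm.h>		/* struct page, PAGE_SIZE */
#include <linux/kernel.h>	/* might_sleep() */
#include <linux/sched.h>	/* cond_resched() */
#include <linux/highmem.h>	/* clear_user_highpage(), copy_user_highpage() */

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	/* Assert a sleepable context: cond_resched() below may schedule,
	 * so callers must not hold spinlocks. */
	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		/* Yield to other runnable tasks between base pages, so the
		 * non-preemptible stretch is bounded to one base page. */
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		/* Unlike the clear above, the copy advances the user virtual
		 * address by one base page per iteration; some architectures
		 * use this address as a cache-handling hint. */
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
	}
}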