path: root/mm
author	David Gibson <david@gibson.dropbear.id.au>	2006-03-22 03:08:51 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:54:03 -0500
commit	79ac6ba40eb8d70f0d204e98ae9b63280ad1018c (patch)
tree	522d835dbdc6b6efe6b834f3f1f9a21a8ba161e5 /mm
parent	8f860591ffb29738cf5539b6fbf27f50dcdeb380 (diff)
[PATCH] hugepage: Small fixes to hugepage clear/copy path
Move the loops used in mm/hugetlb.c to clear and copy hugepages into their own functions for clarity. As we do so, we add some checks of need_resched - we are, after all, copying megabytes of memory here. We also add might_sleep() accordingly. We generally drop locks around the clear and copy already, but not everyone has PREEMPT enabled, so we should still be checking explicitly.

For this to work, we need to remove the clear_huge_page() from alloc_huge_page(), which is called with the page_table_lock held in the COW path. We move the clear_huge_page() to just after the alloc_huge_page() in the hugepage no-page path. In the COW path, the new page is about to be copied over, so clearing it was just a waste of time anyway. As a side effect, we also fix the fact that we held the page_table_lock for far too long in this path by calling alloc_huge_page() under it.

It causes no regressions on the libhugetlbfs testsuite (ppc64, POWER5).

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
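As a rough sketch of the chunking pattern this patch introduces: touch the hugepage one base page at a time, offering to reschedule before each chunk, so that a multi-megabyte clear or copy cannot monopolize the CPU on non-PREEMPT kernels. The following is a minimal user-space analogue, not the kernel code; the real helpers appear in the diff below, and PAGE_SZ, HPAGE_SZ, and maybe_yield() here are stand-ins for PAGE_SIZE, HPAGE_SIZE, and cond_resched().

    /*
     * User-space sketch of the chunked-clear pattern, assuming a 4KB
     * base page and a 16MB hugepage (stand-in values only; the kernel
     * helpers below use clear_user_highpage()/copy_user_highpage()).
     */
    #include <string.h>

    #define PAGE_SZ  4096UL
    #define HPAGE_SZ (16UL * 1024 * 1024)

    /* Stand-in for cond_resched(): a point where we offer to yield the CPU. */
    static void maybe_yield(void) { }

    static void clear_huge(char *page)
    {
            unsigned long i;

            for (i = 0; i < HPAGE_SZ / PAGE_SZ; i++) {
                    maybe_yield();  /* check for a pending resched per chunk */
                    memset(page + i * PAGE_SZ, 0, PAGE_SZ);
            }
    }

Clearing one base page per iteration, with a scheduling check before each chunk, bounds the latency a multi-megabyte clear can add even when kernel preemption is disabled.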
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c | 33 ++++++++++++++++++++++++++-------
1 file changed, 26 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 783098f6cf8e..41b1038f76da 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,29 @@ static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 
+static void clear_huge_page(struct page *page, unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr);
+	}
+}
+
+static void copy_huge_page(struct page *dst, struct page *src,
+			   unsigned long addr)
+{
+	int i;
+
+	might_sleep();
+	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+	}
+}
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
@@ -98,7 +121,6 @@ void free_huge_page(struct page *page)
 struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	struct page *page;
-	int i;
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page(vma, addr);
@@ -108,8 +130,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 	}
 	spin_unlock(&hugetlb_lock);
 	set_page_refcounted(page);
-	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-		clear_user_highpage(&page[i], addr);
 	return page;
 }
 
@@ -367,7 +387,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *ptep, pte_t pte)
 {
 	struct page *old_page, *new_page;
-	int i, avoidcopy;
+	int avoidcopy;
 
 	old_page = pte_page(pte);
 
@@ -388,9 +408,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	spin_unlock(&mm->page_table_lock);
-	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
-		copy_user_highpage(new_page + i, old_page + i,
-				   address + i*PAGE_SIZE);
+	copy_huge_page(new_page, old_page, address);
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -435,6 +453,7 @@ retry:
 		ret = VM_FAULT_OOM;
 		goto out;
 	}
+	clear_huge_page(page, address);
 
 	if (vma->vm_flags & VM_SHARED) {
 		int err;
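For context on the COW hunk above: the shape of the fix is to drop the page table lock for the duration of the slow copy, then retake it and re-look-up the PTE, because another thread may have changed the mapping while the lock was not held. Below is a hedged user-space analogue of that locking pattern; the pthread mutex stands in for page_table_lock, and cow_ctx, copy_huge, and revalidate_pte are illustrative placeholders, not kernel APIs.

    /*
     * User-space analogue of the hugetlb_cow() locking shape, assuming
     * the caller enters with ctx->lock held. All names are illustrative.
     */
    #include <pthread.h>

    struct cow_ctx {
            pthread_mutex_t lock;           /* stands in for page_table_lock */
            void (*copy_huge)(void *dst, void *src);
            void *(*revalidate_pte)(void);  /* re-lookup after reacquiring */
    };

    void *cow_copy(struct cow_ctx *ctx, void *new_page, void *old_page)
    {
            /* Don't hold the lock across a multi-megabyte copy... */
            pthread_mutex_unlock(&ctx->lock);
            ctx->copy_huge(new_page, old_page);
            pthread_mutex_lock(&ctx->lock);

            /* ...but the mapping may have changed while we were unlocked. */
            return ctx->revalidate_pte();
    }

This is why the patched hugetlb_cow() re-fetches the PTE with huge_pte_offset() after retaking mm->page_table_lock.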