Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	46
1 file changed, 23 insertions, 23 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6912bbf33faa..f127940ec24f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -78,16 +78,13 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	for (z = zonelist->zones; *z; z++) {
 		nid = zone_to_nid(*z);
 		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
-		    !list_empty(&hugepage_freelists[nid]))
-			break;
-	}
-
-	if (*z) {
-		page = list_entry(hugepage_freelists[nid].next,
-				  struct page, lru);
-		list_del(&page->lru);
-		free_huge_pages--;
-		free_huge_pages_node[nid]--;
+		    !list_empty(&hugepage_freelists[nid])) {
+			page = list_entry(hugepage_freelists[nid].next,
+					  struct page, lru);
+			list_del(&page->lru);
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
+		}
 	}
 	return page;
 }
@@ -107,15 +104,19 @@ static int alloc_fresh_huge_page(void)
 {
 	static int prev_nid;
 	struct page *page;
-	static DEFINE_SPINLOCK(nid_lock);
 	int nid;
 
-	spin_lock(&nid_lock);
+	/*
+	 * Copy static prev_nid to local nid, work on that, then copy it
+	 * back to prev_nid afterwards: otherwise there's a window in which
+	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
+	 * But we don't need to use a spin_lock here: it really doesn't
+	 * matter if occasionally a racer chooses the same nid as we do.
+	 */
 	nid = next_node(prev_nid, node_online_map);
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	prev_nid = nid;
-	spin_unlock(&nid_lock);
 
 	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
 					HUGETLB_PAGE_ORDER);
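
The comment added in the hunk above describes a simple lockless round-robin: read the shared prev_nid into a local variable, bring the local copy into range, then write it back, so a racing caller can at worst pick the same node, never an out-of-range one. A minimal standalone sketch of that pattern follows; NNODES, next_nid and the demo loop are illustrative names, not kernel code.

	#include <stdio.h>

	#define NNODES 4	/* stand-in for the number of online nodes */

	static int next_nid(void)
	{
		static int prev_nid;		/* shared, intentionally unlocked */
		int nid = prev_nid + 1;		/* work on a local copy */

		if (nid >= NNODES)
			nid = 0;		/* wrap around while still local */
		prev_nid = nid;			/* publish an always-valid value */
		return nid;
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			printf("nid %d\n", next_nid());
		return 0;
	}
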
@@ -207,7 +208,7 @@ static void update_and_free_page(struct page *page)
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1<< PG_writeback);
 	}
-	page[1].lru.next = NULL;
+	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
@@ -316,15 +317,14 @@ unsigned long hugetlb_total_pages(void)
  * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get
  * this far.
  */
-static struct page *hugetlb_nopage(struct vm_area_struct *vma,
-				unsigned long address, int *unused)
+static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	BUG();
-	return NULL;
+	return 0;
 }
 
 struct vm_operations_struct hugetlb_vm_ops = {
-	.nopage = hugetlb_nopage,
+	.fault = hugetlb_vm_op_fault,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -470,7 +470,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	avoidcopy = (page_count(old_page) == 1);
 	if (avoidcopy) {
 		set_huge_ptep_writable(vma, address, ptep);
-		return VM_FAULT_MINOR;
+		return 0;
 	}
 
 	page_cache_get(old_page);
@@ -495,7 +495,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	page_cache_release(new_page);
 	page_cache_release(old_page);
-	return VM_FAULT_MINOR;
+	return 0;
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -552,7 +552,7 @@ retry:
 	if (idx >= size)
 		goto backout;
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 	if (!pte_none(*ptep))
 		goto backout;
 
@@ -603,7 +603,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return ret;
 	}
 
-	ret = VM_FAULT_MINOR;
+	ret = 0;
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -642,7 +642,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_unlock(&mm->page_table_lock);
 			ret = hugetlb_fault(mm, vma, vaddr, 0);
 			spin_lock(&mm->page_table_lock);
-			if (ret == VM_FAULT_MINOR)
+			if (!(ret & VM_FAULT_MAJOR))
 				continue;
 
 			remainder = 0;
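
The VM_FAULT_MINOR returns dropped throughout this diff reflect the switch of fault return values to a bitmask: plain 0 now means an uneventful (minor) fault, and callers such as follow_hugetlb_page() test individual flag bits instead of comparing against an enum value. A hedged standalone sketch of checking a result under that convention follows; the flag names, their numeric values and fault_in are illustrative stand-ins, not the kernel's definitions.

	#include <stdio.h>

	/* Flag-style return values, mirroring the convention this diff adopts. */
	#define FAULT_OOM	0x01
	#define FAULT_SIGBUS	0x02
	#define FAULT_MAJOR	0x04	/* fault had to do I/O */

	/* Hypothetical fault routine: returns 0 for a minor fault, or a mask. */
	static int fault_in(int simulate_major)
	{
		return simulate_major ? FAULT_MAJOR : 0;
	}

	int main(void)
	{
		int ret = fault_in(1);

		if (ret & (FAULT_OOM | FAULT_SIGBUS))	/* hard errors */
			printf("error\n");
		else if (ret & FAULT_MAJOR)		/* succeeded, but did I/O */
			printf("major fault\n");
		else					/* ret == 0: minor fault */
			printf("minor fault\n");
		return 0;
	}
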