about summary refs log tree commit diff stats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c24
1 files changed, 16 insertions, 8 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180db3033..acc0fb3cf067 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -66,7 +66,7 @@ static void enqueue_huge_page(struct page *page)
 static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 				unsigned long address)
 {
-	int nid = numa_node_id();
+	int nid;
 	struct page *page = NULL;
 	struct zonelist *zonelist = huge_zonelist(vma, address);
 	struct zone **z;
@@ -101,13 +101,20 @@ static void free_huge_page(struct page *page)
 
 static int alloc_fresh_huge_page(void)
 {
-	static int nid = 0;
+	static int prev_nid;
 	struct page *page;
-	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
-				HUGETLB_PAGE_ORDER);
-	nid = next_node(nid, node_online_map);
+	static DEFINE_SPINLOCK(nid_lock);
+	int nid;
+
+	spin_lock(&nid_lock);
+	nid = next_node(prev_nid, node_online_map);
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
+	prev_nid = nid;
+	spin_unlock(&nid_lock);
+
+	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+				HUGETLB_PAGE_ORDER);
 	if (page) {
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
@@ -326,9 +333,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	pte_t entry;
 
 	entry = pte_mkwrite(pte_mkdirty(*ptep));
-	ptep_set_access_flags(vma, address, ptep, entry, 1);
-	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
+	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+		update_mmu_cache(vma, address, entry);
+		lazy_mmu_prot_update(entry);
+	}
 }
 
 