author		Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-10-08 19:29:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2012-10-09 03:22:27 -0400
commit		420256ef02660af0acf28c12fe4b7d514ca88a4d
tree		a2710b4d7b9983d084c59fb8c4a4df35be98d321	/mm/huge_memory.c
parent		d516904bd239fe2c9f1bd46cf146bb4b8831321c
thp: release page in page pre-alloc path
If NUMA is enabled, the page can be released in the page pre-alloc path, which lets the CONFIG_NUMA-dependent cleanup code be reduced.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
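The heart of the change is an ownership handoff: collapse_huge_page() now publishes the freshly allocated hugepage through the caller's *hpage slot immediately after allocation, so later failure paths (such as an unsuccessful memcg charge) can simply return and leave the caller's single cleanup point to drop the reference. A minimal user-space sketch of that pattern follows; alloc_page_stub(), release_page() and charge_fails() are stand-in names for illustration, not kernel APIs.

/*
 * Sketch only: the callee publishes the new page through the caller's
 * slot right after allocation, so its failure paths just return.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int id; };

static struct page *alloc_page_stub(void) { return malloc(sizeof(struct page)); }
static void release_page(struct page *p) { free(p); }
static bool charge_fails(void) { return true; }	/* simulate a failed memcg charge */

static void collapse(struct page **hpage)
{
	struct page *new_page = alloc_page_stub();

	if (!new_page)
		return;			/* *hpage stays NULL on allocation failure */
	*hpage = new_page;		/* publish early: the caller owns the reference now */

	if (charge_fails())
		return;			/* no conditional put_page() needed any more */

	release_page(new_page);		/* success: model the page being consumed */
	*hpage = NULL;
}

int main(void)
{
	struct page *hpage = NULL;

	collapse(&hpage);
	if (hpage) {			/* single, unconditional cleanup point */
		printf("dropping unused page\n");
		release_page(hpage);
		hpage = NULL;
	}
	return 0;
}

Because the reference always lives in *hpage, the #ifdef CONFIG_NUMA put_page() calls in the failure paths become unnecessary, which is exactly what the hunks below delete.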
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d5b5fcc73c44..9c4390f60c3e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1873,15 +1873,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	*hpage = new_page;
 	count_vm_event(THP_COLLAPSE_ALLOC);
 #endif
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-#ifdef CONFIG_NUMA
-		put_page(new_page);
-#endif
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
-	}
 
 	/*
 	 * Prevent all access to pagetables with the exception of
@@ -1982,9 +1979,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	prepare_pmd_huge_pte(pgtable, mm);
 	spin_unlock(&mm->page_table_lock);
 
-#ifndef CONFIG_NUMA
 	*hpage = NULL;
-#endif
+
 	khugepaged_pages_collapsed++;
 out_up_write:
 	up_write(&mm->mmap_sem);
@@ -1992,9 +1988,6 @@ out_up_write:
 
 out:
 	mem_cgroup_uncharge_page(new_page);
-#ifdef CONFIG_NUMA
-	put_page(new_page);
-#endif
 	goto out_up_write;
 }
 
@@ -2260,8 +2253,6 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-		cond_resched();
-
 #ifndef CONFIG_NUMA
 		if (!hpage)
 			hpage = khugepaged_alloc_hugepage(&wait);
@@ -2274,8 +2265,12 @@ static void khugepaged_do_scan(void)
 				break;
 			wait = false;
 			khugepaged_alloc_sleep();
+		} else if (hpage) {
+			put_page(hpage);
+			hpage = NULL;
 		}
 #endif
+		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
 			break;
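The last two hunks rework the khugepaged_do_scan() loop to match: when a collapse fails after allocation, the page now travels back to the loop through hpage and is dropped before the next pass, and cond_resched() moves below the #ifdef so it runs on every iteration in both configurations. A runnable sketch of that loop shape follows, again with stand-in helpers; collapse_stub() and the malloc()-backed pages are illustrative only, not the kernel code.

/*
 * Sketch only: a page left behind by a failed collapse is released at
 * the top of the next iteration instead of being leaked.
 */
#include <stdio.h>
#include <stdlib.h>

struct page { int id; };

/* Simulated collapse: publishes its page early, "fails" on odd rounds. */
static void collapse_stub(struct page **hpage, int round)
{
	struct page *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->id = round;
	*hpage = p;			/* published early, as in the patch */
	if (round % 2)
		return;			/* failure: page stays owned by the caller */
	free(p);			/* success: page consumed by the collapse */
	*hpage = NULL;
}

int main(void)
{
	struct page *hpage = NULL;

	for (int round = 0; round < 4; round++) {
		if (hpage) {		/* drop what a failed pass left behind */
			printf("releasing leftover page %d\n", hpage->id);
			free(hpage);
			hpage = NULL;
		}
		collapse_stub(&hpage, round);
		/* cond_resched() would run here, on every iteration */
	}
	if (hpage)
		free(hpage);		/* final leftover, if the last pass failed */
	return 0;
}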