aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
authorJianyu Zhan <nasa4836@gmail.com>2014-06-04 19:10:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:54:10 -0400
commit8f34af6f93aee88291cec53ae8dff4989e58fbbd (patch)
tree685f2fa37565ede78b6ee2629d3f330eb7e39490 /mm/hugetlb.c
parent6edd6cc66201e06a6cc34030462217e7f4d72f4f (diff)
mm, hugetlb: move the error handle logic out of normal code path
alloc_huge_page() now mixes normal code path with error handle logic. This patch moves out the error handling logic, to make the normal code path cleaner and reduce code duplication. Signed-off-by: Jianyu Zhan <nasa4836@gmail.com> Acked-by: Davidlohr Bueso <davidlohr@hp.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c26
1 file changed, 13 insertions, 13 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 98f0bc105dfe..244194217e39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1386,24 +1386,17 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1386 return ERR_PTR(-ENOSPC); 1386 return ERR_PTR(-ENOSPC);
1387 1387
1388 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 1388 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1389 if (ret) { 1389 if (ret)
1390 if (chg || avoid_reserve) 1390 goto out_subpool_put;
1391 hugepage_subpool_put_pages(spool, 1); 1391
1392 return ERR_PTR(-ENOSPC);
1393 }
1394 spin_lock(&hugetlb_lock); 1392 spin_lock(&hugetlb_lock);
1395 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); 1393 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1396 if (!page) { 1394 if (!page) {
1397 spin_unlock(&hugetlb_lock); 1395 spin_unlock(&hugetlb_lock);
1398 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); 1396 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1399 if (!page) { 1397 if (!page)
1400 hugetlb_cgroup_uncharge_cgroup(idx, 1398 goto out_uncharge_cgroup;
1401 pages_per_huge_page(h), 1399
1402 h_cg);
1403 if (chg || avoid_reserve)
1404 hugepage_subpool_put_pages(spool, 1);
1405 return ERR_PTR(-ENOSPC);
1406 }
1407 spin_lock(&hugetlb_lock); 1400 spin_lock(&hugetlb_lock);
1408 list_move(&page->lru, &h->hugepage_activelist); 1401 list_move(&page->lru, &h->hugepage_activelist);
1409 /* Fall through */ 1402 /* Fall through */
@@ -1415,6 +1408,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1415 1408
1416 vma_commit_reservation(h, vma, addr); 1409 vma_commit_reservation(h, vma, addr);
1417 return page; 1410 return page;
1411
1412out_uncharge_cgroup:
1413 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1414out_subpool_put:
1415 if (chg || avoid_reserve)
1416 hugepage_subpool_put_pages(spool, 1);
1417 return ERR_PTR(-ENOSPC);
1418} 1418}
1419 1419
1420/* 1420/*