author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2012-07-31 19:41:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2012-07-31 21:42:40 -0400
commit     76dcee75c1aff61259f5ed55e2bcfab60cc4bd5f (patch)
tree       987ca7e7add2453370dc07e1e797d8a3b8e2d68b /mm/hugetlb.c
parent     47d38344abd0c7c6793b59ac741aa5b205fc197c (diff)
hugetlb: don't use ERR_PTR with VM_FAULT* values
The current use of VM_FAULT_* codes with ERR_PTR requires us to ensure that
VM_FAULT_* values do not exceed MAX_ERRNO.  Decouple the VM_FAULT_* values
from MAX_ERRNO.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
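Background on why ERR_PTR() cannot safely carry VM_FAULT_* codes: IS_ERR() only
treats pointer values in the last MAX_ERRNO (4095) bytes of the address space as
encoded errors, so ERR_PTR(-x) is only recognized as an error while x <= 4095.
The VM_FAULT_* flags happen to be small today, but nothing ties them to that
limit. The following is a minimal userspace sketch of the include/linux/err.h
pattern, not kernel code; the FAKE_FAULT_* constants are made up for illustration.

#include <stdio.h>

/* Minimal userspace model of include/linux/err.h.  In the kernel,
 * MAX_ERRNO is 4095: only the last 4095 values of the address space
 * are treated as encoded errnos. */
#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)    { return IS_ERR_VALUE((unsigned long)ptr); }

/* Hypothetical fault codes, not the real VM_FAULT_* definitions: the real
 * flags are small today, but nothing forces them to stay below MAX_ERRNO. */
#define FAKE_FAULT_SIGBUS  0x0002   /* fits inside the errno range */
#define FAKE_FAULT_HUGE    0x2000   /* 8192 > MAX_ERRNO: would be lost */

int main(void)
{
        /* Recognized as an error pointer: prints 1. */
        printf("IS_ERR(small fault) = %d\n", IS_ERR(ERR_PTR(-FAKE_FAULT_SIGBUS)));
        /* NOT recognized: prints 0, the fault code silently looks like a valid pointer. */
        printf("IS_ERR(huge fault)  = %d\n", IS_ERR(ERR_PTR(-FAKE_FAULT_HUGE)));
        return 0;
}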
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c86830931cc6..34a7e2375478 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1123,10 +1123,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
          */
         chg = vma_needs_reservation(h, vma, addr);
         if (chg < 0)
-                return ERR_PTR(-VM_FAULT_OOM);
+                return ERR_PTR(-ENOMEM);
         if (chg)
                 if (hugepage_subpool_get_pages(spool, chg))
-                        return ERR_PTR(-VM_FAULT_SIGBUS);
+                        return ERR_PTR(-ENOSPC);
 
         spin_lock(&hugetlb_lock);
         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -1136,7 +1136,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                 if (!page) {
                         hugepage_subpool_put_pages(spool, chg);
-                        return ERR_PTR(-VM_FAULT_SIGBUS);
+                        return ERR_PTR(-ENOSPC);
                 }
         }
 
@@ -2496,6 +2496,7 @@ retry_avoidcopy:
         new_page = alloc_huge_page(vma, address, outside_reserve);
 
         if (IS_ERR(new_page)) {
+                long err = PTR_ERR(new_page);
                 page_cache_release(old_page);
 
                 /*
@@ -2524,7 +2525,10 @@ retry_avoidcopy:
 
                 /* Caller expects lock to be held */
                 spin_lock(&mm->page_table_lock);
-                return -PTR_ERR(new_page);
+                if (err == -ENOMEM)
+                        return VM_FAULT_OOM;
+                else
+                        return VM_FAULT_SIGBUS;
         }
 
         /*
@@ -2642,7 +2646,11 @@ retry:
                         goto out;
                 page = alloc_huge_page(vma, address, 0);
                 if (IS_ERR(page)) {
-                        ret = -PTR_ERR(page);
+                        ret = PTR_ERR(page);
+                        if (ret == -ENOMEM)
+                                ret = VM_FAULT_OOM;
+                        else
+                                ret = VM_FAULT_SIGBUS;
                         goto out;
                 }
                 clear_huge_page(page, address, pages_per_huge_page(h));
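The last two hunks (under retry_avoidcopy: and retry:) open-code the same
errno-to-VM_FAULT_* translation. Purely as an illustration of that pattern, a
hypothetical helper (not part of this patch; the name is made up) could look like:

/*
 * Hypothetical helper, not in mm/hugetlb.c: names the errno-to-fault-code
 * translation that the two hunks above open-code.  With this patch,
 * alloc_huge_page() returns ERR_PTR(-ENOMEM) when the reservation
 * bookkeeping fails and ERR_PTR(-ENOSPC) when the subpool or the buddy
 * allocator cannot provide a page, so anything other than -ENOMEM maps to
 * SIGBUS.  VM_FAULT_OOM/VM_FAULT_SIGBUS come from <linux/mm.h>, ENOMEM
 * from <linux/errno.h>.
 */
static int hugetlb_err_to_vm_fault(long err)
{
        return (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
}

With such a helper, the retry_avoidcopy: hunk would collapse to
"return hugetlb_err_to_vm_fault(err);" and the retry: hunk to
"ret = hugetlb_err_to_vm_fault(PTR_ERR(page));".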