author	Adam Litke <agl@us.ibm.com>	2007-11-14 19:59:39 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-14 21:45:40 -0500
commit	2fc39cec6a9b5b41727d3386b780b69422a15152 (patch)
tree	4b01dcef394a109ae3ff83189f7386ff3cdb3f61 /mm
parent	c79fb75e5a514a5a35f22c229042aa29f4237e3a (diff)
hugetlb: debit quota in alloc_huge_page
Now that quota is credited by free_huge_page(), calls to hugetlb_get_quota() seem out of place.  The alloc/free API is unbalanced because we handle the hugetlb_put_quota() but expect the caller to open-code hugetlb_get_quota().  Move the get inside alloc_huge_page to clean up this disparity.

This patch has been kept apart from the previous patch because of the somewhat dodgy ERR_PTR() use herein.  Moving the quota logic means that alloc_huge_page() has two failure modes.  Quota failure must result in a SIGBUS while a standard allocation failure is OOM.  Unfortunately, ERR_PTR() doesn't like the small positive errnos we have in VM_FAULT_*, so they must be negated before they are used.

Does anyone take issue with the way I am using PTR_ERR?  If so, what are your thoughts on how to clean this up (without needing an if, else if, else block at each alloc_huge_page() callsite)?

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Ken Chen <kenchen@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
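For readers unfamiliar with the idiom being questioned above: ERR_PTR() encodes an errno-like value in an otherwise invalid pointer, IS_ERR() detects such a pointer, and PTR_ERR() recovers the value.  Because the VM_FAULT_* codes are small positive numbers, they are negated going into ERR_PTR() and negated again at the callsite, as the hunks below do.  What follows is a minimal, self-contained userspace sketch of that round trip, not kernel code: the ERR_PTR/IS_ERR/PTR_ERR helpers are simplified stand-ins for the real macros in include/linux/err.h, and sketch_alloc(), sketch_fault() and the VM_FAULT_* values here are made up for illustration.

/*
 * Userspace sketch of the ERR_PTR round trip used by this patch.
 * All definitions below are simplified stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define VM_FAULT_OOM	1	/* illustrative small positive fault code */
#define VM_FAULT_SIGBUS	2	/* illustrative small positive fault code */

static void *ERR_PTR(long error)
{
	return (void *)error;		/* encode the (negative) code in the pointer */
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;		/* recover the encoded code */
}

static int IS_ERR(const void *ptr)
{
	/* the last MAX_ERRNO pointer values are reserved for encoded errors */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for alloc_huge_page(): quota failure vs. allocation failure. */
static void *sketch_alloc(int quota_ok, int mem_ok)
{
	if (!quota_ok)
		return ERR_PTR(-VM_FAULT_SIGBUS);	/* negate before encoding */
	if (!mem_ok)
		return ERR_PTR(-VM_FAULT_OOM);
	return malloc(1);				/* "the page" */
}

/* Stand-in for a fault-handler callsite such as hugetlb_no_page(). */
static int sketch_fault(int quota_ok, int mem_ok)
{
	void *page = sketch_alloc(quota_ok, mem_ok);

	if (IS_ERR(page))
		return -PTR_ERR(page);	/* negate again: back to a VM_FAULT_* code */
	free(page);
	return 0;			/* success: no fault code to report */
}

int main(void)
{
	printf("quota failure -> %d (VM_FAULT_SIGBUS=%d)\n",
	       sketch_fault(0, 1), VM_FAULT_SIGBUS);
	printf("alloc failure -> %d (VM_FAULT_OOM=%d)\n",
	       sketch_fault(1, 0), VM_FAULT_OOM);
	printf("success       -> %d\n", sketch_fault(1, 1));
	return 0;
}

The negation keeps each callsite down to a single IS_ERR() check plus one -PTR_ERR() conversion, which is exactly the alternative to the "if, else if, else" block the commit message wants to avoid.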
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	24
1 file changed, 12 insertions, 12 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3992bd5120e7..bc12b0adfa87 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -388,6 +388,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr)
 {
 	struct page *page;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+
+	if (hugetlb_get_quota(mapping))
+		return ERR_PTR(-VM_FAULT_SIGBUS);
 
 	if (vma->vm_flags & VM_MAYSHARE)
 		page = alloc_huge_page_shared(vma, addr);
@@ -395,9 +399,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_huge_page_private(vma, addr);
 	if (page) {
 		set_page_refcounted(page);
-		set_page_private(page, (unsigned long) vma->vm_file->f_mapping);
-	}
-	return page;
+		set_page_private(page, (unsigned long) mapping);
+		return page;
+	} else
+		return ERR_PTR(-VM_FAULT_OOM);
 }
 
 static int __init hugetlb_init(void)
@@ -737,15 +742,13 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
-	if (hugetlb_get_quota(vma->vm_file->f_mapping))
-		return VM_FAULT_SIGBUS;
 
 	page_cache_get(old_page);
 	new_page = alloc_huge_page(vma, address);
 
-	if (!new_page) {
+	if (IS_ERR(new_page)) {
 		page_cache_release(old_page);
-		return VM_FAULT_OOM;
+		return -PTR_ERR(new_page);
 	}
 
 	spin_unlock(&mm->page_table_lock);
@@ -789,12 +792,9 @@ retry:
 		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
 		if (idx >= size)
 			goto out;
-		if (hugetlb_get_quota(mapping))
-			goto out;
 		page = alloc_huge_page(vma, address);
-		if (!page) {
-			hugetlb_put_quota(mapping);
-			ret = VM_FAULT_OOM;
+		if (IS_ERR(page)) {
+			ret = -PTR_ERR(page);
 			goto out;
 		}
 		clear_huge_page(page, address);