author		Adam Litke <agl@us.ibm.com>	2007-11-14 19:59:42 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-14 21:45:40 -0500
commit		90d8b7e6129e8f4e5b3bc1a2cfbe585372ce8646 (patch)
tree		7a078249e1a468dc3ec5973c14f8f6a95cb0620f
parent		9a119c056dc2a9970901954a6d561d50a95e528d (diff)
hugetlb: enforce quotas during reservation for shared mappings
When a MAP_SHARED mmap of a hugetlbfs file succeeds, huge pages are
reserved to guarantee no problems will occur later when instantiating
pages.  If quotas are in force, page instantiation could fail due to a
race with another process or an oversized (but approved) shared mapping.

To prevent these scenarios, debit the quota for the full reservation
amount up front and credit the unused quota when the reservation is
released.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Ken Chen <kenchen@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
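As a reading aid (not part of the patch): a minimal userspace sketch of
the accounting model described above, where the full reservation is
debited when the shared mapping is created and the unused remainder is
credited back on release.  quota_reserve() and quota_release() are
made-up stand-ins for hugetlb_get_quota() and hugetlb_put_quota(), and
all numbers are arbitrary.

/*
 * Toy model of the quota scheme -- not kernel code.
 */
#include <stdio.h>

static long free_blocks = 4;		/* per-fs quota, in huge pages */

static int quota_reserve(long nblocks)	/* cf. hugetlb_get_quota() */
{
	if (nblocks > free_blocks)
		return -1;		/* the kernel returns -ENOSPC */
	free_blocks -= nblocks;
	return 0;
}

static void quota_release(long nblocks)	/* cf. hugetlb_put_quota() */
{
	free_blocks += nblocks;
}

int main(void)
{
	long reserved = 3, instantiated = 2;

	/* Debit the whole reservation up front. */
	if (quota_reserve(reserved))
		return 1;

	/* ... faults consume the reservation without touching quota ... */

	/* On release, credit only the never-instantiated part; each
	 * instantiated page returns its block when the page is freed. */
	quota_release(reserved - instantiated);
	printf("blocks still debited: %ld\n", 4 - free_blocks);
	return 0;
}

The point of the up-front debit is that an over-quota mapping now fails
at mmap() time, where the caller can see the error, rather than at
fault time, where the process would take SIGBUS.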
 mm/hugetlb.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1e317465ecd1..b52b6ddd6c15 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -367,7 +367,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page(vma, addr);
 	spin_unlock(&hugetlb_lock);
-	return page;
+	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
@@ -375,13 +375,16 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 {
 	struct page *page = NULL;
 
+	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
+		return ERR_PTR(-VM_FAULT_SIGBUS);
+
 	spin_lock(&hugetlb_lock);
 	if (free_huge_pages > resv_huge_pages)
 		page = dequeue_huge_page(vma, addr);
 	spin_unlock(&hugetlb_lock);
 	if (!page)
 		page = alloc_buddy_huge_page(vma, addr);
-	return page;
+	return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -390,19 +393,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct page *page;
 	struct address_space *mapping = vma->vm_file->f_mapping;
 
-	if (hugetlb_get_quota(mapping, 1))
-		return ERR_PTR(-VM_FAULT_SIGBUS);
-
 	if (vma->vm_flags & VM_MAYSHARE)
 		page = alloc_huge_page_shared(vma, addr);
 	else
 		page = alloc_huge_page_private(vma, addr);
-	if (page) {
+
+	if (!IS_ERR(page)) {
 		set_page_refcounted(page);
 		set_page_private(page, (unsigned long) mapping);
-		return page;
-	} else
-		return ERR_PTR(-VM_FAULT_OOM);
+	}
+	return page;
 }
 
 static int __init hugetlb_init(void)
@@ -1148,6 +1148,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 	if (chg < 0)
 		return chg;
 
+	if (hugetlb_get_quota(inode->i_mapping, chg))
+		return -ENOSPC;
 	ret = hugetlb_acct_memory(chg);
 	if (ret < 0)
 		return ret;
@@ -1158,5 +1160,6 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
-	hugetlb_acct_memory(freed - chg);
+	hugetlb_put_quota(inode->i_mapping, (chg - freed));
+	hugetlb_acct_memory(-(chg - freed));
 }