author		Adam Litke <agl@us.ibm.com>	2007-11-14 19:59:38 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-14 21:45:40 -0500
commit		c79fb75e5a514a5a35f22c229042aa29f4237e3a (patch)
tree		012e5610e814ed938628af84ffa7c4775db113e2
parent		348ea204cc23cda35faf962414b674c57da647d7 (diff)
hugetlb: fix quota management for private mappings
The hugetlbfs quota management system was never taught to handle MAP_PRIVATE
mappings when that support was added.  Currently, quota is debited at page
instantiation and credited at file truncation.  This approach works correctly
for shared pages but is incomplete for private pages.  In addition to
hugetlb_no_page(), private pages can be instantiated by hugetlb_cow(); but
this function does not respect quotas.

Private huge pages are treated very much like normal, anonymous pages.  They
are not "backed" by the hugetlbfs file and are not stored in the mapping's
radix tree.  This means that private pages are invisible to
truncate_hugepages() so that function will not credit the quota.

This patch (based on a prototype provided by Ken Chen) moves quota crediting
for all pages into free_huge_page().  page->private is used to store a pointer
to the mapping to which this page belongs.  This is used to credit quota on
the appropriate hugetlbfs instance.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Ken Chen <kenchen@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/hugetlbfs/inode.c	1
-rw-r--r--	mm/hugetlb.c		13
2 files changed, 10 insertions, 4 deletions
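For readers skimming the diff, the idea is easier to see outside kernel context. Below is a minimal userspace C sketch of the accounting scheme this patch adopts: the owning mapping is remembered in page->private when a huge page is allocated, and the quota is credited back when the page is freed, so private (COW) pages that never enter the mapping's radix tree still return their quota. The struct definitions, the quota_free counter, and the main() walkthrough are illustrative stand-ins and are not code from the patch.

/*
 * Userspace sketch (not kernel code) of the scheme used by this patch:
 * remember the owning mapping in page->private at allocation time and
 * credit the quota in free_huge_page(), instead of at truncation.
 * The quota_free counter is a stand-in for the real hugetlbfs quota logic.
 */
#include <stdio.h>
#include <stdlib.h>

struct address_space {
	long quota_free;	/* huge pages this mapping may still consume */
};

struct page {
	unsigned long private;	/* owning mapping, as in the patch */
};

/* Debit one huge page from the quota; non-zero means over quota. */
static int hugetlb_get_quota(struct address_space *mapping)
{
	if (mapping->quota_free == 0)
		return -1;
	mapping->quota_free--;
	return 0;
}

/* Credit one huge page back to the quota. */
static void hugetlb_put_quota(struct address_space *mapping)
{
	mapping->quota_free++;
}

/* Allocation records the owning mapping in page->private. */
static struct page *alloc_huge_page(struct address_space *mapping)
{
	struct page *page;

	if (hugetlb_get_quota(mapping))
		return NULL;	/* the kernel would return VM_FAULT_SIGBUS */
	page = calloc(1, sizeof(*page));
	if (!page) {
		hugetlb_put_quota(mapping);
		return NULL;
	}
	page->private = (unsigned long) mapping;
	return page;
}

/*
 * Freeing credits the quota for every page, shared or private, because the
 * mapping pointer travels with the page itself.
 */
static void free_huge_page(struct page *page)
{
	struct address_space *mapping = (struct address_space *) page->private;

	if (mapping)
		hugetlb_put_quota(mapping);
	page->private = 0;
	free(page);
}

int main(void)
{
	struct address_space mapping = { .quota_free = 1 };

	struct page *private_copy = alloc_huge_page(&mapping);	/* e.g. a COW copy */
	printf("quota after alloc: %ld\n", mapping.quota_free);	/* 0 */

	/* No truncation ever sees this page; the quota is still restored. */
	free_huge_page(private_copy);
	printf("quota after free:  %ld\n", mapping.quota_free);	/* 1 */
	return 0;
}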
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 12aca8ed605f..6513f5655861 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -364,7 +364,6 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
 			++next;
 			truncate_huge_page(page);
 			unlock_page(page);
-			hugetlb_put_quota(mapping);
 			freed++;
 		}
 		huge_pagevec_release(&pvec);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f43b3dca12b5..3992bd5120e7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -116,7 +116,9 @@ static void update_and_free_page(struct page *page)
 static void free_huge_page(struct page *page)
 {
 	int nid = page_to_nid(page);
+	struct address_space *mapping;
 
+	mapping = (struct address_space *) page_private(page);
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -129,6 +131,9 @@ static void free_huge_page(struct page *page)
 		enqueue_huge_page(page);
 	}
 	spin_unlock(&hugetlb_lock);
+	if (mapping)
+		hugetlb_put_quota(mapping);
+	set_page_private(page, 0);
 }
 
 /*
@@ -388,8 +393,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_huge_page_shared(vma, addr);
 	else
 		page = alloc_huge_page_private(vma, addr);
-	if (page)
+	if (page) {
 		set_page_refcounted(page);
+		set_page_private(page, (unsigned long) vma->vm_file->f_mapping);
+	}
 	return page;
 }
 
@@ -730,6 +737,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
+	if (hugetlb_get_quota(vma->vm_file->f_mapping))
+		return VM_FAULT_SIGBUS;
 
 	page_cache_get(old_page);
 	new_page = alloc_huge_page(vma, address);
@@ -796,7 +805,6 @@ retry:
 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
 			if (err) {
 				put_page(page);
-				hugetlb_put_quota(mapping);
 				if (err == -EEXIST)
 					goto retry;
 				goto out;
@@ -830,7 +838,6 @@ out:
 
 backout:
 	spin_unlock(&mm->page_table_lock);
-	hugetlb_put_quota(mapping);
 	unlock_page(page);
 	put_page(page);
 	goto out;