diff options
author | Andi Kleen <ak@suse.de> | 2008-07-24 00:27:43 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:17 -0400 |
commit | a137e1cc6d6e7d315fef03962a2a5a113348b13b (patch) | |
tree | b47e195c392abaa3640cc2f9187d99d58cee664a /mm/hugetlb.c | |
parent | e5ff215941d59f8ae6bf58f6428dc5c26745a612 (diff) |
hugetlbfs: per mount huge page sizes
Add the ability to configure the hugetlb hstate used on a per mount basis.
- Add a new pagesize= option to the hugetlbfs mount that allows setting
the page size
- This option causes the mount code to find the hstate corresponding to the
specified size, and sets up a pointer to the hstate in the mount's
superblock.
- Change the hstate accessors to use this information rather than the
global_hstate they were using (requires a slight change in mm/memory.c
so we don't NULL deref in the error-unmap path -- see comments).
[np: take hstate out of hugetlbfs inode and vma->vm_private_data]
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 16 |
1 file changed, 3 insertions, 13 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 82378d44a0c5..4cf7a90e9140 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1439,19 +1439,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
1439 | void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | 1439 | void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, |
1440 | unsigned long end, struct page *ref_page) | 1440 | unsigned long end, struct page *ref_page) |
1441 | { | 1441 | { |
1442 | /* | 1442 | spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); |
1443 | * It is undesirable to test vma->vm_file as it should be non-null | 1443 | __unmap_hugepage_range(vma, start, end, ref_page); |
1444 | * for valid hugetlb area. However, vm_file will be NULL in the error | 1444 | spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); |
1445 | * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, | ||
1446 | * do_mmap_pgoff() nullifies vma->vm_file before calling this function | ||
1447 | * to clean up. Since no pte has actually been setup, it is safe to | ||
1448 | * do nothing in this case. | ||
1449 | */ | ||
1450 | if (vma->vm_file) { | ||
1451 | spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); | ||
1452 | __unmap_hugepage_range(vma, start, end, ref_page); | ||
1453 | spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); | ||
1454 | } | ||
1455 | } | 1445 | } |
1456 | 1446 | ||
1457 | /* | 1447 | /* |