author     Andi Kleen <ak@suse.de>                          2008-07-24 00:27:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-07-24 13:47:17 -0400
commit     a137e1cc6d6e7d315fef03962a2a5a113348b13b
tree       b47e195c392abaa3640cc2f9187d99d58cee664a /mm
parent     e5ff215941d59f8ae6bf58f6428dc5c26745a612
hugetlbfs: per mount huge page sizes
Add the ability to configure the hugetlb hstate used on a per mount basis.
- Add a new pagesize= option to the hugetlbfs mount that allows setting
  the page size (a usage sketch follows the sign-off block below).
- This option causes the mount code to find the hstate corresponding to the
  specified size and to set up a pointer to that hstate in the mount's
  superblock.
- Change the hstate accessors to use this information rather than the
global_hstate they were using (requires a slight change in mm/memory.c
so we don't NULL deref in the error-unmap path -- see comments).
[np: take hstate out of hugetlbfs inode and vma->vm_private_data]
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
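
As a usage illustration (not part of this commit, whose diffstat below is limited to mm/): a minimal userspace sketch of the new pagesize= option. The mount point path and the 2MB size are assumptions; the series parses the size with memparse(), so K/M/G suffixes work, and a size with no configured hstate makes the mount fail rather than fall back to the global default.

    /* Sketch: mount hugetlbfs with a per-mount 2MB page size.
     * Assumes /mnt/huge-2m exists and the kernel has a 2MB hstate.
     * Build: cc -o huge2m huge2m.c; run as root. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("none", "/mnt/huge-2m", "hugetlbfs", 0,
                      "pagesize=2M")) {
                    perror("mount(hugetlbfs, pagesize=2M)");
                    return 1;
            }
            puts("hugetlbfs mounted with 2MB pages at /mnt/huge-2m");
            return 0;
    }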
Diffstat (limited to 'mm')

 mm/hugetlb.c | 16 +++-------------
 mm/memory.c  | 18 ++++++++++++++++--
 2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82378d44a0c5..4cf7a90e9140 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1439,19 +1439,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct page *ref_page)
 {
-        /*
-         * It is undesirable to test vma->vm_file as it should be non-null
-         * for valid hugetlb area. However, vm_file will be NULL in the error
-         * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
-         * do_mmap_pgoff() nullifies vma->vm_file before calling this function
-         * to clean up. Since no pte has actually been setup, it is safe to
-         * do nothing in this case.
-         */
-        if (vma->vm_file) {
-                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
-                __unmap_hugepage_range(vma, start, end, ref_page);
-                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
-        }
+        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+        __unmap_hugepage_range(vma, start, end, ref_page);
+        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index c1c1d6d8c22b..02fc6b1047b0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -901,9 +901,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                 }
 
                 if (unlikely(is_vm_hugetlb_page(vma))) {
-                        unmap_hugepage_range(vma, start, end, NULL);
-                        zap_work -= (end - start) /
+                        /*
+                         * It is undesirable to test vma->vm_file as it
+                         * should be non-null for valid hugetlb area.
+                         * However, vm_file will be NULL in the error
+                         * cleanup path of do_mmap_pgoff. When
+                         * hugetlbfs ->mmap method fails,
+                         * do_mmap_pgoff() nullifies vma->vm_file
+                         * before calling this function to clean up.
+                         * Since no pte has actually been setup, it is
+                         * safe to do nothing in this case.
+                         */
+                        if (vma->vm_file) {
+                                unmap_hugepage_range(vma, start, end, NULL);
+                                zap_work -= (end - start) /
                                         pages_per_huge_page(hstate_vma(vma));
+                        }
+
                         start = end;
                 } else
                         start = unmap_page_range(*tlbp, vma,
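
Why the vm_file check had to move to the caller: with per-mount hstates, hstate_vma() reaches the hstate through vma->vm_file, so evaluating pages_per_huge_page(hstate_vma(vma)) with a NULL vm_file would oops before unmap_hugepage_range() could bail out. A rough sketch of the accessor chain described in the message above; the real definitions live in include/linux/hugetlb.h and fs/hugetlbfs/, outside this mm-limited diffstat, so treat the exact bodies as approximate:

    /* Approximate per-mount hstate accessors (see commit message). */
    static inline struct hstate *hstate_inode(struct inode *i)
    {
            /* the superblock carries the hstate chosen by pagesize= */
            return HUGETLBFS_SB(i->i_sb)->hstate;
    }

    static inline struct hstate *hstate_file(struct file *f)
    {
            return hstate_inode(f->f_dentry->d_inode);
    }

    static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
    {
            /* dereferences vm_file, which is NULL on do_mmap_pgoff()'s
             * error cleanup path, hence the if (vma->vm_file) guard in
             * unmap_vmas() above */
            return hstate_file(vma->vm_file);
    }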