aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2007-05-06 17:50:02 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:12:55 -0400
commitd506a7725114aaddbf982fd18621b3e0e5c27f1b (patch)
treeebebcac1a8d4252a738e0f0edb3e1213f7082dd6
parent2b45ab3398a0ba119b1f672c7c56fd5a431b7f0a (diff)
get_unmapped_area handles MAP_FIXED on powerpc
The current get_unmapped_area code calls the f_ops->get_unmapped_area or the arch one (via the mm) only when MAP_FIXED is not passed. That makes it impossible for archs to impose proper constraints on regions of the virtual address space. To work around that, get_unmapped_area() then calls some hugetlbfs specific hacks. This causes several problems, among others: - It makes it impossible for a driver or filesystem to do the same thing that hugetlbfs does (for example, to allow a driver to use larger page sizes to map external hardware) if that requires applying a constraint on the addresses (constraining that mapping in certain regions and other mappings out of those regions). - Some archs like arm, mips, sparc, sparc64, sh and sh64 already want MAP_FIXED to be passed down in order to deal with aliasing issues. The code is there to handle it... but is never called. This series of patches moves the logic to handle MAP_FIXED down to the various arch/driver get_unmapped_area() implementations, and then changes the generic code to always call them. The hugetlbfs hacks then disappear from the generic code. Since I need to do some special 64K pages mappings for SPEs on cell, I need to work around the first problem at least. I have further patches thus implementing a "slices" layer that handles multiple page sizes through slices of the address space for use by hugetlbfs, the SPE code, and possibly others, but it requires that series of patches first. There is still a potential (but not practical) issue due to the fact that filesystems/drivers implementing g_u_a will effectively bypass all arch checks. This is not an issue in practice as the only filesystems/drivers using that hook are doing so for arch specific purposes in the first place. There is also a problem with mremap that will completely bypass all arch checks. 
I'll try to address that separately, I'm not 100% certain yet how, possibly by making it not work when the vma has a file whose f_ops has a get_unmapped_area callback, and by making it use is_hugepage_only_range() before expanding into a new area. Also, I want to turn is_hugepage_only_range() into a more generic is_normal_page_range() as that's really what it will end up meaning when used in stack grow, brk grow and mremap. None of the above "issues" however are introduced by this patch, they are already there, so I think the patch can go in for 2.6.22. This patch: Handle MAP_FIXED in powerpc's arch_get_unmapped_area() in all 3 implementations of it. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: William Irwin <bill.irwin@oracle.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Richard Henderson <rth@twiddle.net> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Russell King <rmk+kernel@arm.linux.org.uk> Cc: David Howells <dhowells@redhat.com> Cc: Andi Kleen <ak@suse.de> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: Kyle McMartin <kyle@mcmartin.ca> Cc: Grant Grundler <grundler@parisc-linux.org> Cc: Matthew Wilcox <willy@debian.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: Adam Litke <agl@us.ibm.com> Cc: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/powerpc/mm/hugetlbpage.c21
1 files changed, 21 insertions, 0 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index d0ec887f05a2..1f07f70ac89f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -566,6 +566,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
566 if (len > TASK_SIZE) 566 if (len > TASK_SIZE)
567 return -ENOMEM; 567 return -ENOMEM;
568 568
569 /* handle fixed mapping: prevent overlap with huge pages */
570 if (flags & MAP_FIXED) {
571 if (is_hugepage_only_range(mm, addr, len))
572 return -EINVAL;
573 return addr;
574 }
575
569 if (addr) { 576 if (addr) {
570 addr = PAGE_ALIGN(addr); 577 addr = PAGE_ALIGN(addr);
571 vma = find_vma(mm, addr); 578 vma = find_vma(mm, addr);
@@ -641,6 +648,13 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
641 if (len > TASK_SIZE) 648 if (len > TASK_SIZE)
642 return -ENOMEM; 649 return -ENOMEM;
643 650
651 /* handle fixed mapping: prevent overlap with huge pages */
652 if (flags & MAP_FIXED) {
653 if (is_hugepage_only_range(mm, addr, len))
654 return -EINVAL;
655 return addr;
656 }
657
644 /* dont allow allocations above current base */ 658 /* dont allow allocations above current base */
645 if (mm->free_area_cache > base) 659 if (mm->free_area_cache > base)
646 mm->free_area_cache = base; 660 mm->free_area_cache = base;
@@ -823,6 +837,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
823 /* Paranoia, caller should have dealt with this */ 837 /* Paranoia, caller should have dealt with this */
824 BUG_ON((addr + len) < addr); 838 BUG_ON((addr + len) < addr);
825 839
840 /* Handle MAP_FIXED */
841 if (flags & MAP_FIXED) {
842 if (prepare_hugepage_range(addr, len, pgoff))
843 return -EINVAL;
844 return addr;
845 }
846
826 if (test_thread_flag(TIF_32BIT)) { 847 if (test_thread_flag(TIF_32BIT)) {
827 curareas = current->mm->context.low_htlb_areas; 848 curareas = current->mm->context.low_htlb_areas;
828 849