aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRoland McGrath <roland@redhat.com>2007-02-08 17:20:41 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-02-09 12:25:47 -0500
commitfa5dc22f8586cc3742413dd05f5cd9e039dfab9e (patch)
tree39a97d91e25794f64e3cc03f1d4a8fa2c8ad78d2
parenta25700a53f715fde30443e737e52310c6d4a311a (diff)
[PATCH] Add install_special_mapping
This patch adds a utility function, install_special_mapping, for creating a special vma that uses a fixed set of preallocated pages as backing, such as for a vDSO. This consolidates some nearly identical code that had been reimplemented for vDSO mapping on different architectures.
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mm.h3
-rw-r--r--mm/mmap.c72
2 files changed, 75 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2d2c08d5f473..bb793a4c8e9e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1030,6 +1030,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1030 unsigned long addr, unsigned long len, pgoff_t pgoff); 1030 unsigned long addr, unsigned long len, pgoff_t pgoff);
1031extern void exit_mmap(struct mm_struct *); 1031extern void exit_mmap(struct mm_struct *);
1032extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); 1032extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1033extern int install_special_mapping(struct mm_struct *mm,
1034 unsigned long addr, unsigned long len,
1035 unsigned long flags, struct page **pages);
1033 1036
1034extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1037extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1035 1038
diff --git a/mm/mmap.c b/mm/mmap.c
index cc3a20819457..eb509ae76553 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2101,3 +2101,75 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2101 return 0; 2101 return 0;
2102 return 1; 2102 return 1;
2103} 2103}
2104
2105
2106static struct page *special_mapping_nopage(struct vm_area_struct *vma,
2107 unsigned long address, int *type)
2108{
2109 struct page **pages;
2110
2111 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2112
2113 address -= vma->vm_start;
2114 for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
2115 address -= PAGE_SIZE;
2116
2117 if (*pages) {
2118 struct page *page = *pages;
2119 get_page(page);
2120 return page;
2121 }
2122
2123 return NOPAGE_SIGBUS;
2124}
2125
/*
 * Deliberately-empty close hook.  Its mere presence in the vm_ops
 * table prevents this vma from being merged with neighbours,
 * regardless of their flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}
2132
2133static struct vm_operations_struct special_mapping_vmops = {
2134 .close = special_mapping_close,
2135 .nopage = special_mapping_nopage,
2136};
2137
2138/*
2139 * Called with mm->mmap_sem held for writing.
2140 * Insert a new vma covering the given region, with the given flags.
2141 * Its pages are supplied by the given array of struct page *.
2142 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2143 * The region past the last page supplied will always produce SIGBUS.
2144 * The array pointer and the pages it points to are assumed to stay alive
2145 * for as long as this mapping might exist.
2146 */
2147int install_special_mapping(struct mm_struct *mm,
2148 unsigned long addr, unsigned long len,
2149 unsigned long vm_flags, struct page **pages)
2150{
2151 struct vm_area_struct *vma;
2152
2153 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2154 if (unlikely(vma == NULL))
2155 return -ENOMEM;
2156
2157 vma->vm_mm = mm;
2158 vma->vm_start = addr;
2159 vma->vm_end = addr + len;
2160
2161 vma->vm_flags = vm_flags | mm->def_flags;
2162 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
2163
2164 vma->vm_ops = &special_mapping_vmops;
2165 vma->vm_private_data = pages;
2166
2167 if (unlikely(insert_vm_struct(mm, vma))) {
2168 kmem_cache_free(vm_area_cachep, vma);
2169 return -ENOMEM;
2170 }
2171
2172 mm->total_vm += len >> PAGE_SHIFT;
2173
2174 return 0;
2175}