path: root/mm
author     Paul Mundt <lethal@linux-sh.org>  2007-07-21 07:37:25 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-21 20:49:14 -0400
commit     b50731732f926d6c49fd0724616a7344c31cd5cf (patch)
tree       2bc72fc6bf200b3696666ff1ef21f7e60a379d35 /mm
parent     df336d1c7b6fd510fa6d3a028f999e7586c7026e (diff)
nommu: vmalloc_32_user()/vm_insert_page() and symbol exports.
Trying to survive an allmodconfig on a nommu platform results in many screen lengths of module unhappiness. Many of the mmap-related things that binfmt_flat hooks into are never exported despite being global, and there are also missing definitions for vmalloc_32_user() and vm_insert_page().

I've implemented vmalloc_32_user() trying to stick as close to the mm/vmalloc.c implementation as possible, though we don't have any need for VM_USERMAP, so grovelling for the VMA can be skipped. vm_insert_page() has been stubbed for now in order to keep the build happy.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
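For context, a minimal sketch of how a nommu module might consume the interfaces exported here, e.g. from a character driver's mmap handler. This is illustrative only and not part of the patch; the example_* names are made up, and a real handler would insert every page backing the buffer rather than just the first:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static void *example_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Zeroed, 32bit-addressable memory that is safe to expose to userspace. */
	example_buf = vmalloc_32_user(size);
	if (!example_buf)
		return -ENOMEM;

	/*
	 * Insert the first backing page into the VMA.  On nommu this
	 * currently returns -EINVAL, since vm_insert_page() is only a
	 * build-fixing stub.
	 */
	return vm_insert_page(vma, vma->vm_start,
			      vmalloc_to_page(example_buf));
}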
Diffstat (limited to 'mm')
-rw-r--r--  mm/nommu.c  45
1 file changed, 33 insertions(+), 12 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 1b105d28949f..9eef6a398555 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -54,12 +54,6 @@ DECLARE_RWSEM(nommu_vma_sem);
 struct vm_operations_struct generic_file_vm_ops = {
 };
 
-EXPORT_SYMBOL(vfree);
-EXPORT_SYMBOL(vmalloc_to_page);
-EXPORT_SYMBOL(vmalloc_32);
-EXPORT_SYMBOL(vmap);
-EXPORT_SYMBOL(vunmap);
-
 /*
  * Handle all mappings that got truncated by a "truncate()"
  * system call.
@@ -168,7 +162,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 finish_or_fault:
 	return i ? : -EFAULT;
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
 DEFINE_RWLOCK(vmlist_lock);
@@ -178,6 +171,7 @@ void vfree(void *addr)
 {
 	kfree(addr);
 }
+EXPORT_SYMBOL(vfree);
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
@@ -186,17 +180,19 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	 */
 	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
+EXPORT_SYMBOL(__vmalloc);
 
 struct page * vmalloc_to_page(void *addr)
 {
 	return virt_to_page(addr);
 }
+EXPORT_SYMBOL(vmalloc_to_page);
 
 unsigned long vmalloc_to_pfn(void *addr)
 {
 	return page_to_pfn(virt_to_page(addr));
 }
-
+EXPORT_SYMBOL(vmalloc_to_pfn);
 
 long vread(char *buf, char *addr, unsigned long count)
 {
@@ -237,9 +233,8 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
-/*
- * vmalloc_32 - allocate virtually continguos memory (32bit addressable)
- *
+/**
+ * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -249,17 +244,33 @@ void *vmalloc_32(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
+EXPORT_SYMBOL(vmalloc_32);
+
+/**
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * @size: allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(vmalloc_32_user);
 
 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 {
 	BUG();
 	return NULL;
 }
+EXPORT_SYMBOL(vmap);
 
 void vunmap(void *addr)
 {
 	BUG();
 }
+EXPORT_SYMBOL(vunmap);
 
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
@@ -269,6 +280,13 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
 
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+			struct page *page)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_page);
+
 /*
  * sys_brk() for the most part doesn't need the global kernel
  * lock, except when an application is doing something nasty
@@ -994,6 +1012,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	show_free_areas();
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(do_mmap_pgoff);
 
 /*
  * handle mapping disposal for uClinux
@@ -1074,6 +1093,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 
 	return 0;
 }
+EXPORT_SYMBOL(do_munmap);
 
 asmlinkage long sys_munmap(unsigned long addr, size_t len)
 {
@@ -1164,6 +1184,7 @@ unsigned long do_mremap(unsigned long addr,
 
 	return vma->vm_start;
 }
+EXPORT_SYMBOL(do_mremap);
 
 asmlinkage unsigned long sys_mremap(unsigned long addr,
 	unsigned long old_len, unsigned long new_len,
@@ -1231,7 +1252,6 @@ unsigned long get_unmapped_area(struct file *file, unsigned long addr,
 
 	return get_area(file, addr, len, pgoff, flags);
 }
-
 EXPORT_SYMBOL(get_unmapped_area);
 
 /*
@@ -1346,6 +1366,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	BUG();
 	return 0;
 }
+EXPORT_SYMBOL(filemap_fault);
 
 /*
  * Access another process' address space.