author		Nick Piggin <npiggin@suse.de>	2006-06-23 05:03:20 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:49 -0400
commit		833423143c3a7c6545e409d65febd0d92deb351b
tree		13a1881f1ffd6e546e80a2ec04b1ac44ad145298 /mm/vmalloc.c
parent		762834e8bf46bf41ce9034d062a7c1f8563175f3
[PATCH] mm: introduce remap_vmalloc_range()
Add remap_vmalloc_range, vmalloc_user, and vmalloc_32_user so that drivers
can have a nice interface for remapping vmalloc memory.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 122
1 file changed, 120 insertions(+), 2 deletions(-)
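The interface is meant to be driven from a driver's ->mmap() method. A minimal
sketch, assuming a hypothetical mydev driver (the mydev_* names and the
file_operations wiring are illustrative, not part of this patch): a buffer
allocated with vmalloc_user() at init time is handed out by
remap_vmalloc_range().

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void *mydev_buf;		/* allocated with vmalloc_user(), see below */

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Cover the whole vma with the buffer; vm_pgoff is the page
		 * offset userspace passed to mmap().  remap_vmalloc_range()
		 * refuses buffers lacking VM_USERMAP or too small for the vma. */
		return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
	}

	static struct file_operations mydev_fops = {
		.owner	= THIS_MODULE,
		.mmap	= mydev_mmap,
	};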
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34eb..35f8553f893a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -257,6 +257,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 }
 
 /* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
+/* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
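The new helper is a linear walk of the global vmlist and matches only the
exact base address of an area; an interior pointer comes back NULL. It is
static to mm/vmalloc.c, so the fragment below is purely illustrative of the
locking contract (lookups take vmlist_lock for reading; writers such as the
new vmalloc_user() take it for writing):

	struct vm_struct *area, *inner;
	void *buf = vmalloc_user(2 * PAGE_SIZE);

	read_lock(&vmlist_lock);
	area  = __find_vm_area(buf);			/* the vm_struct backing buf */
	inner = __find_vm_area(buf + PAGE_SIZE);	/* NULL: not an area's base address */
	read_unlock(&vmlist_lock);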
@@ -498,11 +511,33 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
 /**
+ * vmalloc_user - allocate virtually contiguous memory which has
+ *	been zeroed so it can be mapped to userspace without
+ *	leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
  * vmalloc_node - allocate memory on a specific node
  *
  * @size: allocation size
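The __GFP_ZERO here is the point of the interface: pages that will be mapped
to userspace must never carry stale kernel data. Continuing the hypothetical
mydev sketch from above (MYDEV_BUF_SIZE is illustrative), allocation and
teardown look like this; an ordinary vfree() releases the buffer:

	#define MYDEV_BUF_SIZE	(64 * PAGE_SIZE)

	static int __init mydev_init(void)
	{
		mydev_buf = vmalloc_user(MYDEV_BUF_SIZE);	/* zeroed, VM_USERMAP set */
		if (!mydev_buf)
			return -ENOMEM;
		return 0;
	}

	static void __exit mydev_exit(void)
	{
		vfree(mydev_buf);
	}

	module_init(mydev_init);
	module_exit(mydev_exit);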
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
 	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ * vmalloc_32_user - allocate virtually contiguous memory (32bit
+ *	addressable) which is zeroed so it can be
+ *	mapped to userspace without leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
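vmalloc_32_user() differs from vmalloc_user() only in dropping __GFP_HIGHMEM,
matching vmalloc_32() above, so the backing pages are 32-bit addressable; the
VM_USERMAP marking is identical. In the hypothetical sketch it would simply
replace the allocator call, e.g. for a buffer that a 32-bit-addressing device
also touches:

	mydev_buf = vmalloc_32_user(MYDEV_BUF_SIZE);	/* zeroed, lowmem, VM_USERMAP */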
@@ -630,3 +687,64 @@ finished:
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ *
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ * @returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
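Two details are worth spelling out. The size check compares against
area->size - PAGE_SIZE because every vmalloc area is allocated with one
trailing guard page, so a vmalloc_user(N) buffer can back at most N bytes of
mapping; and only areas explicitly marked VM_USERMAP at allocation time
(hence guaranteed zeroed) are accepted, which is why plain vmalloc() memory
is refused with -EINVAL. From userspace, the hypothetical mydev device is
consumed with a plain mmap(); /dev/mydev and the sizes are illustrative:

	/* hypothetical userspace consumer of the mydev sketch */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 64 * 4096;	/* must fit in the driver's buffer, or
					 * remap_vmalloc_range() makes mmap()
					 * fail with EINVAL */
		unsigned char *p;
		int fd = open("/dev/mydev", O_RDWR);

		if (fd < 0)
			return 1;

		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* freshly mapped pages read back as zero: vmalloc_user() zeroed them */
		printf("first byte: %d\n", p[0]);

		munmap(p, len);
		close(fd);
		return 0;
	}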