Diffstat (limited to 'mm/nommu.c')
 -rw-r--r--  mm/nommu.c  1027
 1 file changed, 701 insertions(+), 326 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 1c28ea3a4e9c..60ed8375c986 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -6,11 +6,11 @@
  *
  * See Documentation/nommu-mmap.txt
  *
- * Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
+ * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2008 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -33,6 +33,28 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include "internal.h"
+
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+
+#if 0
+#define kenter(FMT, ...) \
+	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
 
 #include "internal.h"
 
@@ -40,19 +62,22 @@ void *high_memory;
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
-unsigned long askedalloc, realalloc;
 atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
 int heap_stack_gap = 0;
 
+atomic_t mmap_pages_allocated;
+
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);
 
-/* list of shareable VMAs */
-struct rb_root nommu_vma_tree = RB_ROOT;
-DECLARE_RWSEM(nommu_vma_sem);
+/* list of mapped, potentially shareable regions */
+static struct kmem_cache *vm_region_jar;
+struct rb_root nommu_region_tree = RB_ROOT;
+DECLARE_RWSEM(nommu_region_sem);
 
 struct vm_operations_struct generic_file_vm_ops = {
 };
@@ -124,6 +149,20 @@ unsigned int kobjsize(const void *objp)
 		return ksize(objp);
 
 	/*
+	 * If it's not a compound page, see if we have a matching VMA
+	 * region. This test is intentionally done in reverse order,
+	 * so if there's no VMA, we still fall through and hand back
+	 * PAGE_SIZE for 0-order pages.
+	 */
+	if (!PageCompound(page)) {
+		struct vm_area_struct *vma;
+
+		vma = find_vma(current->mm, (unsigned long)objp);
+		if (vma)
+			return vma->vm_end - vma->vm_start;
+	}
+
+	/*
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
@@ -401,129 +440,178 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 	return mm->brk = brk;
 }
 
-#ifdef DEBUG
-static void show_process_blocks(void)
+/*
+ * initialise the VMA and region record slabs
+ */
+void __init mmap_init(void)
 {
-	struct vm_list_struct *vml;
-
-	printk("Process blocks %d:", current->pid);
-
-	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
-		printk(" %p: %p", vml, vml->vma);
-		if (vml->vma)
-			printk(" (%d @%lx #%d)",
-			       kobjsize((void *) vml->vma->vm_start),
-			       vml->vma->vm_start,
-			       atomic_read(&vml->vma->vm_usage));
-		printk(vml->next ? " ->" : ".\n");
-	}
+	vm_region_jar = kmem_cache_create("vm_region_jar",
+					  sizeof(struct vm_region), 0,
+					  SLAB_PANIC, NULL);
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+					   sizeof(struct vm_area_struct), 0,
+					   SLAB_PANIC, NULL);
 }
-#endif /* DEBUG */
 
 /*
- * add a VMA into a process's mm_struct in the appropriate place in the list
- * - should be called with mm->mmap_sem held writelocked
+ * validate the region tree
+ * - the caller must hold the region lock
  */
-static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
+#ifdef CONFIG_DEBUG_NOMMU_REGIONS
+static noinline void validate_nommu_regions(void)
 {
-	struct vm_list_struct **ppv;
-
-	for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
-		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
-			break;
-
-	vml->next = *ppv;
-	*ppv = vml;
+	struct vm_region *region, *last;
+	struct rb_node *p, *lastp;
+
+	lastp = rb_first(&nommu_region_tree);
+	if (!lastp)
+		return;
+
+	last = rb_entry(lastp, struct vm_region, vm_rb);
+	if (unlikely(last->vm_end <= last->vm_start))
+		BUG();
+	if (unlikely(last->vm_top < last->vm_end))
+		BUG();
+
+	while ((p = rb_next(lastp))) {
+		region = rb_entry(p, struct vm_region, vm_rb);
+		last = rb_entry(lastp, struct vm_region, vm_rb);
+
+		if (unlikely(region->vm_end <= region->vm_start))
+			BUG();
+		if (unlikely(region->vm_top < region->vm_end))
+			BUG();
+		if (unlikely(region->vm_start < last->vm_top))
+			BUG();
+
+		lastp = p;
+	}
 }
+#else
+#define validate_nommu_regions() do {} while(0)
+#endif
 
 /*
- * look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * add a region into the global tree
  */
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+static void add_nommu_region(struct vm_region *region)
 {
-	struct vm_list_struct *loop, *vml;
+	struct vm_region *pregion;
+	struct rb_node **p, *parent;
 
-	/* search the vm_start ordered list */
-	vml = NULL;
-	for (loop = mm->context.vmlist; loop; loop = loop->next) {
-		if (loop->vma->vm_start > addr)
-			break;
-		vml = loop;
+	validate_nommu_regions();
+
+	BUG_ON(region->vm_start & ~PAGE_MASK);
+
+	parent = NULL;
+	p = &nommu_region_tree.rb_node;
+	while (*p) {
+		parent = *p;
+		pregion = rb_entry(parent, struct vm_region, vm_rb);
+		if (region->vm_start < pregion->vm_start)
+			p = &(*p)->rb_left;
+		else if (region->vm_start > pregion->vm_start)
+			p = &(*p)->rb_right;
+		else if (pregion == region)
+			return;
+		else
+			BUG();
 	}
 
-	if (vml && vml->vma->vm_end > addr)
-		return vml->vma;
+	rb_link_node(&region->vm_rb, parent, p);
+	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 
-	return NULL;
+	validate_nommu_regions();
 }
-EXPORT_SYMBOL(find_vma);
 
 /*
- * find a VMA
- * - we don't extend stack VMAs under NOMMU conditions
+ * delete a region from the global tree
  */
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+static void delete_nommu_region(struct vm_region *region)
 {
-	return find_vma(mm, addr);
-}
+	BUG_ON(!nommu_region_tree.rb_node);
 
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
-	return -ENOMEM;
+	validate_nommu_regions();
+	rb_erase(&region->vm_rb, &nommu_region_tree);
+	validate_nommu_regions();
 }
 
 /*
- * look up the first VMA exactly that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * free a contiguous series of pages
  */
-static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
-						    unsigned long addr)
+static void free_page_series(unsigned long from, unsigned long to)
 {
-	struct vm_list_struct *vml;
-
-	/* search the vm_start ordered list */
-	for (vml = mm->context.vmlist; vml; vml = vml->next) {
-		if (vml->vma->vm_start == addr)
-			return vml->vma;
-		if (vml->vma->vm_start > addr)
-			break;
+	for (; from < to; from += PAGE_SIZE) {
+		struct page *page = virt_to_page(from);
+
+		kdebug("- free %lx", from);
+		atomic_dec(&mmap_pages_allocated);
+		if (page_count(page) != 1)
+			kdebug("free page %p [%d]", page, page_count(page));
+		put_page(page);
 	}
-
-	return NULL;
 }
 
 /*
- * find a VMA in the global tree
+ * release a reference to a region
+ * - the caller must hold the region semaphore, which this releases
+ * - the region may not have been added to the tree yet, in which case vm_top
+ *   will equal vm_start
  */
-static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
+static void __put_nommu_region(struct vm_region *region)
+	__releases(nommu_region_sem)
 {
-	struct vm_area_struct *vma;
-	struct rb_node *n = nommu_vma_tree.rb_node;
+	kenter("%p{%d}", region, atomic_read(&region->vm_usage));
 
-	while (n) {
-		vma = rb_entry(n, struct vm_area_struct, vm_rb);
+	BUG_ON(!nommu_region_tree.rb_node);
 
-		if (start < vma->vm_start)
-			n = n->rb_left;
-		else if (start > vma->vm_start)
-			n = n->rb_right;
-		else
-			return vma;
+	if (atomic_dec_and_test(&region->vm_usage)) {
+		if (region->vm_top > region->vm_start)
+			delete_nommu_region(region);
+		up_write(&nommu_region_sem);
+
+		if (region->vm_file)
+			fput(region->vm_file);
+
+		/* IO memory and memory shared directly out of the pagecache
+		 * from ramfs/tmpfs mustn't be released here */
+		if (region->vm_flags & VM_MAPPED_COPY) {
+			kdebug("free series");
+			free_page_series(region->vm_start, region->vm_top);
+		}
+		kmem_cache_free(vm_region_jar, region);
+	} else {
+		up_write(&nommu_region_sem);
 	}
+}
 
-	return NULL;
+/*
+ * release a reference to a region
+ */
+static void put_nommu_region(struct vm_region *region)
+{
+	down_write(&nommu_region_sem);
+	__put_nommu_region(region);
 }
 
 /*
- * add a VMA in the global tree
+ * add a VMA into a process's mm_struct in the appropriate place in the list
+ * and tree and add to the address space's page tree also if not an anonymous
+ * page
+ * - should be called with mm->mmap_sem held writelocked
  */
-static void add_nommu_vma(struct vm_area_struct *vma)
+static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma;
+	struct vm_area_struct *pvma, **pp;
 	struct address_space *mapping;
-	struct rb_node **p = &nommu_vma_tree.rb_node;
-	struct rb_node *parent = NULL;
+	struct rb_node **p, *parent;
+
+	kenter(",%p", vma);
+
+	BUG_ON(!vma->vm_region);
+
+	mm->map_count++;
+	vma->vm_mm = mm;
 
 	/* add the VMA to the mapping */
 	if (vma->vm_file) {
@@ -534,42 +622,62 @@ static void add_nommu_vma(struct vm_area_struct *vma)
 		flush_dcache_mmap_unlock(mapping);
 	}
 
-	/* add the VMA to the master list */
+	/* add the VMA to the tree */
+	parent = NULL;
+	p = &mm->mm_rb.rb_node;
 	while (*p) {
 		parent = *p;
 		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 
-		if (vma->vm_start < pvma->vm_start) {
+		/* sort by: start addr, end addr, VMA struct addr in that order
+		 * (the latter is necessary as we may get identical VMAs) */
+		if (vma->vm_start < pvma->vm_start)
 			p = &(*p)->rb_left;
-		}
-		else if (vma->vm_start > pvma->vm_start) {
+		else if (vma->vm_start > pvma->vm_start)
 			p = &(*p)->rb_right;
-		}
-		else {
-			/* mappings are at the same address - this can only
-			 * happen for shared-mem chardevs and shared file
-			 * mappings backed by ramfs/tmpfs */
-			BUG_ON(!(pvma->vm_flags & VM_SHARED));
-
-			if (vma < pvma)
-				p = &(*p)->rb_left;
-			else if (vma > pvma)
-				p = &(*p)->rb_right;
-			else
-				BUG();
-		}
+		else if (vma->vm_end < pvma->vm_end)
+			p = &(*p)->rb_left;
+		else if (vma->vm_end > pvma->vm_end)
+			p = &(*p)->rb_right;
+		else if (vma < pvma)
+			p = &(*p)->rb_left;
+		else if (vma > pvma)
+			p = &(*p)->rb_right;
+		else
+			BUG();
 	}
 
 	rb_link_node(&vma->vm_rb, parent, p);
-	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
+	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+
+	/* add VMA to the VMA list also */
+	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
+		if (pvma->vm_start > vma->vm_start)
+			break;
+		if (pvma->vm_start < vma->vm_start)
+			continue;
+		if (pvma->vm_end < vma->vm_end)
+			break;
+	}
+
+	vma->vm_next = *pp;
+	*pp = vma;
 }
 
 /*
- * delete a VMA from the global list
+ * delete a VMA from its owning mm_struct and address space
  */
-static void delete_nommu_vma(struct vm_area_struct *vma)
+static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
+	struct vm_area_struct **pp;
 	struct address_space *mapping;
+	struct mm_struct *mm = vma->vm_mm;
+
+	kenter("%p", vma);
+
+	mm->map_count--;
+	if (mm->mmap_cache == vma)
+		mm->mmap_cache = NULL;
 
 	/* remove the VMA from the mapping */
 	if (vma->vm_file) {
@@ -580,8 +688,115 @@ static void delete_nommu_vma(struct vm_area_struct *vma)
 		flush_dcache_mmap_unlock(mapping);
 	}
 
-	/* remove from the master list */
-	rb_erase(&vma->vm_rb, &nommu_vma_tree);
+	/* remove from the MM's tree and list */
+	rb_erase(&vma->vm_rb, &mm->mm_rb);
+	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
+		if (*pp == vma) {
+			*pp = vma->vm_next;
+			break;
+		}
+	}
+
+	vma->vm_mm = NULL;
+}
+
+/*
+ * destroy a VMA record
+ */
+static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+	kenter("%p", vma);
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+	if (vma->vm_file) {
+		fput(vma->vm_file);
+		if (vma->vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(mm);
+	}
+	put_nommu_region(vma->vm_region);
+	kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * look up the first VMA in which addr resides, NULL if none
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+	struct rb_node *n = mm->mm_rb.rb_node;
+
+	/* check the cache first */
+	vma = mm->mmap_cache;
+	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+		return vma;
+
+	/* trawl the tree (there may be multiple mappings in which addr
+	 * resides) */
+	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+		vma = rb_entry(n, struct vm_area_struct, vm_rb);
+		if (vma->vm_start > addr)
+			return NULL;
+		if (vma->vm_end > addr) {
+			mm->mmap_cache = vma;
+			return vma;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(find_vma);
+
+/*
+ * find a VMA
+ * - we don't extend stack VMAs under NOMMU conditions
+ */
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+	return find_vma(mm, addr);
+}
+
+/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return -ENOMEM;
+}
+
+/*
+ * look up the first VMA exactly that exactly matches addr
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+					     unsigned long addr,
+					     unsigned long len)
+{
+	struct vm_area_struct *vma;
+	struct rb_node *n = mm->mm_rb.rb_node;
+	unsigned long end = addr + len;
+
+	/* check the cache first */
+	vma = mm->mmap_cache;
+	if (vma && vma->vm_start == addr && vma->vm_end == end)
+		return vma;
+
+	/* trawl the tree (there may be multiple mappings in which addr
+	 * resides) */
+	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+		vma = rb_entry(n, struct vm_area_struct, vm_rb);
+		if (vma->vm_start < addr)
+			continue;
+		if (vma->vm_start > addr)
+			return NULL;
+		if (vma->vm_end == end) {
+			mm->mmap_cache = vma;
+			return vma;
+		}
+	}
+
+	return NULL;
 }
 
 /*
@@ -596,7 +811,7 @@ static int validate_mmap_request(struct file *file,
 				 unsigned long pgoff,
 				 unsigned long *_capabilities)
 {
-	unsigned long capabilities;
+	unsigned long capabilities, rlen;
 	unsigned long reqprot = prot;
 	int ret;
 
@@ -616,12 +831,12 @@
 		return -EINVAL;
 
 	/* Careful about overflows.. */
-	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE)
+	rlen = PAGE_ALIGN(len);
+	if (!rlen || rlen > TASK_SIZE)
 		return -ENOMEM;
 
 	/* offset overflow? */
-	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 		return -EOVERFLOW;
 
 	if (file) {
@@ -795,13 +1010,18 @@ static unsigned long determine_vm_flags(struct file *file,
 }
 
 /*
- * set up a shared mapping on a file
+ * set up a shared mapping on a file (the driver or filesystem provides and
+ * pins the storage)
  */
-static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_shared_file(struct vm_area_struct *vma)
 {
 	int ret;
 
 	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+	if (ret == 0) {
+		vma->vm_region->vm_top = vma->vm_region->vm_end;
+		return ret;
+	}
 	if (ret != -ENOSYS)
 		return ret;
 
@@ -815,10 +1035,14 @@ static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
 /*
  * set up a private mapping or an anonymous shared mapping
  */
-static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_private(struct vm_area_struct *vma,
+			   struct vm_region *region,
+			   unsigned long len)
 {
+	struct page *pages;
+	unsigned long total, point, n, rlen;
 	void *base;
-	int ret;
+	int ret, order;
 
 	/* invoke the file's mapping function so that it can keep track of
 	 * shared mappings on devices or memory
@@ -826,34 +1050,63 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 	 */
 	if (vma->vm_file) {
 		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
-		if (ret != -ENOSYS) {
+		if (ret == 0) {
 			/* shouldn't return success if we're not sharing */
-			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
-			return ret; /* success or a real error */
+			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+			vma->vm_region->vm_top = vma->vm_region->vm_end;
+			return ret;
 		}
+		if (ret != -ENOSYS)
+			return ret;
 
 		/* getting an ENOSYS error indicates that direct mmap isn't
 		 * possible (as opposed to tried but failed) so we'll try to
 		 * make a private copy of the data and map that instead */
 	}
 
+	rlen = PAGE_ALIGN(len);
+
 	/* allocate some memory to hold the mapping
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
-	if (!base)
+	order = get_order(rlen);
+	kdebug("alloc order %d for %lx", order, len);
+
+	pages = alloc_pages(GFP_KERNEL, order);
+	if (!pages)
 		goto enomem;
 
-	vma->vm_start = (unsigned long) base;
-	vma->vm_end = vma->vm_start + len;
-	vma->vm_flags |= VM_MAPPED_COPY;
+	total = 1 << order;
+	atomic_add(total, &mmap_pages_allocated);
+
+	point = rlen >> PAGE_SHIFT;
+
+	/* we allocated a power-of-2 sized page set, so we may want to trim off
+	 * the excess */
+	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+		while (total > point) {
+			order = ilog2(total - point);
+			n = 1 << order;
+			kdebug("shave %lu/%lu @%lu", n, total - point, total);
+			atomic_sub(n, &mmap_pages_allocated);
+			total -= n;
+			set_page_refcounted(pages + total);
+			__free_pages(pages + total, order);
+		}
+	}
+
+	for (point = 1; point < total; point++)
+		set_page_refcounted(&pages[point]);
 
-#ifdef WARN_ON_SLACK
-	if (len + WARN_ON_SLACK <= kobjsize(result))
-		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
-		       len, current->pid, kobjsize(result) - len);
-#endif
+	base = page_address(pages);
+	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+	region->vm_start = (unsigned long) base;
+	region->vm_end = region->vm_start + rlen;
+	region->vm_top = region->vm_start + (total << PAGE_SHIFT);
+
+	vma->vm_start = region->vm_start;
+	vma->vm_end = region->vm_start + len;
 
 	if (vma->vm_file) {
 		/* read the contents of a file into the copy */
@@ -865,26 +1118,28 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
-		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
 		set_fs(old_fs);
 
 		if (ret < 0)
 			goto error_free;
 
 		/* clear the last little bit */
-		if (ret < len)
-			memset(base + ret, 0, len - ret);
+		if (ret < rlen)
+			memset(base + ret, 0, rlen - ret);
 
 	} else {
 		/* if it's an anonymous mapping, then just clear it */
-		memset(base, 0, len);
+		memset(base, 0, rlen);
 	}
 
 	return 0;
 
 error_free:
-	kfree(base);
-	vma->vm_start = 0;
+	free_page_series(region->vm_start, region->vm_end);
+	region->vm_start = vma->vm_start = 0;
+	region->vm_end = vma->vm_end = 0;
+	region->vm_top = 0;
 	return ret;
 
 enomem:
@@ -904,13 +1159,14 @@ unsigned long do_mmap_pgoff(struct file *file,
 			unsigned long flags,
 			unsigned long pgoff)
 {
-	struct vm_list_struct *vml = NULL;
-	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *vma;
+	struct vm_region *region;
 	struct rb_node *rb;
-	unsigned long capabilities, vm_flags;
-	void *result;
+	unsigned long capabilities, vm_flags, result;
 	int ret;
 
+	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
+
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);
 
@@ -918,73 +1174,120 @@ unsigned long do_mmap_pgoff(struct file *file,
 	 * mapping */
 	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
 				    &capabilities);
-	if (ret < 0)
+	if (ret < 0) {
+		kleave(" = %d [val]", ret);
 		return ret;
+	}
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
 	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
-	/* we're going to need to record the mapping if it works */
-	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
-	if (!vml)
-		goto error_getting_vml;
+	/* we're going to need to record the mapping */
+	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
+	if (!region)
+		goto error_getting_region;
+
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (!vma)
+		goto error_getting_vma;
+
+	atomic_set(&region->vm_usage, 1);
+	region->vm_flags = vm_flags;
+	region->vm_pgoff = pgoff;
+
+	INIT_LIST_HEAD(&vma->anon_vma_node);
+	vma->vm_flags = vm_flags;
+	vma->vm_pgoff = pgoff;
 
-	down_write(&nommu_vma_sem);
+	if (file) {
+		region->vm_file = file;
+		get_file(file);
+		vma->vm_file = file;
+		get_file(file);
+		if (vm_flags & VM_EXECUTABLE) {
+			added_exe_file_vma(current->mm);
+			vma->vm_mm = current->mm;
+		}
+	}
 
-	/* if we want to share, we need to check for VMAs created by other
+	down_write(&nommu_region_sem);
+
+	/* if we want to share, we need to check for regions created by other
 	 * mmap() calls that overlap with our proposed mapping
-	 * - we can only share with an exact match on most regular files
+	 * - we can only share with a superset match on most regular files
 	 * - shared mappings on character devices and memory backed files are
 	 *   permitted to overlap inexactly as far as we are concerned for in
 	 *   these cases, sharing is handled in the driver or filesystem rather
 	 *   than here
 	 */
 	if (vm_flags & VM_MAYSHARE) {
-		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long vmpglen;
+		struct vm_region *pregion;
+		unsigned long pglen, rpglen, pgend, rpgend, start;
 
-		/* suppress VMA sharing for shared regions */
-		if (vm_flags & VM_SHARED &&
-		    capabilities & BDI_CAP_MAP_DIRECT)
-			goto dont_share_VMAs;
+		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		pgend = pgoff + pglen;
 
-		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
-			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
+			pregion = rb_entry(rb, struct vm_region, vm_rb);
 
-			if (!(vma->vm_flags & VM_MAYSHARE))
+			if (!(pregion->vm_flags & VM_MAYSHARE))
 				continue;
 
 			/* search for overlapping mappings on the same file */
-			if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
+			if (pregion->vm_file->f_path.dentry->d_inode !=
+			    file->f_path.dentry->d_inode)
 				continue;
 
-			if (vma->vm_pgoff >= pgoff + pglen)
+			if (pregion->vm_pgoff >= pgend)
 				continue;
 
-			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
-			vmpglen >>= PAGE_SHIFT;
-			if (pgoff >= vma->vm_pgoff + vmpglen)
+			rpglen = pregion->vm_end - pregion->vm_start;
+			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+			rpgend = pregion->vm_pgoff + rpglen;
+			if (pgoff >= rpgend)
 				continue;
 
-			/* handle inexactly overlapping matches between mappings */
-			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
+			/* handle inexactly overlapping matches between
+			 * mappings */
+			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
+			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
+				/* new mapping is not a subset of the region */
 				if (!(capabilities & BDI_CAP_MAP_DIRECT))
 					goto sharing_violation;
 				continue;
 			}
 
-			/* we've found a VMA we can share */
-			atomic_inc(&vma->vm_usage);
-
-			vml->vma = vma;
-			result = (void *) vma->vm_start;
-			goto shared;
+			/* we've found a region we can share */
+			atomic_inc(&pregion->vm_usage);
+			vma->vm_region = pregion;
+			start = pregion->vm_start;
+			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
+			vma->vm_start = start;
+			vma->vm_end = start + len;
+
+			if (pregion->vm_flags & VM_MAPPED_COPY) {
+				kdebug("share copy");
+				vma->vm_flags |= VM_MAPPED_COPY;
+			} else {
+				kdebug("share mmap");
+				ret = do_mmap_shared_file(vma);
+				if (ret < 0) {
+					vma->vm_region = NULL;
+					vma->vm_start = 0;
+					vma->vm_end = 0;
+					atomic_dec(&pregion->vm_usage);
+					pregion = NULL;
+					goto error_just_free;
+				}
+			}
+			fput(region->vm_file);
+			kmem_cache_free(vm_region_jar, region);
+			region = pregion;
+			result = start;
+			goto share;
 		}
 
-	dont_share_VMAs:
-		vma = NULL;
-
 		/* obtain the address at which to make a shared mapping
 		 * - this is the hook for quasi-memory character devices to
 		 *    tell us the location of a shared mapping
@@ -995,113 +1298,93 @@ unsigned long do_mmap_pgoff(struct file *file,
 			if (IS_ERR((void *) addr)) {
 				ret = addr;
 				if (ret != (unsigned long) -ENOSYS)
-					goto error;
+					goto error_just_free;
 
 				/* the driver refused to tell us where to site
 				 * the mapping so we'll have to attempt to copy
 				 * it */
 				ret = (unsigned long) -ENODEV;
 				if (!(capabilities & BDI_CAP_MAP_COPY))
-					goto error;
+					goto error_just_free;
 
 				capabilities &= ~BDI_CAP_MAP_DIRECT;
+			} else {
+				vma->vm_start = region->vm_start = addr;
+				vma->vm_end = region->vm_end = addr + len;
 			}
 		}
 	}
 
-	/* we're going to need a VMA struct as well */
-	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
-	if (!vma)
-		goto error_getting_vma;
-
-	INIT_LIST_HEAD(&vma->anon_vma_node);
-	atomic_set(&vma->vm_usage, 1);
-	if (file) {
-		get_file(file);
-		if (vm_flags & VM_EXECUTABLE) {
-			added_exe_file_vma(current->mm);
-			vma->vm_mm = current->mm;
-		}
-	}
-	vma->vm_file = file;
-	vma->vm_flags = vm_flags;
-	vma->vm_start = addr;
-	vma->vm_end = addr + len;
-	vma->vm_pgoff = pgoff;
-
-	vml->vma = vma;
+	vma->vm_region = region;
 
 	/* set up the mapping */
 	if (file && vma->vm_flags & VM_SHARED)
-		ret = do_mmap_shared_file(vma, len);
+		ret = do_mmap_shared_file(vma);
 	else
-		ret = do_mmap_private(vma, len);
+		ret = do_mmap_private(vma, region, len);
 	if (ret < 0)
-		goto error;
-
-	/* okay... we have a mapping; now we have to register it */
-	result = (void *) vma->vm_start;
+		goto error_put_region;
 
-	if (vma->vm_flags & VM_MAPPED_COPY) {
-		realalloc += kobjsize(result);
-		askedalloc += len;
-	}
+	add_nommu_region(region);
 
-	realalloc += kobjsize(vma);
-	askedalloc += sizeof(*vma);
+	/* okay... we have a mapping; now we have to register it */
+	result = vma->vm_start;
 
 	current->mm->total_vm += len >> PAGE_SHIFT;
 
-	add_nommu_vma(vma);
-
- shared:
-	realalloc += kobjsize(vml);
-	askedalloc += sizeof(*vml);
-
-	add_vma_to_mm(current->mm, vml);
+share:
+	add_vma_to_mm(current->mm, vma);
 
-	up_write(&nommu_vma_sem);
+	up_write(&nommu_region_sem);
 
 	if (prot & PROT_EXEC)
-		flush_icache_range((unsigned long) result,
-				   (unsigned long) result + len);
+		flush_icache_range(result, result + len);
 
-#ifdef DEBUG
-	printk("do_mmap:\n");
-	show_process_blocks();
-#endif
-
-	return (unsigned long) result;
+	kleave(" = %lx", result);
+	return result;
 
- error:
-	up_write(&nommu_vma_sem);
-	kfree(vml);
+error_put_region:
+	__put_nommu_region(region);
 	if (vma) {
 		if (vma->vm_file) {
 			fput(vma->vm_file);
 			if (vma->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(vma->vm_mm);
 		}
-		kfree(vma);
+		kmem_cache_free(vm_area_cachep, vma);
 	}
+	kleave(" = %d [pr]", ret);
 	return ret;
 
- sharing_violation:
-	up_write(&nommu_vma_sem);
-	printk("Attempt to share mismatched mappings\n");
-	kfree(vml);
-	return -EINVAL;
+error_just_free:
+	up_write(&nommu_region_sem);
+error:
+	fput(region->vm_file);
+	kmem_cache_free(vm_region_jar, region);
+	fput(vma->vm_file);
+	if (vma->vm_flags & VM_EXECUTABLE)
+		removed_exe_file_vma(vma->vm_mm);
+	kmem_cache_free(vm_area_cachep, vma);
+	kleave(" = %d", ret);
+	return ret;
 
- error_getting_vma:
-	up_write(&nommu_vma_sem);
-	kfree(vml);
-	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
+sharing_violation:
+	up_write(&nommu_region_sem);
+	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+	ret = -EINVAL;
+	goto error;
+
+error_getting_vma:
+	kmem_cache_free(vm_region_jar, region);
+	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
+	       " from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
 
- error_getting_vml:
-	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+error_getting_region:
+	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
+	       " from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
@@ -1109,85 +1392,183 @@ unsigned long do_mmap_pgoff(struct file *file,
 EXPORT_SYMBOL(do_mmap_pgoff);
 
 /*
- * handle mapping disposal for uClinux
+ * split a vma into two pieces at address 'addr', a new vma is allocated either
+ * for the first part or the tail.
  */
-static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+	      unsigned long addr, int new_below)
 {
-	if (vma) {
-		down_write(&nommu_vma_sem);
+	struct vm_area_struct *new;
+	struct vm_region *region;
+	unsigned long npages;
 
-		if (atomic_dec_and_test(&vma->vm_usage)) {
-			delete_nommu_vma(vma);
+	kenter("");
 
-			if (vma->vm_ops && vma->vm_ops->close)
-				vma->vm_ops->close(vma);
+	/* we're only permitted to split anonymous regions that have a single
+	 * owner */
+	if (vma->vm_file ||
+	    atomic_read(&vma->vm_region->vm_usage) != 1)
+		return -ENOMEM;
 
-			/* IO memory and memory shared directly out of the pagecache from
-			 * ramfs/tmpfs mustn't be released here */
-			if (vma->vm_flags & VM_MAPPED_COPY) {
-				realalloc -= kobjsize((void *) vma->vm_start);
-				askedalloc -= vma->vm_end - vma->vm_start;
-				kfree((void *) vma->vm_start);
-			}
+	if (mm->map_count >= sysctl_max_map_count)
+		return -ENOMEM;
 
-			realalloc -= kobjsize(vma);
-			askedalloc -= sizeof(*vma);
+	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
 
-			if (vma->vm_file) {
-				fput(vma->vm_file);
-				if (vma->vm_flags & VM_EXECUTABLE)
-					removed_exe_file_vma(mm);
-			}
-			kfree(vma);
-		}
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	if (!new) {
+		kmem_cache_free(vm_region_jar, region);
+		return -ENOMEM;
+	}
+
+	/* most fields are the same, copy all, and then fixup */
+	*new = *vma;
+	*region = *vma->vm_region;
+	new->vm_region = region;
+
+	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
 
-		up_write(&nommu_vma_sem);
+	if (new_below) {
+		region->vm_top = region->vm_end = new->vm_end = addr;
+	} else {
+		region->vm_start = new->vm_start = addr;
+		region->vm_pgoff = new->vm_pgoff += npages;
+	}
+
+	if (new->vm_ops && new->vm_ops->open)
+		new->vm_ops->open(new);
+
+	delete_vma_from_mm(vma);
+	down_write(&nommu_region_sem);
+	delete_nommu_region(vma->vm_region);
+	if (new_below) {
+		vma->vm_region->vm_start = vma->vm_start = addr;
+		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
+	} else {
+		vma->vm_region->vm_end = vma->vm_end = addr;
+		vma->vm_region->vm_top = addr;
 	}
+	add_nommu_region(vma->vm_region);
+	add_nommu_region(new->vm_region);
+	up_write(&nommu_region_sem);
+	add_vma_to_mm(mm, vma);
+	add_vma_to_mm(mm, new);
+	return 0;
 }
 
 /*
- * release a mapping
- * - under NOMMU conditions the parameters must match exactly to the mapping to
- *   be removed
+ * shrink a VMA by removing the specified chunk from either the beginning or
+ * the end
  */
-int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
+static int shrink_vma(struct mm_struct *mm,
+		      struct vm_area_struct *vma,
+		      unsigned long from, unsigned long to)
 {
-	struct vm_list_struct *vml, **parent;
-	unsigned long end = addr + len;
+	struct vm_region *region;
 
-#ifdef DEBUG
-	printk("do_munmap:\n");
-#endif
+	kenter("");
 
-	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
-		if ((*parent)->vma->vm_start > addr)
-			break;
-		if ((*parent)->vma->vm_start == addr &&
-		    ((len == 0) || ((*parent)->vma->vm_end == end)))
-			goto found;
+	/* adjust the VMA's pointers, which may reposition it in the MM's tree
+	 * and list */
+	delete_vma_from_mm(vma);
+	if (from > vma->vm_start)
+		vma->vm_end = from;
+	else
+		vma->vm_start = to;
+	add_vma_to_mm(mm, vma);
+
+	/* cut the backing region down to size */
+	region = vma->vm_region;
+	BUG_ON(atomic_read(&region->vm_usage) != 1);
+
+	down_write(&nommu_region_sem);
+	delete_nommu_region(region);
+	if (from > region->vm_start) {
+		to = region->vm_top;
+		region->vm_top = region->vm_end = from;
+	} else {
+		region->vm_start = to;
 	}
+	add_nommu_region(region);
+	up_write(&nommu_region_sem);
 
-	printk("munmap of non-mmaped memory by process %d (%s): %p\n",
-	       current->pid, current->comm, (void *) addr);
-	return -EINVAL;
+	free_page_series(from, to);
+	return 0;
+}
 
- found:
-	vml = *parent;
+/*
+ * release a mapping
+ * - under NOMMU conditions the chunk to be unmapped must be backed by a single
+ *   VMA, though it need not cover the whole VMA
+ */
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+	struct vm_area_struct *vma;
+	struct rb_node *rb;
+	unsigned long end = start + len;
+	int ret;
 
-	put_vma(mm, vml->vma);
+	kenter(",%lx,%zx", start, len);
 
-	*parent = vml->next;
-	realalloc -= kobjsize(vml);
-	askedalloc -= sizeof(*vml);
-	kfree(vml);
+	if (len == 0)
+		return -EINVAL;
 
-	update_hiwater_vm(mm);
-	mm->total_vm -= len >> PAGE_SHIFT;
+	/* find the first potentially overlapping VMA */
+	vma = find_vma(mm, start);
+	if (!vma) {
+		printk(KERN_WARNING
+		       "munmap of memory not mmapped by process %d (%s):"
+		       " 0x%lx-0x%lx\n",
+		       current->pid, current->comm, start, start + len - 1);
+		return -EINVAL;
+	}
 
-#ifdef DEBUG
-	show_process_blocks();
-#endif
+	/* we're allowed to split an anonymous VMA but not a file-backed one */
+	if (vma->vm_file) {
+		do {
+			if (start > vma->vm_start) {
+				kleave(" = -EINVAL [miss]");
+				return -EINVAL;
+			}
+			if (end == vma->vm_end)
+				goto erase_whole_vma;
+			rb = rb_next(&vma->vm_rb);
+			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+		} while (rb);
+		kleave(" = -EINVAL [split file]");
+		return -EINVAL;
+	} else {
+		/* the chunk must be a subset of the VMA found */
+		if (start == vma->vm_start && end == vma->vm_end)
+			goto erase_whole_vma;
+		if (start < vma->vm_start || end > vma->vm_end) {
+			kleave(" = -EINVAL [superset]");
+			return -EINVAL;
+		}
+		if (start & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned start]");
+			return -EINVAL;
+		}
+		if (end != vma->vm_end && end & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned split]");
+			return -EINVAL;
+		}
+		if (start != vma->vm_start && end != vma->vm_end) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret < 0) {
+				kleave(" = %d [split]", ret);
+				return ret;
+			}
+		}
+		return shrink_vma(mm, vma, start, end);
+	}
 
+erase_whole_vma:
+	delete_vma_from_mm(vma);
+	delete_vma(mm, vma);
+	kleave(" = 0");
 	return 0;
 }
 EXPORT_SYMBOL(do_munmap);
@@ -1204,32 +1585,26 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
 }
 
 /*
- * Release all mappings
+ * release all the mappings made in a process's VM space
  */
-void exit_mmap(struct mm_struct * mm)
+void exit_mmap(struct mm_struct *mm)
 {
-	struct vm_list_struct *tmp;
-
-	if (mm) {
-#ifdef DEBUG
-		printk("Exit_mmap:\n");
-#endif
+	struct vm_area_struct *vma;
 
-		mm->total_vm = 0;
+	if (!mm)
+		return;
 
-		while ((tmp = mm->context.vmlist)) {
-			mm->context.vmlist = tmp->next;
-			put_vma(mm, tmp->vma);
+	kenter("");
 
-			realalloc -= kobjsize(tmp);
-			askedalloc -= sizeof(*tmp);
-			kfree(tmp);
-		}
+	mm->total_vm = 0;
 
-#ifdef DEBUG
-		show_process_blocks();
-#endif
+	while ((vma = mm->mmap)) {
+		mm->mmap = vma->vm_next;
+		delete_vma_from_mm(vma);
+		delete_vma(mm, vma);
 	}
+
+	kleave("");
 }
 
 unsigned long do_brk(unsigned long addr, unsigned long len)
@@ -1242,8 +1617,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
  *
  * under NOMMU conditions, we only permit changing a mapping's size, and only
- * as long as it stays within the hole allocated by the kmalloc() call in
- * do_mmap_pgoff() and the block is not shareable
+ * as long as it stays within the region allocated by do_mmap_private() and the
+ * block is not shareable
  *
  * MREMAP_FIXED is not supported under NOMMU conditions
  */
@@ -1254,13 +1629,16 @@ unsigned long do_mremap(unsigned long addr,
 	struct vm_area_struct *vma;
 
 	/* insanity checks first */
-	if (new_len == 0)
+	if (old_len == 0 || new_len == 0)
 		return (unsigned long) -EINVAL;
 
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+
 	if (flags & MREMAP_FIXED && new_addr != addr)
 		return (unsigned long) -EINVAL;
 
-	vma = find_vma_exact(current->mm, addr);
+	vma = find_vma_exact(current->mm, addr, old_len);
 	if (!vma)
 		return (unsigned long) -EINVAL;
 
@@ -1270,22 +1648,19 @@ unsigned long do_mremap(unsigned long addr,
 	if (vma->vm_flags & VM_MAYSHARE)
 		return (unsigned long) -EPERM;
 
-	if (new_len > kobjsize((void *) addr))
+	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
 		return (unsigned long) -ENOMEM;
 
 	/* all checks complete - do it */
 	vma->vm_end = vma->vm_start + new_len;
-
-	askedalloc -= old_len;
-	askedalloc += new_len;
-
 	return vma->vm_start;
 }
 EXPORT_SYMBOL(do_mremap);
 
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr)
+asmlinkage
+unsigned long sys_mremap(unsigned long addr,
+			 unsigned long old_len, unsigned long new_len,
+			 unsigned long flags, unsigned long new_addr)
 {
 	unsigned long ret;
 