Diffstat (limited to 'mm/nommu.c')
-rw-r--r--	mm/nommu.c	52
1 file changed, 25 insertions(+), 27 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fcf47d449b4..72eda4aee2cb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,7 +69,7 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = 1;	/* page trimming behaviour */
 int heap_stack_gap = 0;
 
-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;
 
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);
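This hunk widens mmap_pages_allocated from atomic_t, which is 32-bit, to the pointer-sized atomic_long_t, so the page counter cannot wrap on 64-bit systems; the hunks further down convert the matching atomic_add()/atomic_sub()/atomic_dec() call sites. A minimal userspace sketch of the same widening, using C11 atomics as a stand-in for the kernel's atomic_long API:

	/* Stand-in for the kernel's atomic_long_t counter; the name mirrors
	 * the patch, but this is plain C11, not kernel code. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long mmap_pages_allocated;	/* was: atomic_int */

	int main(void)
	{
		atomic_fetch_add(&mmap_pages_allocated, 1L << 18); /* charge 2^18 pages */
		atomic_fetch_sub(&mmap_pages_allocated, 1L << 8);  /* shave some back */
		printf("%ld pages charged\n", atomic_load(&mmap_pages_allocated));
		return 0;
	}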
@@ -463,12 +463,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
  */
 void __init mmap_init(void)
 {
-	vm_region_jar = kmem_cache_create("vm_region_jar",
-					  sizeof(struct vm_region), 0,
-					  SLAB_PANIC, NULL);
-	vm_area_cachep = kmem_cache_create("vm_area_struct",
-					   sizeof(struct vm_area_struct), 0,
-					   SLAB_PANIC, NULL);
+	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
 }
 
 /*
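mmap_init() now uses the KMEM_CACHE() helper instead of spelling out kmem_cache_create() by hand (and stops creating vm_area_cachep here at all). KMEM_CACHE() derives the cache name, object size and alignment from the struct type itself; its definition in <linux/slab.h> is approximately:

	#define KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
				  __alignof__(struct __struct),		\
				  (__flags), NULL)

so KMEM_CACHE(vm_region, SLAB_PANIC) yields a cache named "vm_region" with the struct's natural alignment, where the old call passed an alignment of 0 and a hand-written name.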
@@ -486,27 +481,24 @@ static noinline void validate_nommu_regions(void)
 		return;
 
 	last = rb_entry(lastp, struct vm_region, vm_rb);
-	if (unlikely(last->vm_end <= last->vm_start))
-		BUG();
-	if (unlikely(last->vm_top < last->vm_end))
-		BUG();
+	BUG_ON(unlikely(last->vm_end <= last->vm_start));
+	BUG_ON(unlikely(last->vm_top < last->vm_end));
 
 	while ((p = rb_next(lastp))) {
 		region = rb_entry(p, struct vm_region, vm_rb);
 		last = rb_entry(lastp, struct vm_region, vm_rb);
 
-		if (unlikely(region->vm_end <= region->vm_start))
-			BUG();
-		if (unlikely(region->vm_top < region->vm_end))
-			BUG();
-		if (unlikely(region->vm_start < last->vm_top))
-			BUG();
+		BUG_ON(unlikely(region->vm_end <= region->vm_start));
+		BUG_ON(unlikely(region->vm_top < region->vm_end));
+		BUG_ON(unlikely(region->vm_start < last->vm_top));
 
 		lastp = p;
 	}
 }
 #else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
 #endif
 
 /*
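Two cleanups in this hunk. First, each "if (cond) BUG();" pair collapses into a single BUG_ON(cond); keeping the explicit unlikely() inside is arguably redundant, since BUG_ON() already marks its condition as an unlikely branch. Second, the no-op fallback becomes an empty static function instead of a do-while macro, so the call site is parsed and type-checked in both configurations while still compiling to nothing. The general shape of the pattern (the config symbol here is illustrative, not taken from this patch):

	#ifdef CONFIG_DEBUG_SOMETHING	/* illustrative config symbol */
	static void validate_nommu_regions(void)
	{
		/* real validation code */
	}
	#else
	/* An empty function is type-checked even when the debug option is
	 * off, unlike "#define validate_nommu_regions() do {} while(0)",
	 * and the compiler optimises the call away. */
	static void validate_nommu_regions(void)
	{
	}
	#endif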
@@ -563,16 +555,17 @@ static void free_page_series(unsigned long from, unsigned long to)
 		struct page *page = virt_to_page(from);
 
 		kdebug("- free %lx", from);
-		atomic_dec(&mmap_pages_allocated);
+		atomic_long_dec(&mmap_pages_allocated);
 		if (page_count(page) != 1)
-			kdebug("free page %p [%d]", page, page_count(page));
+			kdebug("free page %p: refcount not one: %d",
+			       page, page_count(page));
 		put_page(page);
 	}
 }
 
 /*
  * release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
  * - the region may not have been added to the tree yet, in which case vm_top
  *   will equal vm_start
  */
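The comment fix tightens the documented locking contract: the function this comment introduces (__put_nommu_region() in nommu.c) may modify the region tree and then drops the lock itself, so the caller must hold the region semaphore as a writer, not merely hold it. The expected pairing against the rw-semaphore API, with the detach step paraphrased:

	down_write(&nommu_region_sem);	/* writer lock: the tree may change */
	/* ... unlink the region from its last user ... */
	__put_nommu_region(region);	/* drops the reference and releases
					 * nommu_region_sem on our behalf */
	/* no up_write() here - the callee released the semaphore */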
@@ -1096,7 +1089,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 		goto enomem;
 
 	total = 1 << order;
-	atomic_add(total, &mmap_pages_allocated);
+	atomic_long_add(total, &mmap_pages_allocated);
 
 	point = rlen >> PAGE_SHIFT;
 
@@ -1107,7 +1100,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 		order = ilog2(total - point);
 		n = 1 << order;
 		kdebug("shave %lu/%lu @%lu", n, total - point, total);
-		atomic_sub(n, &mmap_pages_allocated);
+		atomic_long_sub(n, &mmap_pages_allocated);
 		total -= n;
 		set_page_refcounted(pages + total);
 		__free_pages(pages + total, order);
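These two hunks finish converting the counter call sites, and the code around them rewards a closer look: do_mmap_private() allocates 2^order contiguous pages, charges all of them to mmap_pages_allocated, then shaves the unused tail back off in power-of-two chunks, un-charging as it goes (the enclosing "while (total > point)" loop sits just outside the hunk). A self-contained userspace sketch of that trimming loop, with a plain long standing in for the atomic counter and the page-freeing calls stubbed out:

	#include <stdio.h>

	static long pages_allocated;

	/* stand-in for the kernel's ilog2(): floor(log2(n)) for n > 0 */
	static unsigned ilog2u(unsigned long n)
	{
		unsigned l = 0;
		while (n >>= 1)
			l++;
		return l;
	}

	int main(void)
	{
		unsigned long total = 1UL << 5;	/* 32 pages came back from the allocator */
		unsigned long point = 21;	/* only 21 are actually needed */

		pages_allocated += total;
		while (total > point) {
			unsigned order = ilog2u(total - point);
			unsigned long n = 1UL << order;

			printf("shave %lu pages at page offset %lu\n", n, total - n);
			pages_allocated -= n;
			total -= n;
			/* kernel: set_page_refcounted(pages + total);
			 *         __free_pages(pages + total, order); */
		}
		printf("%ld pages still charged\n", pages_allocated);
		return 0;
	}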
@@ -1536,10 +1529,15 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* find the first potentially overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma) {
-		printk(KERN_WARNING
-		       "munmap of memory not mmapped by process %d (%s):"
-		       " 0x%lx-0x%lx\n",
-		       current->pid, current->comm, start, start + len - 1);
+		static int limit = 0;
+		if (limit < 5) {
+			printk(KERN_WARNING
+			       "munmap of memory not mmapped by process %d"
+			       " (%s): 0x%lx-0x%lx\n",
+			       current->pid, current->comm,
+			       start, start + len - 1);
+			limit++;
+		}
 		return -EINVAL;
 	}
 
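The final hunk caps the bogus-munmap warning at five messages per boot with a function-local static counter; the counter is not atomic, but an occasional extra line from a race is harmless in a log limiter. A time-based alternative would be the kernel's printk_ratelimit() helper, sketched here as an option rather than what this patch does:

	if (printk_ratelimit())		/* time-based throttle instead of the
					 * five-shot static counter */
		printk(KERN_WARNING
		       "munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
		       current->pid, current->comm, start, start + len - 1);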