 fs/proc/meminfo.c    |  2
 fs/proc/task_nommu.c |  4
 include/linux/mm.h   |  2
 kernel/fork.c        |  1
 mm/mmap.c            |  3
 mm/nommu.c           | 52
 6 files changed, 30 insertions(+), 34 deletions(-)
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 43d23948384a..74ea974f5ca6 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -120,7 +120,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(i.freeram-i.freehigh),
 #endif
 #ifndef CONFIG_MMU
-		K((unsigned long) atomic_read(&mmap_pages_allocated)),
+		K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
 #endif
 		K(i.totalswap),
 		K(i.freeswap),
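
Aside: mmap_pages_allocated counts pages, and the kernel's atomic_t is 32 bits wide on all architectures, so a long-lived page counter can wrap; atomic_long_t has the width of a native long, hence the switch to atomic_long_read() here (and to the atomic_long_*() operations in mm/nommu.c below). A rough userspace analogue of the width difference, using C11 atomics (the 4-vs-8-byte result assumes an LP64 target):

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_int  pages32;    /* stands in for the kernel's atomic_t */
        atomic_long pages64;    /* stands in for atomic_long_t */

        atomic_init(&pages32, 0);
        atomic_init(&pages64, 0);
        atomic_fetch_add(&pages64, 123);    /* like atomic_long_add() */

        printf("atomic_int: %zu bytes, atomic_long: %zu bytes\n",
               sizeof pages32, sizeof pages64);    /* 4 vs 8 on LP64 */
        printf("pages64 = %ld\n", atomic_load(&pages64));
        return 0;
    }
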
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 343ea1216bc8..370be0a2c909 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -136,14 +136,14 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 	}
 
 	seq_printf(m,
-		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 		   vma->vm_start,
 		   vma->vm_end,
 		   flags & VM_READ ? 'r' : '-',
 		   flags & VM_WRITE ? 'w' : '-',
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
-		   vma->vm_pgoff << PAGE_SHIFT,
+		   (unsigned long long) vma->vm_pgoff << PAGE_SHIFT,
 		   MAJOR(dev), MINOR(dev), ino, &len);
 
 	if (file) {
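
The format change from %08lx to %08llx pairs with the cast on vm_pgoff: on a 32-bit no-MMU target, vm_pgoff << PAGE_SHIFT is evaluated in 32 bits, so large file offsets are silently truncated before seq_printf() ever sees them; casting to unsigned long long first widens the operand so the shift happens in 64 bits. A self-contained illustration of the hazard (uint32_t stands in for a 32-bit unsigned long, and the offset value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t pgoff = 0x00500000;    /* page offset of a mapping at 20 GiB */

        /* shift performed in 32 bits: the high bits fall off */
        uint32_t truncated = pgoff << PAGE_SHIFT;

        /* widen first, then shift: the full offset survives */
        uint64_t widened = (uint64_t) pgoff << PAGE_SHIFT;

        printf("truncated: %08x\n", truncated);                      /* 00000000 */
        printf("widened:   %08llx\n", (unsigned long long) widened); /* 500000000 */
        return 0;
    }
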
diff --git a/include/linux/mm.h b/include/linux/mm.h
index aeabe953ba4f..bff1f0d475c7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1079,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {}
 #endif
 
 /* nommu.c */
-extern atomic_t mmap_pages_allocated;
+extern atomic_long_t mmap_pages_allocated;
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
diff --git a/kernel/fork.c b/kernel/fork.c
index 47c15840a381..51d1aa21483b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1488,6 +1488,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
 
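
For reference, KMEM_CACHE() is the convenience wrapper from <linux/slab.h>; in kernels of this vintage it expands approximately as below, deriving the cache name, size, and alignment from the struct itself. Note one subtle behavioural difference: the open-coded kmem_cache_create() calls being removed passed an alignment of 0, while the macro passes __alignof__(struct vm_area_struct).

    /* from <linux/slab.h>, approximately: */
    #define KMEM_CACHE(__struct, __flags)                        \
            kmem_cache_create(#__struct, sizeof(struct __struct),\
                              __alignof__(struct __struct),      \
                              (__flags), NULL)

    /* so the line added to proc_caches_init() is roughly equivalent to: */
    vm_area_cachep = kmem_cache_create("vm_area_struct",
                                       sizeof(struct vm_area_struct),
                                       __alignof__(struct vm_area_struct),
                                       SLAB_PANIC, NULL);
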
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2481,7 +2481,4 @@ void mm_drop_all_locks(struct mm_struct *mm)
  */
 void __init mmap_init(void)
 {
-	vm_area_cachep = kmem_cache_create("vm_area_struct",
-			sizeof(struct vm_area_struct), 0,
-			SLAB_PANIC, NULL);
 }
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fcf47d449b4..72eda4aee2cb 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -69,7 +69,7 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
 int heap_stack_gap = 0;
 
-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;
 
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);
@@ -463,12 +463,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
  */
 void __init mmap_init(void)
 {
-	vm_region_jar = kmem_cache_create("vm_region_jar",
-					  sizeof(struct vm_region), 0,
-					  SLAB_PANIC, NULL);
-	vm_area_cachep = kmem_cache_create("vm_area_struct",
-					   sizeof(struct vm_area_struct), 0,
-					   SLAB_PANIC, NULL);
+	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
 }
 
 /*
@@ -486,27 +481,24 @@ static noinline void validate_nommu_regions(void)
 		return;
 
 	last = rb_entry(lastp, struct vm_region, vm_rb);
-	if (unlikely(last->vm_end <= last->vm_start))
-		BUG();
-	if (unlikely(last->vm_top < last->vm_end))
-		BUG();
+	BUG_ON(unlikely(last->vm_end <= last->vm_start));
+	BUG_ON(unlikely(last->vm_top < last->vm_end));
 
 	while ((p = rb_next(lastp))) {
 		region = rb_entry(p, struct vm_region, vm_rb);
 		last = rb_entry(lastp, struct vm_region, vm_rb);
 
-		if (unlikely(region->vm_end <= region->vm_start))
-			BUG();
-		if (unlikely(region->vm_top < region->vm_end))
-			BUG();
-		if (unlikely(region->vm_start < last->vm_top))
-			BUG();
+		BUG_ON(unlikely(region->vm_end <= region->vm_start));
+		BUG_ON(unlikely(region->vm_top < region->vm_end));
+		BUG_ON(unlikely(region->vm_start < last->vm_top));
 
 		lastp = p;
 	}
 }
 #else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
 #endif
 
 /*
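
Two things are going on in this hunk. First, BUG_ON() already wraps its condition in unlikely(), so the conversions keep the branch hint while halving the line count (the explicit unlikely() left inside each BUG_ON() is redundant but harmless). Second, replacing the do-nothing macro with an empty static function means calls to validate_nommu_regions() are still type-checked when region debugging is configured out, yet the optimiser removes the call entirely. A minimal userspace sketch of that second pattern (the names and the DEBUG_REGIONS switch are hypothetical):

    #include <stdio.h>

    #ifdef DEBUG_REGIONS
    static void validate_regions(int nr_regions)
    {
        printf("validating %d regions\n", nr_regions);
    }
    #else
    /* empty function: the call is still type-checked,
       and the optimiser deletes it entirely */
    static void validate_regions(int nr_regions)
    {
        (void) nr_regions;
    }
    #endif

    int main(void)
    {
        validate_regions(3);    /* compiles identically either way */
        return 0;
    }
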
@@ -563,16 +555,17 @@ static void free_page_series(unsigned long from, unsigned long to)
 		struct page *page = virt_to_page(from);
 
 		kdebug("- free %lx", from);
-		atomic_dec(&mmap_pages_allocated);
+		atomic_long_dec(&mmap_pages_allocated);
 		if (page_count(page) != 1)
-			kdebug("free page %p [%d]", page, page_count(page));
+			kdebug("free page %p: refcount not one: %d",
+			       page, page_count(page));
 		put_page(page);
 	}
 }
 
 /*
  * release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
  * - the region may not have been added to the tree yet, in which case vm_top
  *   will equal vm_start
  */
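
The tightened comment pins down a lock-handoff convention: the caller acquires the region semaphore for writing, and the release function drops it on every return path. The same convention sketched in userspace with a pthread rwlock (all names here are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t region_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* caller must hold region_sem for writing; released before returning */
    static void put_region_locked(int *refcount)
    {
        if (--*refcount == 0)
            printf("last reference: region freed\n");
        pthread_rwlock_unlock(&region_sem);
    }

    int main(void)
    {
        int refcount = 1;

        pthread_rwlock_wrlock(&region_sem);
        put_region_locked(&refcount);    /* returns with region_sem dropped */
        return 0;
    }
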
@@ -1096,7 +1089,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 		goto enomem;
 
 	total = 1 << order;
-	atomic_add(total, &mmap_pages_allocated);
+	atomic_long_add(total, &mmap_pages_allocated);
 
 	point = rlen >> PAGE_SHIFT;
 
@@ -1107,7 +1100,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 			order = ilog2(total - point);
 			n = 1 << order;
 			kdebug("shave %lu/%lu @%lu", n, total - point, total);
-			atomic_sub(n, &mmap_pages_allocated);
+			atomic_long_sub(n, &mmap_pages_allocated);
 			total -= n;
 			set_page_refcounted(pages + total);
 			__free_pages(pages + total, order);
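
Context for these two hunks: do_mmap_private() must allocate a power-of-two block of 2^order pages (counted into mmap_pages_allocated in full), then hand back the excess beyond the point pages actually needed, largest power-of-two chunk first, decrementing the counter as it goes. A userspace sketch of the trimming arithmetic, with made-up numbers:

    #include <stdio.h>

    /* integer log2 for unsigned long, like the kernel's ilog2() */
    static unsigned long ilog2_ul(unsigned long n)
    {
        unsigned long l = 0;

        while (n >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned long point = 5;            /* pages actually needed */
        unsigned long order = 3;            /* allocation rounded up to 2^order */
        unsigned long total = 1UL << order; /* 8 pages really allocated */

        /* shave the excess off in the largest power-of-two chunks possible */
        while (total > point) {
            unsigned long n = 1UL << ilog2_ul(total - point);

            printf("free %lu page(s) starting at page %lu\n", n, total - n);
            total -= n;
        }
        return 0;
    }
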
@@ -1536,10 +1529,15 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 	/* find the first potentially overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma) {
-		printk(KERN_WARNING
-		       "munmap of memory not mmapped by process %d (%s):"
-		       " 0x%lx-0x%lx\n",
-		       current->pid, current->comm, start, start + len - 1);
+		static int limit = 0;
+		if (limit < 5) {
+			printk(KERN_WARNING
+			       "munmap of memory not mmapped by process %d"
+			       " (%s): 0x%lx-0x%lx\n",
+			       current->pid, current->comm,
+			       start, start + len - 1);
+			limit++;
+		}
 		return -EINVAL;
 	}
 
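
A closing note on the do_munmap() hunk: the warning is now emitted at most five times, throttled by a function-local static counter. The counter is not synchronised, so the cap is approximate under concurrent callers, which is fine for a diagnostic. The same pattern in miniature:

    #include <stdio.h>

    /* warn at most five times, like the rewritten do_munmap() path */
    static void warn_bad_munmap(int pid, unsigned long start, unsigned long end)
    {
        static int limit = 0;

        if (limit < 5) {
            fprintf(stderr,
                    "munmap of memory not mmapped by process %d:"
                    " 0x%lx-0x%lx\n", pid, start, end);
            limit++;
        }
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 10; i++)
            warn_bad_munmap(42, 0x1000, 0x1fff);    /* prints only 5 lines */
        return 0;
    }
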