author     Linus Torvalds <torvalds@linux-foundation.org>   2014-12-13 16:00:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-12-13 16:00:36 -0500
commit     78a45c6f067824cf5d0a9fedea7339ac2e28603c (patch)
tree       b4f78c8b6b9059ddace0a18c11629b8d2045f793 /mm/nommu.c
parent     f96fe225677b3efb74346ebd56fafe3997b02afa (diff)
parent     29d293b6007b91a4463f05bc8d0b26e0e65c5816 (diff)
Merge branch 'akpm' (second patch-bomb from Andrew)
Merge second patchbomb from Andrew Morton:
- the rest of MM
- misc fs fixes
- add execveat() syscall
- new ratelimit feature for fault-injection
- decompressor updates
- ipc/ updates
- fallocate feature creep
- fsnotify cleanups
- a few other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (99 commits)
cgroups: Documentation: fix trivial typos and wrong paragraph numberings
parisc: percpu: update comments referring to __get_cpu_var
percpu: update local_ops.txt to reflect this_cpu operations
percpu: remove __get_cpu_var and __raw_get_cpu_var macros
fsnotify: remove destroy_list from fsnotify_mark
fsnotify: unify inode and mount marks handling
fallocate: create FAN_MODIFY and IN_MODIFY events
mm/cma: make kmemleak ignore CMA regions
slub: fix cpuset check in get_any_partial
slab: fix cpuset check in fallback_alloc
shmdt: use i_size_read() instead of ->i_size
ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments
ipc/msg: increase MSGMNI, remove scaling
ipc/sem.c: increase SEMMSL, SEMMNI, SEMOPM
ipc/sem.c: change memory barrier in sem_lock() to smp_rmb()
lib/decompress.c: consistency of compress formats for kernel image
decompress_bunzip2: off by one in get_next_block()
usr/Kconfig: make initrd compression algorithm selection not expert
fault-inject: add ratelimit option
ratelimit: add initialization macro
...
Diffstat (limited to 'mm/nommu.c')

-rw-r--r--  mm/nommu.c  50
1 file changed, 19 insertions(+), 31 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index bd1808e194a7..b51eadf6d952 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -722,11 +722,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		flush_dcache_mmap_lock(mapping);
 		vma_interval_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 
 	/* add the VMA to the tree */
@@ -795,11 +795,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		flush_dcache_mmap_lock(mapping);
 		vma_interval_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 
 	/* remove from the MM's tree and list */
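
The two hunks above are mechanical fallout from the i_mmap_mutex-to-rwsem conversion merged in this patch-bomb: mapping->i_mmap_mutex becomes an rw_semaphore, and paths that insert into or remove from the i_mmap interval tree now take it exclusively through small wrapper helpers. For orientation, a minimal sketch of the write-side helpers, assuming they match the include/linux/fs.h definitions added by that series:

/* Sketch of the write-side i_mmap helpers (assumed to match the
 * include/linux/fs.h definitions from the rwsem-conversion series):
 * interval-tree writers serialize via the rwsem's exclusive mode. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);
}

On these two write paths the conversion changes nothing in practice; the payoff is that read-only tree walkers, like the one in the final hunks below, can now take the lock shared and run concurrently.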
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 			   unsigned long len,
 			   unsigned long capabilities)
 {
-	struct page *pages;
-	unsigned long total, point, n;
+	unsigned long total, point;
 	void *base;
 	int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	order = get_order(len);
 	kdebug("alloc order %d for %lx", order, len);
 
-	pages = alloc_pages(GFP_KERNEL, order);
-	if (!pages)
-		goto enomem;
-
 	total = 1 << order;
-	atomic_long_add(total, &mmap_pages_allocated);
-
 	point = len >> PAGE_SHIFT;
 
-	/* we allocated a power-of-2 sized page set, so we may want to trim off
-	 * the excess */
+	/* we don't want to allocate a power-of-2 sized page set */
 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-		while (total > point) {
-			order = ilog2(total - point);
-			n = 1 << order;
-			kdebug("shave %lu/%lu @%lu", n, total - point, total);
-			atomic_long_sub(n, &mmap_pages_allocated);
-			total -= n;
-			set_page_refcounted(pages + total);
-			__free_pages(pages + total, order);
-		}
+		total = point;
+		kdebug("try to alloc exact %lu pages", total);
+		base = alloc_pages_exact(len, GFP_KERNEL);
+	} else {
+		base = (void *)__get_free_pages(GFP_KERNEL, order);
 	}
 
-	for (point = 1; point < total; point++)
-		set_page_refcounted(&pages[point]);
+	if (!base)
+		goto enomem;
+
+	atomic_long_add(total, &mmap_pages_allocated);
 
-	base = page_address(pages);
 	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
 	region->vm_start = (unsigned long) base;
 	region->vm_end = region->vm_start + len;
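
This hunk drops nommu's open-coded trimming of a power-of-2 allocation in favour of alloc_pages_exact(), which performs the same trim internally: allocate a 2^order block, split it into individually refcounted pages, and free everything past the requested length. When trimming is not wanted (sysctl_nr_trim_pages unset, or the excess too small) the plain power-of-2 __get_free_pages() path is kept. A simplified sketch of the technique, condensed from the mm/page_alloc.c of that era; the function name below is a placeholder, not a kernel symbol:

/* Simplified sketch of what alloc_pages_exact() does internally
 * (condensed from mm/page_alloc.c; the name is a placeholder). It
 * subsumes the per-caller "shave off the excess" loop that this
 * hunk deletes from do_mmap_private(). */
static void *alloc_pages_exact_sketch(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr = __get_free_pages(gfp_mask, order);

	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		/* Give each page in the block its own reference count
		 * so the unused tail pages can be freed one by one. */
		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

Because split_page() leaves every sub-page individually refcounted, the caller no longer needs the deleted set_page_refcounted() loop either.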
@@ -2094,14 +2083,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
-	mutex_lock(&inode->i_mapping->i_mmap_mutex);
+	i_mmap_lock_read(inode->i_mapping);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
-			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+			i_mmap_unlock_read(inode->i_mapping);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2113,8 +2102,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	 * we don't check for any regions that start beyond the EOF as there
 	 * shouldn't be any
 	 */
-	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
-				  0, ULONG_MAX) {
+	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
 		if (!(vma->vm_flags & VM_SHARED))
 			continue;
 
@@ -2129,7 +2117,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
-	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+	i_mmap_unlock_read(inode->i_mapping);
 	up_write(&nommu_region_sem);
 	return 0;
 }
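
nommu_shrink_inode_mappings() only walks the interval tree without modifying it, so the last three hunks downgrade it from the old exclusive i_mmap_mutex to the shared side of the new rwsem, allowing concurrent tree readers. The read-side helpers it now calls, again sketched under the assumption that they match the include/linux/fs.h definitions from this series:

/* Read-side counterparts (assumed per include/linux/fs.h in this
 * series): walkers that only read the i_mmap interval tree, such as
 * nommu_shrink_inode_mappings() above, need only the shared lock. */
static inline void i_mmap_lock_read(struct address_space *mapping)
{
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}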