Diffstat (limited to 'mm')
-rw-r--r--	mm/mlock.c	47
-rw-r--r--	mm/mmap.c	53
-rw-r--r--	mm/shmem.c	2
-rw-r--r--	mm/slub.c	2
4 files changed, 33 insertions, 71 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 2904a347e476..028ec482fdd4 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -294,14 +294,10 @@ static inline int __mlock_posix_error_return(long retval)
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped. Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	int nr_pages = (end - start) / PAGE_SIZE;
 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -314,20 +310,8 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
-		long error;
-		downgrade_write(&mm->mmap_sem);
-
-		error = __mlock_vma_pages_range(vma, start, end, 1);
 
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		vma = find_vma(mm, start);
-		/* non-NULL vma must contain @start, but need to check @end */
-		if (!vma || end > vma->vm_end)
-			return -ENOMEM;
-
-		return 0;	/* hide other errors from mmap(), et al */
+		return __mlock_vma_pages_range(vma, start, end, 1);
 	}
 
 	/*
@@ -438,41 +422,14 @@ success:
 	vma->vm_flags = newflags;
 
 	if (lock) {
-		/*
-		 * mmap_sem is currently held for write. Downgrade the write
-		 * lock to a read lock so that other faults, mmap scans, ...
-		 * while we fault in all pages.
-		 */
-		downgrade_write(&mm->mmap_sem);
-
 		ret = __mlock_vma_pages_range(vma, start, end, 1);
 
-		/*
-		 * Need to reacquire mmap sem in write mode, as our callers
-		 * expect this. We have no support for atomically upgrading
-		 * a sem to write, so we need to check for ranges while sem
-		 * is unlocked.
-		 */
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		*prev = find_vma(mm, start);
-		/* non-NULL *prev must contain @start, but need to check @end */
-		if (!(*prev) || end > (*prev)->vm_end)
-			ret = -ENOMEM;
-		else if (ret > 0) {
+		if (ret > 0) {
 			mm->locked_vm -= ret;
 			ret = 0;
 		} else
 			ret = __mlock_posix_error_return(ret); /* translate if needed */
 	} else {
-		/*
-		 * TODO: for unlocking, pages will already be resident, so
-		 * we don't need to wait for allocations/reclaim/pagein, ...
-		 * However, unlocking a very large region can still take a
-		 * while. Should we downgrade the semaphore for both lock
-		 * AND unlock ?
-		 */
 		__mlock_vma_pages_range(vma, start, end, 0);
 	}
 
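
Note: the mlock.c hunks above remove the downgrade_write()/up_read()/down_write() sequence, so mmap_sem now stays held for write while the locked range is populated and the vma never needs revalidating afterwards. As a rough userspace sketch (not kernel code; the helper name and numbers below are purely illustrative), the surviving accounting in the lock path reduces to:

#include <assert.h>

/* Stand-in for __mlock_vma_pages_range(): returns a positive count of
 * pages that no longer need to be tracked in locked_vm, or 0. */
static long fake_mlock_range(long nr_pages)
{
	return nr_pages;
}

int main(void)
{
	long locked_vm = 100;
	long ret = fake_mlock_range(5);

	if (ret > 0) {
		locked_vm -= ret;	/* fold those pages back out of locked_vm */
		ret = 0;		/* report success to the caller */
	}
	assert(ret == 0 && locked_vm == 95);
	return 0;
}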
diff --git a/mm/mmap.c b/mm/mmap.c
index d3fa10a726cf..214b6a258eeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -658,6 +658,9 @@ again: remove_next = 1 + (end > next->vm_end);
 	validate_mm(mm);
 }
 
+/* Flags that can be inherited from an existing mapping when merging */
+#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
+
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
@@ -665,7 +668,7 @@ again: remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
 {
-	if (vma->vm_flags != vm_flags)
+	if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
 		return 0;
 	if (vma->vm_file != file)
 		return 0;
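
Note: the relaxed test above lets two vmas merge when their flags differ only in bits listed in VM_MERGEABLE_FLAGS (currently just VM_CAN_NONLINEAR). A standalone illustration of the XOR-and-mask idiom; the flag values are assumptions for this kernel era, not part of the patch:

#include <assert.h>

#define VM_READ			0x00000001UL
#define VM_WRITE		0x00000002UL
#define VM_CAN_NONLINEAR	0x08000000UL	/* assumed value */
#define VM_MERGEABLE_FLAGS	(VM_CAN_NONLINEAR)

/* Same shape as the new is_mergeable_vma() flag check. */
static int flags_mergeable(unsigned long a, unsigned long b)
{
	return !((a ^ b) & ~VM_MERGEABLE_FLAGS);
}

int main(void)
{
	/* Differ only in VM_CAN_NONLINEAR: now treated as mergeable. */
	assert(flags_mergeable(VM_READ | VM_WRITE,
			       VM_READ | VM_WRITE | VM_CAN_NONLINEAR));
	/* Differ in VM_WRITE: still rejected, as before. */
	assert(!flags_mergeable(VM_READ, VM_READ | VM_WRITE));
	return 0;
}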
@@ -1087,6 +1090,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(unsigned int vm_flags)
+{
+	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long flags,
 			unsigned int vm_flags, unsigned long pgoff,
@@ -1114,23 +1126,24 @@ munmap_back:
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	if (flags & MAP_NORESERVE)
+	/*
+	 * Set 'VM_NORESERVE' if we should not account for the
+	 * memory use of this mapping. We only honor MAP_NORESERVE
+	 * if we're allowed to overcommit memory.
+	 */
+	if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+		vm_flags |= VM_NORESERVE;
+	if (!accountable)
 		vm_flags |= VM_NORESERVE;
 
-	if (accountable && (!(flags & MAP_NORESERVE) ||
-			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-		if (vm_flags & VM_SHARED) {
-			/* Check memory availability in shmem_file_setup? */
-			vm_flags |= VM_ACCOUNT;
-		} else if (vm_flags & VM_WRITE) {
-			/*
-			 * Private writable mapping: check memory availability
-			 */
-			charged = len >> PAGE_SHIFT;
-			if (security_vm_enough_memory(charged))
-				return -ENOMEM;
-			vm_flags |= VM_ACCOUNT;
-		}
-	}
+	/*
+	 * Private writable mapping: check memory availability
+	 */
+	if (accountable_mapping(vm_flags)) {
+		charged = len >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			return -ENOMEM;
+		vm_flags |= VM_ACCOUNT;
+	}
 
 	/*
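
Note: taken together, the two hunks above replace the nested accountable/MAP_NORESERVE logic with one VM_NORESERVE decision followed by a single accountable_mapping() test: a mapping is charged only when it is private, writable, and VM_NORESERVE is clear. A compilable userspace sketch of that decision (flag and sysctl values are assumptions from this kernel era, not taken from the patch):

#include <assert.h>

#define VM_WRITE	0x00000002UL
#define VM_SHARED	0x00000008UL
#define VM_NORESERVE	0x00200000UL
enum { OVERCOMMIT_GUESS = 0, OVERCOMMIT_ALWAYS = 1, OVERCOMMIT_NEVER = 2 };

/* Same test as the new accountable_mapping() helper. */
static int accountable_mapping(unsigned int vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	unsigned int vm_flags = VM_WRITE;	/* private writable mapping */
	int map_noreserve = 1;			/* caller passed MAP_NORESERVE */
	int overcommit = OVERCOMMIT_GUESS;

	/* MAP_NORESERVE is honored only when overcommit is not "never". */
	if (map_noreserve && overcommit != OVERCOMMIT_NEVER)
		vm_flags |= VM_NORESERVE;

	/* With VM_NORESERVE set, the mapping is no longer charged. */
	assert(!accountable_mapping(vm_flags));

	/* Without MAP_NORESERVE, a private writable mapping is charged. */
	assert(accountable_mapping(VM_WRITE));
	/* Shared mappings are accounted elsewhere (shmem), not here. */
	assert(!accountable_mapping(VM_WRITE | VM_SHARED));
	return 0;
}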
@@ -1181,14 +1194,6 @@ munmap_back:
 		goto free_vma;
 	}
 
-	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-	 * that memory reservation must be checked; but that reservation
-	 * belongs to shared memory object, not to vma: so now clear it.
-	 */
-	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-		vma->vm_flags &= ~VM_ACCOUNT;
-
 	/* Can addr have changed??
 	 *
 	 * Answer: Yes, several device drivers can do it in their
diff --git a/mm/shmem.c b/mm/shmem.c
index 5d0de96c9789..19d566ccdeea 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2628,7 +2628,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto close_file;
 
 #ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
+	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
 #endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
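
Note: with mmap_region() no longer smuggling VM_ACCOUNT into shared mappings, shmem_file_setup() now makes the accounting decision itself: accounting is on by default and is switched off only when the caller passed VM_NORESERVE. A small sketch of the convention change (flag values are assumptions, not from the patch):

#include <assert.h>

#define VM_ACCOUNT	0x00100000UL	/* assumed value */
#define VM_NORESERVE	0x00200000UL	/* assumed value */

/* Old convention: callers opted in by passing VM_ACCOUNT.
 * New convention: callers opt out by passing VM_NORESERVE. */
static unsigned long shmem_acct_flags(unsigned long flags)
{
	return (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
}

int main(void)
{
	assert(shmem_acct_flags(0) == VM_ACCOUNT);	/* default: accounted */
	assert(shmem_acct_flags(VM_NORESERVE) == 0);	/* opted out */
	return 0;
}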
diff --git a/mm/slub.c b/mm/slub.c
index 6392ae5cc6b1..bdc9abb08a23 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1996,7 +1996,7 @@ static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
 {
 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
+			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
 		kfree(c);
 		return;
 	}
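
Note: the slub.c change fixes an off-by-one in the pointer range test: per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU points one past the end of the static per-cpu array, so a kmalloc'ed kmem_cache_cpu that happened to sit at exactly that address was mistaken for a static slot and never freed. A userspace sketch of the corrected bounds test (names and the array size are illustrative only):

#include <assert.h>

#define N 8	/* stands in for NR_KMEM_CACHE_CPU */

/* Corrected test: p is inside the static array only if base <= p < base + N;
 * base + N is one past the end and must fall through to the free path. */
static int is_static_slot(int *base, int *p)
{
	return !(p < base || p >= base + N);
}

int main(void)
{
	int arr[N];

	assert(is_static_slot(arr, &arr[0]));
	assert(is_static_slot(arr, &arr[N - 1]));
	assert(!is_static_slot(arr, arr + N));	/* the old "p > base + N" test wrongly accepted this */
	return 0;
}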