about summary refs log tree commit diff stats
path: root/mm/mmap.c
diff options
context:
space:
mode:
author: Ingo Molnar <mingo@kernel.org> 2015-03-05 14:52:18 -0500
committer: Ingo Molnar <mingo@kernel.org> 2015-03-05 14:52:18 -0500
commit: 33ca8a53f262b4af40611bea331b8c87d133af72 (patch)
tree: d6468c820a556c4915bcb5b761204a0fb19e8225 /mm/mmap.c
parent: db2dcb4f91d5fec5c346a82c309187ee821e2495 (diff)
parent: 13a7a6ac0a11197edcd0f756a035f472b42cdf8b (diff)
Merge tag 'v4.0-rc2' into irq/core, to refresh the tree before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- mm/mmap.c | 100
1 file changed, 78 insertions, 22 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f684d5a8087..da9990acc08b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
152 */ 152 */
153int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) 153int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
154{ 154{
155 unsigned long free, allowed, reserve; 155 long free, allowed, reserve;
156 156
157 VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) < 157 VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
158 -(s64)vm_committed_as_batch * num_online_cpus(), 158 -(s64)vm_committed_as_batch * num_online_cpus(),
@@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
220 */ 220 */
221 if (mm) { 221 if (mm) {
222 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); 222 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
223 allowed -= min(mm->total_vm / 32, reserve); 223 allowed -= min_t(long, mm->total_vm / 32, reserve);
224 } 224 }
225 225
226 if (percpu_counter_read_positive(&vm_committed_as) < allowed) 226 if (percpu_counter_read_positive(&vm_committed_as) < allowed)
@@ -243,10 +243,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
243 mapping_unmap_writable(mapping); 243 mapping_unmap_writable(mapping);
244 244
245 flush_dcache_mmap_lock(mapping); 245 flush_dcache_mmap_lock(mapping);
246 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 246 vma_interval_tree_remove(vma, &mapping->i_mmap);
247 list_del_init(&vma->shared.nonlinear);
248 else
249 vma_interval_tree_remove(vma, &mapping->i_mmap);
250 flush_dcache_mmap_unlock(mapping); 247 flush_dcache_mmap_unlock(mapping);
251} 248}
252 249
@@ -649,10 +646,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
649 atomic_inc(&mapping->i_mmap_writable); 646 atomic_inc(&mapping->i_mmap_writable);
650 647
651 flush_dcache_mmap_lock(mapping); 648 flush_dcache_mmap_lock(mapping);
652 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 649 vma_interval_tree_insert(vma, &mapping->i_mmap);
653 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
654 else
655 vma_interval_tree_insert(vma, &mapping->i_mmap);
656 flush_dcache_mmap_unlock(mapping); 650 flush_dcache_mmap_unlock(mapping);
657 } 651 }
658} 652}
@@ -789,14 +783,11 @@ again: remove_next = 1 + (end > next->vm_end);
789 783
790 if (file) { 784 if (file) {
791 mapping = file->f_mapping; 785 mapping = file->f_mapping;
792 if (!(vma->vm_flags & VM_NONLINEAR)) { 786 root = &mapping->i_mmap;
793 root = &mapping->i_mmap; 787 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
794 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
795 788
796 if (adjust_next) 789 if (adjust_next)
797 uprobe_munmap(next, next->vm_start, 790 uprobe_munmap(next, next->vm_start, next->vm_end);
798 next->vm_end);
799 }
800 791
801 i_mmap_lock_write(mapping); 792 i_mmap_lock_write(mapping);
802 if (insert) { 793 if (insert) {
@@ -2634,6 +2625,75 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2634 return vm_munmap(addr, len); 2625 return vm_munmap(addr, len);
2635} 2626}
2636 2627
2628
2629/*
2630 * Emulation of deprecated remap_file_pages() syscall.
2631 */
2632SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2633 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2634{
2635
2636 struct mm_struct *mm = current->mm;
2637 struct vm_area_struct *vma;
2638 unsigned long populate = 0;
2639 unsigned long ret = -EINVAL;
2640 struct file *file;
2641
2642 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
2643 "See Documentation/vm/remap_file_pages.txt.\n",
2644 current->comm, current->pid);
2645
2646 if (prot)
2647 return ret;
2648 start = start & PAGE_MASK;
2649 size = size & PAGE_MASK;
2650
2651 if (start + size <= start)
2652 return ret;
2653
2654 /* Does pgoff wrap? */
2655 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2656 return ret;
2657
2658 down_write(&mm->mmap_sem);
2659 vma = find_vma(mm, start);
2660
2661 if (!vma || !(vma->vm_flags & VM_SHARED))
2662 goto out;
2663
2664 if (start < vma->vm_start || start + size > vma->vm_end)
2665 goto out;
2666
2667 if (pgoff == linear_page_index(vma, start)) {
2668 ret = 0;
2669 goto out;
2670 }
2671
2672 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2673 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2674 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2675
2676 flags &= MAP_NONBLOCK;
2677 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2678 if (vma->vm_flags & VM_LOCKED) {
2679 flags |= MAP_LOCKED;
2680 /* drop PG_Mlocked flag for over-mapped range */
2681 munlock_vma_pages_range(vma, start, start + size);
2682 }
2683
2684 file = get_file(vma->vm_file);
2685 ret = do_mmap_pgoff(vma->vm_file, start, size,
2686 prot, flags, pgoff, &populate);
2687 fput(file);
2688out:
2689 up_write(&mm->mmap_sem);
2690 if (populate)
2691 mm_populate(ret, populate);
2692 if (!IS_ERR_VALUE(ret))
2693 ret = 0;
2694 return ret;
2695}
2696
2637static inline void verify_mm_writelocked(struct mm_struct *mm) 2697static inline void verify_mm_writelocked(struct mm_struct *mm)
2638{ 2698{
2639#ifdef CONFIG_DEBUG_VM 2699#ifdef CONFIG_DEBUG_VM
@@ -2791,9 +2851,6 @@ void exit_mmap(struct mm_struct *mm)
2791 vma = remove_vma(vma); 2851 vma = remove_vma(vma);
2792 } 2852 }
2793 vm_unacct_memory(nr_accounted); 2853 vm_unacct_memory(nr_accounted);
2794
2795 WARN_ON(atomic_long_read(&mm->nr_ptes) >
2796 (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2797} 2854}
2798 2855
2799/* Insert vm structure into process list sorted by address 2856/* Insert vm structure into process list sorted by address
@@ -3108,8 +3165,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3108 * 3165 *
3109 * mmap_sem in write mode is required in order to block all operations 3166 * mmap_sem in write mode is required in order to block all operations
3110 * that could modify pagetables and free pages without need of 3167 * that could modify pagetables and free pages without need of
3111 * altering the vma layout (for example populate_range() with 3168 * altering the vma layout. It's also needed in write mode to avoid new
3112 * nonlinear vmas). It's also needed in write mode to avoid new
3113 * anon_vmas to be associated with existing vmas. 3169 * anon_vmas to be associated with existing vmas.
3114 * 3170 *
3115 * A single task can't take more than one mm_take_all_locks() in a row 3171 * A single task can't take more than one mm_take_all_locks() in a row