path: root/mm
author    Konstantin Khlebnikov <khlebnikov@openvz.org>    2012-10-08 19:29:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:22:19 -0400
commit    314e51b9851b4f4e8ab302243ff5a6fc6147f379 (patch)
tree      f757b89206355fd129830782566768693eed23ce /mm
parent    0103bd16fb90bc741c7a03fd1ea4e8a505abad23 (diff)
mm: kill vma flag VM_RESERVED and mm->reserved_vm counter
A long time ago, in v2.4, VM_RESERVED kept the swapout process off the VMA. It has since lost its original meaning but still has some effects:

 | effect                 | alternative flags
-+------------------------+---------------------------------------------
1| account as reserved_vm | VM_IO
2| skip in core dump      | VM_IO, VM_DONTDUMP
3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
4| do not mlock           | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP

This patch removes the reserved_vm counter from mm_struct. Nobody seems to care about it: it is not exported to userspace directly, it only reduces the total_vm shown in proc.

Thus VM_RESERVED can be replaced with VM_IO or with the pair VM_DONTEXPAND | VM_DONTDUMP.

remap_pfn_range() and io_remap_pfn_range() set VM_IO | VM_DONTEXPAND | VM_DONTDUMP.
remap_vmalloc_range() sets VM_DONTEXPAND | VM_DONTDUMP.

[akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup]

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Venkatesh Pallipadi <venki@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
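For a driver that set VM_RESERVED by hand in its ->mmap handler, the conversion is mechanical. A minimal sketch of what it might look like after this change (not part of the patch; the driver name is hypothetical, and at this point vm_flags is still written directly):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	/* Old: vma->vm_flags |= VM_RESERVED; */
    	/* New: pick the flags that match the old behaviour, i.e. keep the
    	 * vma out of core dumps and refuse merging/expansion via mremap(). */
    	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

    	/* ... populate the mapping, e.g. via a vm_ops->fault handler ... */
    	return 0;
    }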
Diffstat (limited to 'mm')
-rw-r--r--  mm/ksm.c      3
-rw-r--r--  mm/memory.c  11
-rw-r--r--  mm/mlock.c    2
-rw-r--r--  mm/mmap.c     2
-rw-r--r--  mm/nommu.c    2
-rw-r--r--  mm/vmalloc.c  3
6 files changed, 9 insertions, 14 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index f9ccb16559ee..9638620a7530 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1469,8 +1469,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		 */
 		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
 				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
-				 VM_RESERVED  | VM_HUGETLB |
-				 VM_NONLINEAR | VM_MIXEDMAP))
+				 VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
 			return 0;		/* just ignore the advice */
 
 #ifdef VM_SAO
diff --git a/mm/memory.c b/mm/memory.c
index 7b1e4feaec06..e09c04813186 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2297,14 +2297,13 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
  * rest of the world about it:
  *   VM_IO tells people not to look at these pages
  *	(accesses can have side effects).
- *   VM_RESERVED is specified all over the place, because
- *	in 2.4 it kept swapout's vma scan off this vma; but
- *	in 2.6 the LRU scan won't even find its pages, so this
- *	flag means no more than count its pages in reserved_vm,
- *	and omit it from core dump, even when VM_IO turned off.
  *   VM_PFNMAP tells the core MM that the base pages are just
  *	raw PFN mappings, and do not have a "struct page" associated
  *	with them.
+ *   VM_DONTEXPAND
+ *      Disable vma merging and expanding with mremap().
+ *   VM_DONTDUMP
+ *      Omit vma from core dump, even when VM_IO turned off.
  *
  * There's a horrible special case to handle copy-on-write
  * behaviour that some programs depend on. We mark the "original"
@@ -2321,7 +2320,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	if (err)
 		return -EINVAL;
 
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
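With remap_pfn_range() now marking the vma VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP itself, callers no longer need to touch VM_RESERVED (or any of these flags) by hand. A hedged sketch of a typical caller, not taken from this patch; the driver name and buffer address are hypothetical:

    static phys_addr_t mydrv_buf_phys;	/* hypothetical: physical address of the device buffer */

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	unsigned long size = vma->vm_end - vma->vm_start;

    	/* remap_pfn_range() sets the vma flags; no extra flag twiddling needed. */
    	return remap_pfn_range(vma, vma->vm_start,
    			       mydrv_buf_phys >> PAGE_SHIFT,
    			       size, vma->vm_page_prot);
    }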
diff --git a/mm/mlock.c b/mm/mlock.c
index ef726e8aa8e9..a948be4b7ba7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -227,7 +227,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		goto no_mlock;
 
-	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+	if (!((vma->vm_flags & VM_DONTEXPAND) ||
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current->mm))) {
 
diff --git a/mm/mmap.c b/mm/mmap.c
index c1ad2e78ea58..a76042dc806d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -945,8 +945,6 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 		mm->exec_vm += pages;
 	} else if (flags & stack_flags)
 		mm->stack_vm += pages;
-	if (flags & (VM_RESERVED|VM_IO))
-		mm->reserved_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 9c4a7b63a4df..12e84e69dd06 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1811,7 +1811,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	if (addr != (pfn << PAGE_SHIFT))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bb90b1d241c..8de704679bfc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2163,8 +2163,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 		usize -= PAGE_SIZE;
 	} while (usize > 0);
 
-	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
 	return 0;
 }
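Likewise, remap_vmalloc_range() now applies VM_DONTEXPAND | VM_DONTDUMP itself, replacing the old VM_RESERVED assignment and its puzzled comment. A minimal sketch of a caller (hypothetical names, not from this patch; mydrv_vbuf is assumed to be a vmalloc()'ed buffer):

    #include <linux/vmalloc.h>

    static void *mydrv_vbuf;	/* hypothetical vmalloc()'ed buffer */

    static int mydrv_mmap_vbuf(struct file *file, struct vm_area_struct *vma)
    {
    	/* Maps the buffer starting at the requested page offset and tags
    	 * the vma VM_DONTEXPAND | VM_DONTDUMP. */
    	return remap_vmalloc_range(vma, mydrv_vbuf, vma->vm_pgoff);
    }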