path: root/include/linux/rmap.h
author		Minchan Kim <minchan@kernel.org>	2012-10-08 19:31:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:43 -0400
commit		02c6de8d757cb32c0829a45d81c3dfcbcafd998b (patch)
tree		0d8f0d182a44ba4ec4af0c909d01eb663e03e254 /include/linux/rmap.h
parent		70400303ce0c4ced3139499c676d5c79636b0c72 (diff)
mm: cma: discard clean pages during contiguous allocation instead of migration
Drop clean cache pages instead of migrating them during alloc_contig_range(), to minimise allocation latency by reducing the amount of migration that is necessary. This is useful for CMA because the latency of migration matters more than evicting the background process's working set. In addition, because pages are reclaimed rather than migrated, fewer free pages are needed as migration targets, which avoids further memory reclaim to obtain free pages; that reclaim is a contributory factor to increased latency.

I measured the elapsed time of __alloc_contig_migrate_range(), which migrates 10M in a 40M movable zone on a QEMU machine.

Before - 146ms, After - 7ms

[akpm@linux-foundation.org: fix nommu build]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Mel Gorman <mgorman@suse.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Cc: Rik van Riel <riel@redhat.com>
Tested-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
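As a rough illustration of the idea, here is a minimal C sketch (not the patch itself) of discarding clean file-cache pages from an isolated page list instead of migrating them. The helper name demo_drop_clean_pages() is hypothetical; try_to_unmap(), the TTU_* flags, and SWAP_SUCCESS are the ones declared in this header, and actually freeing the page (removing it from the page cache) is omitted here since the real patch routes that through the reclaim code.

	/*
	 * Hypothetical sketch only: walk an isolated page list, such as the
	 * one __alloc_contig_migrate_range() builds, and unmap clean
	 * file-cache pages so they can be dropped rather than migrated.
	 */
	static unsigned long demo_drop_clean_pages(struct list_head *page_list)
	{
		struct page *page, *next;
		unsigned long nr_dropped = 0;

		list_for_each_entry_safe(page, next, page_list, lru) {
			/* Only clean page-cache pages are droppable. */
			if (!page_is_file_cache(page) || PageDirty(page))
				continue;

			/* Unmap without marking the page referenced. */
			if (try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_ACCESS)
							== SWAP_SUCCESS)
				nr_dropped++;	/* real code frees it here */
		}
		return nr_dropped;
	}

Skipping dirty pages keeps this fast path free of writeback; dirty pages still go through normal migration.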
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--	include/linux/rmap.h	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b2cce644ffc7..bfe1f4780644 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,6 +71,17 @@ struct anon_vma_chain {
 #endif
 };
 
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
@@ -164,16 +175,6 @@ int page_referenced(struct page *, int is_locked,
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
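The hunks above move enum ttu_flags in front of the #ifdef CONFIG_MMU block, making it available to !CONFIG_MMU builds as well (the nommu fix noted in the changelog). For reference, a hedged sketch of how these flags compose: the low byte holds exactly one action mode, the modifier bits live above TTU_ACTION_MASK, and TTU_ACTION() masks the mode back out. The caller name below is hypothetical.

	/*
	 * Hypothetical caller: unmap a page in migration mode while
	 * ignoring mlock.  TTU_ACTION() recovers the mode because all
	 * modifier bits sit above TTU_ACTION_MASK (0xff).
	 */
	static int demo_unmap_for_migration(struct page *page)
	{
		enum ttu_flags flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;

		VM_BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);

		return try_to_unmap(page, flags);
	}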