aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2015-09-09 18:35:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-10 16:29:01 -0400
commit33c3fc71c8cfa3cc3a98beaa901c069c177dc295 (patch)
treed367186631c578017fda08db238e739d44fd0b99 /mm
parent1d7715c676a1566c2e4c3e77d16b1f9bb4909025 (diff)
mm: introduce idle page tracking
Knowing the portion of memory that is not used by a certain application or memory cgroup (idle memory) can be useful for partitioning the system efficiently, e.g. by setting memory cgroup limits appropriately. Currently, the only means to estimate the amount of idle memory provided by the kernel is /proc/PID/{clear_refs,smaps}: the user can clear the access bit for all pages mapped to a particular process by writing 1 to clear_refs, wait for some time, and then count smaps:Referenced. However, this method has two serious shortcomings: - it does not count unmapped file pages - it affects the reclaimer logic To overcome these drawbacks, this patch introduces two new page flags, Idle and Young, and a new sysfs file, /sys/kernel/mm/page_idle/bitmap. A page's Idle flag can only be set from userspace by setting a bit in /sys/kernel/mm/page_idle/bitmap at the offset corresponding to the page, and it is cleared whenever the page is accessed either through page tables (it is cleared in page_referenced() in this case) or using the read(2) system call (mark_page_accessed()). Thus by setting the Idle flag for pages of a particular workload, which can be found e.g. by reading /proc/PID/pagemap, waiting for some time to let the workload access its working set, and then reading the bitmap file, one can estimate the amount of pages that are not used by the workload. The Young page flag is used to avoid interference with the memory reclaimer. A page's Young flag is set whenever the Access bit of a page table entry pointing to the page is cleared by writing to the bitmap file. If page_referenced() is called on a Young page, it will add 1 to its return value, therefore concealing the fact that the Access bit was cleared. Note, since there is no room for extra page flags on 32 bit, this feature uses extended page flags when compiled on 32 bit. 
[akpm@linux-foundation.org: fix build] [akpm@linux-foundation.org: kpageidle requires an MMU] [akpm@linux-foundation.org: decouple from page-flags rework] Signed-off-by: Vladimir Davydov <vdavydov@parallels.com> Reviewed-by: Andres Lagar-Cavilla <andreslc@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Greg Thelen <gthelen@google.com> Cc: Michel Lespinasse <walken@google.com> Cc: David Rientjes <rientjes@google.com> Cc: Pavel Emelyanov <xemul@parallels.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Jonathan Corbet <corbet@lwn.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/Makefile1
-rw-r--r--mm/debug.c4
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/migrate.c6
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/page_idle.c232
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/swap.c3
9 files changed, 278 insertions, 2 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 3a4070f5ab79..6413d027c0b2 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -649,6 +649,18 @@ config DEFERRED_STRUCT_PAGE_INIT
649 processes running early in the lifetime of the system until kswapd 649 processes running early in the lifetime of the system until kswapd
650 finishes the initialisation. 650 finishes the initialisation.
651 651
652config IDLE_PAGE_TRACKING
653 bool "Enable idle page tracking"
654 depends on SYSFS && MMU
655 select PAGE_EXTENSION if !64BIT
656 help
657 This feature allows one to estimate the amount of user pages that have
658 not been touched during a given period of time. This information can
659 be useful to tune memory cgroup limits and/or for job placement
660 within a compute cluster.
661
662 See Documentation/vm/idle_page_tracking.txt for more details.
663
652config ZONE_DEVICE 664config ZONE_DEVICE
653 bool "Device memory (pmem, etc...) hotplug support" if EXPERT 665 bool "Device memory (pmem, etc...) hotplug support" if EXPERT
654 default !ZONE_DMA 666 default !ZONE_DMA
diff --git a/mm/Makefile b/mm/Makefile
index b424d5e5b6ff..56f8eed73f1a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -79,3 +79,4 @@ obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
79obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o 79obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
80obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o 80obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
81obj-$(CONFIG_USERFAULTFD) += userfaultfd.o 81obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
82obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
diff --git a/mm/debug.c b/mm/debug.c
index 76089ddf99ea..6c1b3ea61bfd 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -48,6 +48,10 @@ static const struct trace_print_flags pageflag_names[] = {
48#ifdef CONFIG_TRANSPARENT_HUGEPAGE 48#ifdef CONFIG_TRANSPARENT_HUGEPAGE
49 {1UL << PG_compound_lock, "compound_lock" }, 49 {1UL << PG_compound_lock, "compound_lock" },
50#endif 50#endif
51#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
52 {1UL << PG_young, "young" },
53 {1UL << PG_idle, "idle" },
54#endif
51}; 55};
52 56
53static void dump_flags(unsigned long flags, 57static void dump_flags(unsigned long flags,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b16279cbd91d..4b06b8db9df2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -25,6 +25,7 @@
25#include <linux/migrate.h> 25#include <linux/migrate.h>
26#include <linux/hashtable.h> 26#include <linux/hashtable.h>
27#include <linux/userfaultfd_k.h> 27#include <linux/userfaultfd_k.h>
28#include <linux/page_idle.h>
28 29
29#include <asm/tlb.h> 30#include <asm/tlb.h>
30#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
@@ -1757,6 +1758,11 @@ static void __split_huge_page_refcount(struct page *page,
1757 /* clear PageTail before overwriting first_page */ 1758 /* clear PageTail before overwriting first_page */
1758 smp_wmb(); 1759 smp_wmb();
1759 1760
1761 if (page_is_young(page))
1762 set_page_young(page_tail);
1763 if (page_is_idle(page))
1764 set_page_idle(page_tail);
1765
1760 /* 1766 /*
1761 * __split_huge_page_splitting() already set the 1767 * __split_huge_page_splitting() already set the
1762 * splitting bit in all pmd that could map this 1768 * splitting bit in all pmd that could map this
@@ -2262,7 +2268,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2262 VM_BUG_ON_PAGE(PageLRU(page), page); 2268 VM_BUG_ON_PAGE(PageLRU(page), page);
2263 2269
2264 /* If there is no mapped pte young don't collapse the page */ 2270 /* If there is no mapped pte young don't collapse the page */
2265 if (pte_young(pteval) || PageReferenced(page) || 2271 if (pte_young(pteval) ||
2272 page_is_young(page) || PageReferenced(page) ||
2266 mmu_notifier_test_young(vma->vm_mm, address)) 2273 mmu_notifier_test_young(vma->vm_mm, address))
2267 referenced = true; 2274 referenced = true;
2268 } 2275 }
@@ -2693,7 +2700,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
2693 */ 2700 */
2694 if (page_count(page) != 1 + !!PageSwapCache(page)) 2701 if (page_count(page) != 1 + !!PageSwapCache(page))
2695 goto out_unmap; 2702 goto out_unmap;
2696 if (pte_young(pteval) || PageReferenced(page) || 2703 if (pte_young(pteval) ||
2704 page_is_young(page) || PageReferenced(page) ||
2697 mmu_notifier_test_young(vma->vm_mm, address)) 2705 mmu_notifier_test_young(vma->vm_mm, address))
2698 referenced = true; 2706 referenced = true;
2699 } 2707 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 02ce25df16c2..c3cb566af3e2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -37,6 +37,7 @@
37#include <linux/gfp.h> 37#include <linux/gfp.h>
38#include <linux/balloon_compaction.h> 38#include <linux/balloon_compaction.h>
39#include <linux/mmu_notifier.h> 39#include <linux/mmu_notifier.h>
40#include <linux/page_idle.h>
40 41
41#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
42 43
@@ -524,6 +525,11 @@ void migrate_page_copy(struct page *newpage, struct page *page)
524 __set_page_dirty_nobuffers(newpage); 525 __set_page_dirty_nobuffers(newpage);
525 } 526 }
526 527
528 if (page_is_young(page))
529 set_page_young(newpage);
530 if (page_is_idle(page))
531 set_page_idle(newpage);
532
527 /* 533 /*
528 * Copy NUMA information to the new page, to prevent over-eager 534 * Copy NUMA information to the new page, to prevent over-eager
529 * future migrations of this same page. 535 * future migrations of this same page.
diff --git a/mm/page_ext.c b/mm/page_ext.c
index d86fd2f5353f..292ca7b8debd 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -6,6 +6,7 @@
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/kmemleak.h> 7#include <linux/kmemleak.h>
8#include <linux/page_owner.h> 8#include <linux/page_owner.h>
9#include <linux/page_idle.h>
9 10
10/* 11/*
11 * struct page extension 12 * struct page extension
@@ -59,6 +60,9 @@ static struct page_ext_operations *page_ext_ops[] = {
59#ifdef CONFIG_PAGE_OWNER 60#ifdef CONFIG_PAGE_OWNER
60 &page_owner_ops, 61 &page_owner_ops,
61#endif 62#endif
63#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
64 &page_idle_ops,
65#endif
62}; 66};
63 67
64static unsigned long total_usage; 68static unsigned long total_usage;
diff --git a/mm/page_idle.c b/mm/page_idle.c
new file mode 100644
index 000000000000..d5dd79041484
--- /dev/null
+++ b/mm/page_idle.c
@@ -0,0 +1,232 @@
1#include <linux/init.h>
2#include <linux/bootmem.h>
3#include <linux/fs.h>
4#include <linux/sysfs.h>
5#include <linux/kobject.h>
6#include <linux/mm.h>
7#include <linux/mmzone.h>
8#include <linux/pagemap.h>
9#include <linux/rmap.h>
10#include <linux/mmu_notifier.h>
11#include <linux/page_ext.h>
12#include <linux/page_idle.h>
13
14#define BITMAP_CHUNK_SIZE sizeof(u64)
15#define BITMAP_CHUNK_BITS (BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
16
17/*
18 * Idle page tracking only considers user memory pages, for other types of
19 * pages the idle flag is always unset and an attempt to set it is silently
20 * ignored.
21 *
22 * We treat a page as a user memory page if it is on an LRU list, because it is
23 * always safe to pass such a page to rmap_walk(), which is essential for idle
24 * page tracking. With such an indicator of user pages we can skip isolated
25 * pages, but since there are not usually many of them, it will hardly affect
26 * the overall result.
27 *
28 * This function tries to get a user memory page by pfn as described above.
29 */
30static struct page *page_idle_get_page(unsigned long pfn)
31{
32 struct page *page;
33 struct zone *zone;
34
35 if (!pfn_valid(pfn))
36 return NULL;
37
38 page = pfn_to_page(pfn);
39 if (!page || !PageLRU(page) ||
40 !get_page_unless_zero(page))
41 return NULL;
42
43 zone = page_zone(page);
44 spin_lock_irq(&zone->lru_lock);
45 if (unlikely(!PageLRU(page))) {
46 put_page(page);
47 page = NULL;
48 }
49 spin_unlock_irq(&zone->lru_lock);
50 return page;
51}
52
53static int page_idle_clear_pte_refs_one(struct page *page,
54 struct vm_area_struct *vma,
55 unsigned long addr, void *arg)
56{
57 struct mm_struct *mm = vma->vm_mm;
58 spinlock_t *ptl;
59 pmd_t *pmd;
60 pte_t *pte;
61 bool referenced = false;
62
63 if (unlikely(PageTransHuge(page))) {
64 pmd = page_check_address_pmd(page, mm, addr,
65 PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
66 if (pmd) {
67 referenced = pmdp_clear_young_notify(vma, addr, pmd);
68 spin_unlock(ptl);
69 }
70 } else {
71 pte = page_check_address(page, mm, addr, &ptl, 0);
72 if (pte) {
73 referenced = ptep_clear_young_notify(vma, addr, pte);
74 pte_unmap_unlock(pte, ptl);
75 }
76 }
77 if (referenced) {
78 clear_page_idle(page);
79 /*
80 * We cleared the referenced bit in a mapping to this page. To
81 * avoid interference with page reclaim, mark it young so that
82 * page_referenced() will return > 0.
83 */
84 set_page_young(page);
85 }
86 return SWAP_AGAIN;
87}
88
89static void page_idle_clear_pte_refs(struct page *page)
90{
91 /*
92 * Since rwc.arg is unused, rwc is effectively immutable, so we
93 * can make it static const to save some cycles and stack.
94 */
95 static const struct rmap_walk_control rwc = {
96 .rmap_one = page_idle_clear_pte_refs_one,
97 .anon_lock = page_lock_anon_vma_read,
98 };
99 bool need_lock;
100
101 if (!page_mapped(page) ||
102 !page_rmapping(page))
103 return;
104
105 need_lock = !PageAnon(page) || PageKsm(page);
106 if (need_lock && !trylock_page(page))
107 return;
108
109 rmap_walk(page, (struct rmap_walk_control *)&rwc);
110
111 if (need_lock)
112 unlock_page(page);
113}
114
115static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
116 struct bin_attribute *attr, char *buf,
117 loff_t pos, size_t count)
118{
119 u64 *out = (u64 *)buf;
120 struct page *page;
121 unsigned long pfn, end_pfn;
122 int bit;
123
124 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
125 return -EINVAL;
126
127 pfn = pos * BITS_PER_BYTE;
128 if (pfn >= max_pfn)
129 return 0;
130
131 end_pfn = pfn + count * BITS_PER_BYTE;
132 if (end_pfn > max_pfn)
133 end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
134
135 for (; pfn < end_pfn; pfn++) {
136 bit = pfn % BITMAP_CHUNK_BITS;
137 if (!bit)
138 *out = 0ULL;
139 page = page_idle_get_page(pfn);
140 if (page) {
141 if (page_is_idle(page)) {
142 /*
143 * The page might have been referenced via a
144 * pte, in which case it is not idle. Clear
145 * refs and recheck.
146 */
147 page_idle_clear_pte_refs(page);
148 if (page_is_idle(page))
149 *out |= 1ULL << bit;
150 }
151 put_page(page);
152 }
153 if (bit == BITMAP_CHUNK_BITS - 1)
154 out++;
155 cond_resched();
156 }
157 return (char *)out - buf;
158}
159
160static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
161 struct bin_attribute *attr, char *buf,
162 loff_t pos, size_t count)
163{
164 const u64 *in = (u64 *)buf;
165 struct page *page;
166 unsigned long pfn, end_pfn;
167 int bit;
168
169 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
170 return -EINVAL;
171
172 pfn = pos * BITS_PER_BYTE;
173 if (pfn >= max_pfn)
174 return -ENXIO;
175
176 end_pfn = pfn + count * BITS_PER_BYTE;
177 if (end_pfn > max_pfn)
178 end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
179
180 for (; pfn < end_pfn; pfn++) {
181 bit = pfn % BITMAP_CHUNK_BITS;
182 if ((*in >> bit) & 1) {
183 page = page_idle_get_page(pfn);
184 if (page) {
185 page_idle_clear_pte_refs(page);
186 set_page_idle(page);
187 put_page(page);
188 }
189 }
190 if (bit == BITMAP_CHUNK_BITS - 1)
191 in++;
192 cond_resched();
193 }
194 return (char *)in - buf;
195}
196
197static struct bin_attribute page_idle_bitmap_attr =
198 __BIN_ATTR(bitmap, S_IRUSR | S_IWUSR,
199 page_idle_bitmap_read, page_idle_bitmap_write, 0);
200
201static struct bin_attribute *page_idle_bin_attrs[] = {
202 &page_idle_bitmap_attr,
203 NULL,
204};
205
206static struct attribute_group page_idle_attr_group = {
207 .bin_attrs = page_idle_bin_attrs,
208 .name = "page_idle",
209};
210
211#ifndef CONFIG_64BIT
212static bool need_page_idle(void)
213{
214 return true;
215}
216struct page_ext_operations page_idle_ops = {
217 .need = need_page_idle,
218};
219#endif
220
221static int __init page_idle_init(void)
222{
223 int err;
224
225 err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
226 if (err) {
227 pr_err("page_idle: register sysfs failed\n");
228 return err;
229 }
230 return 0;
231}
232subsys_initcall(page_idle_init);
diff --git a/mm/rmap.c b/mm/rmap.c
index 0db38e7d0a72..f5b5c1f3dcd7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -59,6 +59,7 @@
59#include <linux/migrate.h> 59#include <linux/migrate.h>
60#include <linux/hugetlb.h> 60#include <linux/hugetlb.h>
61#include <linux/backing-dev.h> 61#include <linux/backing-dev.h>
62#include <linux/page_idle.h>
62 63
63#include <asm/tlbflush.h> 64#include <asm/tlbflush.h>
64 65
@@ -886,6 +887,11 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
886 pte_unmap_unlock(pte, ptl); 887 pte_unmap_unlock(pte, ptl);
887 } 888 }
888 889
890 if (referenced)
891 clear_page_idle(page);
892 if (test_and_clear_page_young(page))
893 referenced++;
894
889 if (referenced) { 895 if (referenced) {
890 pra->referenced++; 896 pra->referenced++;
891 pra->vm_flags |= vma->vm_flags; 897 pra->vm_flags |= vma->vm_flags;
diff --git a/mm/swap.c b/mm/swap.c
index a3a0a2f1f7c3..983f692a47fd 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,6 +32,7 @@
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/uio.h> 33#include <linux/uio.h>
34#include <linux/hugetlb.h> 34#include <linux/hugetlb.h>
35#include <linux/page_idle.h>
35 36
36#include "internal.h" 37#include "internal.h"
37 38
@@ -622,6 +623,8 @@ void mark_page_accessed(struct page *page)
622 } else if (!PageReferenced(page)) { 623 } else if (!PageReferenced(page)) {
623 SetPageReferenced(page); 624 SetPageReferenced(page);
624 } 625 }
626 if (page_is_idle(page))
627 clear_page_idle(page);
625} 628}
626EXPORT_SYMBOL(mark_page_accessed); 629EXPORT_SYMBOL(mark_page_accessed);
627 630