summary | refs | log | tree | commit | diff | stats
path: root/include/linux/mmzone.h
diff options
context:
space:
mode:
author	Johannes Weiner <hannes@cmpxchg.org>	2016-03-15 17:57:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 19:55:16 -0400
commit	23047a96d7cfcfca1a6d026ecaec526ea4803e9e (patch)
tree	3c90e27cc6dcb6a386a54c503bbb0860e828509b /include/linux/mmzone.h
parent	612e44939c3c77245ac80843c0c7876c8cf97282 (diff)
mm: workingset: per-cgroup cache thrash detection
Cache thrash detection (see a528910e12ec "mm: thrash detection-based file cache sizing" for details) currently only works on the system level, not inside cgroups. Worse, as the refaults are compared to the global number of active cache, cgroups might wrongfully get all their refaults activated when their pages are hotter than those of others.

Move the refault machinery from the zone to the lruvec, and then tag eviction entries with the memcg ID. This makes the thrash detection work correctly inside cgroups.

[sergey.senozhatsky@gmail.com: do not return from workingset_activation() with locked rcu and page]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	13
1 file changed, 7 insertions, 6 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9fc23ab550a7..03cbdd906f55 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -212,10 +212,12 @@ struct zone_reclaim_stat {
 };
 
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
 	struct zone_reclaim_stat reclaim_stat;
+	/* Evictions & activations on the inactive file list */
+	atomic_long_t inactive_age;
 #ifdef CONFIG_MEMCG
 	struct zone *zone;
 #endif
 };
 
@@ -490,9 +492,6 @@ struct zone {
 	spinlock_t lru_lock;
 	struct lruvec lruvec;
 
-	/* Evictions & activations on the inactive file list */
-	atomic_long_t inactive_age;
-
 	/*
 	 * When free pages are below this point, additional steps are taken
 	 * when reading the number of free pages to avoid per-cpu counter
@@ -761,6 +760,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec)
 #endif
 }
 
+extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else