author		Rik van Riel <riel@redhat.com>	2008-10-18 23:26:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:50:25 -0400
commit		556adecba110bf5f1db6c6b56416cfab5bcab698 (patch)
tree		a721d84d28c4d99a54632b472b452ea3d4b2b137 /include/linux
parent		4f98a2fee8acdb4ac84545df98cccecfd130f8db (diff)
vmscan: second chance replacement for anonymous pages
We avoid evicting and scanning anonymous pages for the most part, but
under some workloads we can end up with most of memory filled with
anonymous pages.  At that point, we suddenly need to clear the referenced
bits on all of memory, which can take ages on very large memory systems.

We can reduce the maximum number of pages that need to be scanned by not
taking the referenced state into account when deactivating an anonymous
page.  After all, every anonymous page starts out referenced, so why
check?

If an anonymous page gets referenced again before it reaches the end of
the inactive list, we move it back to the active list.

To keep the maximum amount of necessary work reasonable, we scale the
active to inactive ratio with the size of memory, using the formula
active:inactive ratio = sqrt(memory in GB * 10).

Kswapd CPU use now seems to scale by the amount of pageout bandwidth,
instead of by the amount of memory present in the system.

[kamezawa.hiroyu@jp.fujitsu.com: fix OOM with memcg]
[kamezawa.hiroyu@jp.fujitsu.com: memcg: lru scan fix]

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
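As a concrete reading of the formula above: a 1 GB zone gets a ratio of
sqrt(10) ~= 3, 10 GB gets exactly 10, and 100 GB gets sqrt(1000) ~= 31,
so the inactive anon list grows only with the square root of memory.
A minimal sketch of how the pageout code might derive the per-zone value
follows; the helper name set_zone_inactive_ratio() is illustrative and
not necessarily the patch's own, while int_sqrt(), PAGE_SHIFT and
zone->present_pages are kernel symbols of that era.

	/*
	 * Illustrative sketch, not the literal patch: derive the target
	 * active:inactive anon ratio from the zone size, per the formula
	 * ratio = sqrt(memory in GB * 10) from the changelog above.
	 */
	static void set_zone_inactive_ratio(struct zone *zone)
	{
		unsigned int gb, ratio;

		/* Zone size in whole gigabytes, rounded down */
		gb = zone->present_pages >> (30 - PAGE_SHIFT);
		ratio = int_sqrt(10 * gb);
		if (!ratio)
			ratio = 1;	/* zones under 1 GB: clamp to 1:1 */

		zone->inactive_ratio = ratio;
	}

Under this scheme a 256 GB machine would keep roughly 1 in 51 of its anon
pages inactive (int_sqrt(2560) = 50), rather than a fixed fraction of
memory, which bounds the referenced-bit clearing work the changelog
complains about.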
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm_inline.h	19
-rw-r--r--	include/linux/mmzone.h		6
2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2eb599465d56..f451fedd1e75 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -117,4 +117,23 @@ static inline enum lru_list page_lru(struct page *page)
 	return lru;
 }
 
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static inline int inactive_anon_is_low(struct zone *zone)
+{
+	unsigned long active, inactive;
+
+	active = zone_page_state(zone, NR_ACTIVE_ANON);
+	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+	if (inactive * zone->inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
 #endif
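For context, a hedged sketch of how the reclaim path might act on this
check. shrink_active_list() and struct scan_control are real mm/vmscan.c
entities of this period, but the wrapper below and its exact argument
values are simplified assumptions, not the patch's literal call site.

	/*
	 * Simplified assumption of the caller (the real logic lives in
	 * mm/vmscan.c): when the inactive anon list falls below the
	 * zone's target ratio, move a batch of active anon pages to the
	 * inactive list, where they get a second chance to be referenced
	 * before being considered for eviction.
	 */
	static void maybe_deactivate_anon(struct zone *zone,
					  struct scan_control *sc,
					  int priority)
	{
		if (inactive_anon_is_low(zone))
			shrink_active_list(SWAP_CLUSTER_MAX, zone, sc,
					   priority, 0 /* anon, not file */);
	}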
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 59a4c8fd6ebd..9c5111f49a32 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -323,6 +323,12 @@ struct zone {
 	 */
 	int prev_priority;
 
+	/*
+	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+	 * this zone's LRU.  Maintained by the pageout code.
+	 */
+	unsigned int inactive_ratio;
+
 
 	ZONE_PADDING(_pad2_)
 	/* Rarely used or read-mostly fields */