author		Rik van Riel <riel@redhat.com>			2008-10-18 23:26:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:50:25 -0400
commit		4f98a2fee8acdb4ac84545df98cccecfd130f8db
tree		035a2937f4c3e2f7b4269412041c073ac646937c /mm/swap.c
parent		b2e185384f534781fd22f5ce170b2ad26f97df70
vmscan: split LRU lists into anon & file sets
Split the LRU lists in two, one set for pages that are backed by real file
systems ("file") and one for pages that are backed by memory and swap
("anon").  The latter includes tmpfs.

The advantage of doing this is that the VM will not have to scan over lots
of anonymous pages (which we generally do not want to swap out), just to
find the page cache pages that it should evict.

This patch has the infrastructure and a basic policy to balance how much
we scan the anon lists and how much we scan the file lists.  The big
policy changes are in separate patches.

[lee.schermerhorn@hp.com: collect lru meminfo statistics from correct offset]
[kosaki.motohiro@jp.fujitsu.com: prevent incorrect oom under split_lru]
[kosaki.motohiro@jp.fujitsu.com: fix pagevec_move_tail() doesn't treat unevictable page]
[hugh@veritas.com: memcg swapbacked pages active]
[hugh@veritas.com: splitlru: BDI_CAP_SWAP_BACKED]
[akpm@linux-foundation.org: fix /proc/vmstat units]
[nishimura@mxp.nes.nec.co.jp: memcg: fix handling of shmem migration]
[kosaki.motohiro@jp.fujitsu.com: adjust Quicklists field of /proc/meminfo]
[kosaki.motohiro@jp.fujitsu.com: fix style issue of get_scan_ratio()]
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
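As a rough orientation for the hunks below, here is a minimal user-space
sketch of the index scheme the split-LRU code uses: four lists addressed as
base + file offset + active offset.  The constants and the
page_is_file_cache_sketch() helper are illustrative stand-ins, not the
kernel's definitions (which this series adds to the mm headers).

    #include <stdio.h>

    /* Illustrative constants mirroring the split-LRU index layout. */
    #define LRU_BASE   0
    #define LRU_ACTIVE 1
    #define LRU_FILE   2

    enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,                         /* 0 */
        LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,            /* 1 */
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,              /* 2 */
        LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE, /* 3 */
        NR_LRU_LISTS
    };

    /* Hypothetical stand-in for page_is_file_cache(): file-backed pages
     * get the LRU_FILE offset, swap-backed (anon/tmpfs) pages get 0. */
    static int page_is_file_cache_sketch(int swap_backed)
    {
        return swap_backed ? 0 : LRU_FILE;
    }

    int main(void)
    {
        int file = page_is_file_cache_sketch(0); /* a file-backed page */
        int lru = LRU_BASE + file;               /* its inactive list */
        lru += LRU_ACTIVE;                       /* activate: move to the active list */
        printf("activated file page -> list %d (LRU_ACTIVE_FILE)\n", lru);
        return 0;
    }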
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 88a394872677..0b1974a08974 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -116,7 +116,8 @@ static void pagevec_move_tail(struct pagevec *pvec)
                         spin_lock(&zone->lru_lock);
                 }
                 if (PageLRU(page) && !PageActive(page)) {
-                        list_move_tail(&page->lru, &zone->lru[LRU_INACTIVE].list);
+                        int lru = page_is_file_cache(page);
+                        list_move_tail(&page->lru, &zone->lru[lru].list);
                         pgmoved++;
                 }
         }
@@ -157,11 +158,18 @@ void activate_page(struct page *page)
 
         spin_lock_irq(&zone->lru_lock);
         if (PageLRU(page) && !PageActive(page)) {
-                del_page_from_inactive_list(zone, page);
+                int file = page_is_file_cache(page);
+                int lru = LRU_BASE + file;
+                del_page_from_lru_list(zone, page, lru);
+
                 SetPageActive(page);
-                add_page_to_active_list(zone, page);
+                lru += LRU_ACTIVE;
+                add_page_to_lru_list(zone, page, lru);
                 __count_vm_event(PGACTIVATE);
                 mem_cgroup_move_lists(page, true);
+
+                zone->recent_rotated[!!file]++;
+                zone->recent_scanned[!!file]++;
         }
         spin_unlock_irq(&zone->lru_lock);
 }
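As the hunks show, page_is_file_cache() yields an LRU offset (0 for
swap-backed pages, the file offset otherwise) rather than a boolean, which is
why the new recent_rotated/recent_scanned counters are indexed with !!file.
Per the commit message, these counters feed the basic scan-balancing policy
(get_scan_ratio()).  The following is a rough, hypothetical user-space sketch
of that kind of rotated-versus-scanned balancing, not the kernel's actual
implementation:

    #include <stdio.h>

    /* Hypothetical sketch: a list whose pages are frequently rotated back
     * (recently referenced) should be scanned less; one whose pages are
     * rarely rotated is cheaper to reclaim and should be scanned more. */
    static unsigned int scan_percent(unsigned long rotated, unsigned long scanned)
    {
        /* +1 avoids division by zero; result is a rough scan share. */
        return 100 - (unsigned int)(100 * rotated / (scanned + 1));
    }

    int main(void)
    {
        unsigned long anon_rotated = 80, anon_scanned = 100;
        unsigned long file_rotated = 10, file_scanned = 100;

        printf("scan anon: %u%%, scan file: %u%%\n",
               scan_percent(anon_rotated, anon_scanned),
               scan_percent(file_rotated, file_scanned));
        return 0;
    }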