author	Rik van Riel <riel@redhat.com>	2008-10-18 23:26:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:50:25 -0400
commit	7e9cd484204f9e5b316ed35b241abf088d76e0af (patch)
tree	79f2567e7bb96af2d97d8d5407cc990e26eda95c /mm
parent	556adecba110bf5f1db6c6b56416cfab5bcab698 (diff)
vmscan: fix pagecache reclaim referenced bit check
Moving referenced pages back to the head of the active list creates a huge scalability problem, because by the time a large memory system finally runs out of free memory, every single page in the system will have been referenced.

Not only do we not have the time to scan every single page on the active list, but since they will all have the referenced bit set, that bit conveys no useful information.

A more scalable solution is to just move every page that hits the end of the active list to the inactive list.

We clear the referenced bit off of mapped pages, which need just one reference to be moved back onto the active list. Unmapped pages will be moved back to the active list after two references (see mark_page_accessed). We preserve the PG_referenced flag on unmapped pages to preserve accesses that were made while the page was on the active list.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
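For context on the two-reference rule the message points at: the promotion happens in mark_page_accessed() in mm/swap.c, not in this patch. A rough, paraphrased sketch of that function as it looked around this kernel is below; an unmapped page that keeps PG_referenced while sitting on the inactive list is activated on its next access, while a page without the flag merely has it set.

	/* Paraphrased sketch of mm/swap.c:mark_page_accessed() from this era;
	 * not part of this patch. */
	void mark_page_accessed(struct page *page)
	{
		if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
			/* second reference: promote back to the active list */
			activate_page(page);
			ClearPageReferenced(page);
		} else if (!PageReferenced(page)) {
			/* first reference: just remember it */
			SetPageReferenced(page);
		}
	}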
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	45
1 file changed, 13 insertions, 32 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c82ee9a33cfc..9588973849d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1064,7 +1064,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
-	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct pagevec pvec;
@@ -1095,21 +1094,28 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		cond_resched();
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
+
+		/* page_referenced clears PageReferenced */
+		if (page_mapping_inuse(page) &&
+		    page_referenced(page, 0, sc->mem_cgroup))
+			pgmoved++;
+
 		list_add(&page->lru, &l_inactive);
 	}
 
 	/*
-	 * Count the referenced pages as rotated, even when they are moved
-	 * to the inactive list. This helps balance scan pressure between
-	 * file and anonymous pages in get_scan_ratio.
+	 * Count referenced pages from currently used mappings as
+	 * rotated, even though they are moved to the inactive list.
+	 * This helps balance scan pressure between file and anonymous
+	 * pages in get_scan_ratio.
 	 */
 	zone->recent_rotated[!!file] += pgmoved;
 
 	/*
-	 * Now put the pages back on the appropriate [file or anon] inactive
-	 * and active lists.
+	 * Move the pages to the [file or anon] inactive list.
 	 */
 	pagevec_init(&pvec, 1);
+
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
 	spin_lock_irq(&zone->lru_lock);
@@ -1142,31 +1148,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		pagevec_strip(&pvec);
 		spin_lock_irq(&zone->lru_lock);
 	}
-
-	pgmoved = 0;
-	lru = LRU_ACTIVE + file * LRU_FILE;
-	while (!list_empty(&l_active)) {
-		page = lru_to_page(&l_active);
-		prefetchw_prev_lru_page(page, &l_active, flags);
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		VM_BUG_ON(!PageActive(page));
-
-		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_move_lists(page, true);
-		pgmoved++;
-		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-			pgmoved = 0;
-			spin_unlock_irq(&zone->lru_lock);
-			if (vm_swap_full())
-				pagevec_swap_free(&pvec);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
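Pulling the hunks together, the per-page scan loop in shrink_active_list() after this patch reads roughly as follows (reassembled from the diff above; declarations, the second loop's removal, and locking are elided):

	/* Every scanned page goes to the inactive list. page_referenced()
	 * clears PG_referenced on mapped pages, so a single further
	 * reference re-activates them; unmapped pages keep the flag and
	 * need a second touch via mark_page_accessed(). */
	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		/* page_referenced clears PageReferenced */
		if (page_mapping_inuse(page) &&
		    page_referenced(page, 0, sc->mem_cgroup))
			pgmoved++;	/* counted as rotated for get_scan_ratio */

		list_add(&page->lru, &l_inactive);
	}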