author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2010-08-09 20:19:28 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-09 23:45:00 -0400
commit | e247dbce5cc747a714e8dcbd6b3f442cc2a284cf (patch) |
tree | 9ade331a0be10aab1a67160b6feebc2ef06d5878 /mm/vmscan.c |
parent | 25edde0332916ae706ccf83de688be57bcc844b7 (diff) |
vmscan: simplify shrink_inactive_list()
Now max_scan of shrink_inactive_list() is always passed a value no larger than
SWAP_CLUSTER_MAX, so the page scanning loop inside it can be removed. This
patch also helps reduce stack usage.
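For context, the caller already caps what it passes in. A simplified sketch of the
shrink_zone() loop from this era (paraphrased here for illustration, not part of this
patch's diff) looks roughly like this:

        /*
         * Illustrative sketch of the caller, shrink_zone(): the amount handed
         * to shrink_list()/shrink_inactive_list() per call is capped at
         * SWAP_CLUSTER_MAX, so one isolation pass per call is sufficient.
         */
        for_each_evictable_lru(l) {
                if (nr[l]) {
                        unsigned long nr_to_scan;

                        nr_to_scan = min_t(unsigned long,
                                           nr[l], SWAP_CLUSTER_MAX);
                        nr[l] -= nr_to_scan;

                        nr_reclaimed += shrink_list(l, nr_to_scan,
                                                    zone, sc, priority);
                }
        }

With each call bounded by SWAP_CLUSTER_MAX, a single isolation pass covers the whole
request, which is what lets the inner scanning loop go away.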
Details:
- remove the "while (nr_scanned < max_scan)" loop
- remove nr_freed (now we use nr_reclaimed directly)
- remove nr_scan (now we use nr_scanned directly)
- rename max_scan to nr_to_scan
- pass nr_to_scan into isolate_pages() directly instead of using SWAP_CLUSTER_MAX
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 212
1 file changed, 102 insertions, 110 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 594eba8a44c0..8522327bd04f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1136,15 +1136,21 @@ static int too_many_isolated(struct zone *zone, int file,
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
-static unsigned long shrink_inactive_list(unsigned long max_scan,
+static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
                         struct zone *zone, struct scan_control *sc,
                         int priority, int file)
 {
         LIST_HEAD(page_list);
         struct pagevec pvec;
-        unsigned long nr_scanned = 0;
+        unsigned long nr_scanned;
         unsigned long nr_reclaimed = 0;
         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+        struct page *page;
+        unsigned long nr_taken;
+        unsigned long nr_active;
+        unsigned int count[NR_LRU_LISTS] = { 0, };
+        unsigned long nr_anon;
+        unsigned long nr_file;

         while (unlikely(too_many_isolated(zone, file, sc))) {
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1159,129 +1165,115 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,

         lru_add_drain();
         spin_lock_irq(&zone->lru_lock);
-        do {
-                struct page *page;
-                unsigned long nr_taken;
-                unsigned long nr_scan;
-                unsigned long nr_freed;
-                unsigned long nr_active;
-                unsigned int count[NR_LRU_LISTS] = { 0, };
-                int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
-                unsigned long nr_anon;
-                unsigned long nr_file;

-                if (scanning_global_lru(sc)) {
-                        nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
-                                                        &page_list, &nr_scan,
-                                                        sc->order, mode,
-                                                        zone, 0, file);
-                        zone->pages_scanned += nr_scan;
-                        if (current_is_kswapd())
-                                __count_zone_vm_events(PGSCAN_KSWAPD, zone,
-                                                       nr_scan);
-                        else
-                                __count_zone_vm_events(PGSCAN_DIRECT, zone,
-                                                       nr_scan);
-                } else {
-                        nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
-                                                        &page_list, &nr_scan,
-                                                        sc->order, mode,
-                                                        zone, sc->mem_cgroup,
-                                                        0, file);
-                        /*
-                         * mem_cgroup_isolate_pages() keeps track of
-                         * scanned pages on its own.
-                         */
-                }
+        if (scanning_global_lru(sc)) {
+                nr_taken = isolate_pages_global(nr_to_scan,
+                        &page_list, &nr_scanned, sc->order,
+                        sc->lumpy_reclaim_mode ?
+                                ISOLATE_BOTH : ISOLATE_INACTIVE,
+                        zone, 0, file);
+                zone->pages_scanned += nr_scanned;
+                if (current_is_kswapd())
+                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                               nr_scanned);
+                else
+                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                               nr_scanned);
+        } else {
+                nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
+                        &page_list, &nr_scanned, sc->order,
+                        sc->lumpy_reclaim_mode ?
+                                ISOLATE_BOTH : ISOLATE_INACTIVE,
+                        zone, sc->mem_cgroup,
+                        0, file);
+                /*
+                 * mem_cgroup_isolate_pages() keeps track of
+                 * scanned pages on its own.
+                 */
+        }

-                if (nr_taken == 0)
-                        goto done;
+        if (nr_taken == 0)
+                goto done;

-                nr_active = clear_active_flags(&page_list, count);
-                __count_vm_events(PGDEACTIVATE, nr_active);
+        nr_active = clear_active_flags(&page_list, count);
+        __count_vm_events(PGDEACTIVATE, nr_active);

-                __mod_zone_page_state(zone, NR_ACTIVE_FILE,
-                                                -count[LRU_ACTIVE_FILE]);
-                __mod_zone_page_state(zone, NR_INACTIVE_FILE,
-                                                -count[LRU_INACTIVE_FILE]);
-                __mod_zone_page_state(zone, NR_ACTIVE_ANON,
-                                                -count[LRU_ACTIVE_ANON]);
-                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
-                                                -count[LRU_INACTIVE_ANON]);
+        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
+                                        -count[LRU_ACTIVE_FILE]);
+        __mod_zone_page_state(zone, NR_INACTIVE_FILE,
+                                        -count[LRU_INACTIVE_FILE]);
+        __mod_zone_page_state(zone, NR_ACTIVE_ANON,
+                                        -count[LRU_ACTIVE_ANON]);
+        __mod_zone_page_state(zone, NR_INACTIVE_ANON,
+                                        -count[LRU_INACTIVE_ANON]);

-                nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-                nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-                __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-                __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
+        nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+        nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+        __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);

-                reclaim_stat->recent_scanned[0] += nr_anon;
-                reclaim_stat->recent_scanned[1] += nr_file;
+        reclaim_stat->recent_scanned[0] += nr_anon;
+        reclaim_stat->recent_scanned[1] += nr_file;

-                spin_unlock_irq(&zone->lru_lock);
+        spin_unlock_irq(&zone->lru_lock);

-                nr_scanned += nr_scan;
-                nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+        nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);

-                /*
-                 * If we are direct reclaiming for contiguous pages and we do
-                 * not reclaim everything in the list, try again and wait
-                 * for IO to complete. This will stall high-order allocations
-                 * but that should be acceptable to the caller
-                 */
-                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                    sc->lumpy_reclaim_mode) {
-                        congestion_wait(BLK_RW_ASYNC, HZ/10);
+        /*
+         * If we are direct reclaiming for contiguous pages and we do
+         * not reclaim everything in the list, try again and wait
+         * for IO to complete. This will stall high-order allocations
+         * but that should be acceptable to the caller
+         */
+        if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
+            sc->lumpy_reclaim_mode) {
+                congestion_wait(BLK_RW_ASYNC, HZ/10);

-                        /*
-                         * The attempt at page out may have made some
-                         * of the pages active, mark them inactive again.
-                         */
-                        nr_active = clear_active_flags(&page_list, count);
-                        count_vm_events(PGDEACTIVATE, nr_active);
+                /*
+                 * The attempt at page out may have made some
+                 * of the pages active, mark them inactive again.
+                 */
+                nr_active = clear_active_flags(&page_list, count);
+                count_vm_events(PGDEACTIVATE, nr_active);

-                        nr_freed += shrink_page_list(&page_list, sc,
-                                                        PAGEOUT_IO_SYNC);
-                }
-
-                nr_reclaimed += nr_freed;
+                nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
+        }

-                local_irq_disable();
-                if (current_is_kswapd())
-                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
+        local_irq_disable();
+        if (current_is_kswapd())
+                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

-                spin_lock(&zone->lru_lock);
-                /*
-                 * Put back any unfreeable pages.
-                 */
-                while (!list_empty(&page_list)) {
-                        int lru;
-                        page = lru_to_page(&page_list);
-                        VM_BUG_ON(PageLRU(page));
-                        list_del(&page->lru);
-                        if (unlikely(!page_evictable(page, NULL))) {
-                                spin_unlock_irq(&zone->lru_lock);
-                                putback_lru_page(page);
-                                spin_lock_irq(&zone->lru_lock);
-                                continue;
-                        }
-                        SetPageLRU(page);
-                        lru = page_lru(page);
-                        add_page_to_lru_list(zone, page, lru);
-                        if (is_active_lru(lru)) {
-                                int file = is_file_lru(lru);
-                                reclaim_stat->recent_rotated[file]++;
-                        }
-                        if (!pagevec_add(&pvec, page)) {
-                                spin_unlock_irq(&zone->lru_lock);
-                                __pagevec_release(&pvec);
-                                spin_lock_irq(&zone->lru_lock);
-                        }
-                }
-                __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-                __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
-
-        } while (nr_scanned < max_scan);
+        spin_lock(&zone->lru_lock);
+        /*
+         * Put back any unfreeable pages.
+         */
+        while (!list_empty(&page_list)) {
+                int lru;
+                page = lru_to_page(&page_list);
+                VM_BUG_ON(PageLRU(page));
+                list_del(&page->lru);
+                if (unlikely(!page_evictable(page, NULL))) {
+                        spin_unlock_irq(&zone->lru_lock);
+                        putback_lru_page(page);
+                        spin_lock_irq(&zone->lru_lock);
+                        continue;
+                }
+                SetPageLRU(page);
+                lru = page_lru(page);
+                add_page_to_lru_list(zone, page, lru);
+                if (is_active_lru(lru)) {
+                        int file = is_file_lru(lru);
+                        reclaim_stat->recent_rotated[file]++;
+                }
+                if (!pagevec_add(&pvec, page)) {
+                        spin_unlock_irq(&zone->lru_lock);
+                        __pagevec_release(&pvec);
+                        spin_lock_irq(&zone->lru_lock);
+                }
+        }
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+        __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

 done:
         spin_unlock_irq(&zone->lru_lock);