path: root/mm/vmscan.c
author	Christoph Lameter <clameter@engr.sgi.com>	2006-02-11 20:55:55 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-12 00:41:11 -0500
commit	2903fb1694dcb08a3c1d9d823cfae7ba30e66cd3 (patch)
tree	7d794e0997e935033354f3c3ffd81c1f31354db3 /mm/vmscan.c
parent	072eaa5d9cc3e63f567ffd9ad87b36194fdd8010 (diff)
[PATCH] vmscan: skip reclaim_mapped determination if we do not swap
This puts the variables and the way to get to reclaim_mapped in one block, and allows zone_reclaim or other callers to skip the determination (maybe this whole block of code does not belong in refill_inactive_zone()?).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
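For illustration, here is the heuristic that the patch moves under the may_swap guard, lifted into a standalone sketch. The wrapper should_reclaim_mapped() and the sample numbers in main() are hypothetical, chosen only to walk through the arithmetic; the three formulas (distress, mapped_ratio, swap_tendency) are taken from the code in the diff below.

	#include <stdio.h>

	/*
	 * Standalone sketch of the swap_tendency heuristic. The function
	 * name and parameters are illustrative, not kernel API.
	 */
	static int should_reclaim_mapped(long nr_mapped, long total_pages,
					 int prev_priority, int swappiness,
					 int may_swap)
	{
		long mapped_ratio, distress, swap_tendency;

		if (!may_swap)	/* the patch lets non-swapping reclaim skip all of this */
			return 0;

		/* 0 -> no reclaim trouble, 100 -> great trouble */
		distress = 100 >> prev_priority;

		/* percentage of total memory that is mapped */
		mapped_ratio = (nr_mapped * 100) / total_pages;

		/* mapped_ratio is halved; swappiness == 100 overrides the rest */
		swap_tendency = mapped_ratio / 2 + distress + swappiness;

		return swap_tendency >= 100;
	}

	int main(void)
	{
		/* 50% mapped, mild pressure (priority 6), default swappiness 60:
		 * 25 + (100 >> 6) + 60 = 86 < 100 -> leave mapped pages alone */
		printf("%d\n", should_reclaim_mapped(512, 1024, 6, 60, 1));

		/* same load under heavy pressure (priority 0):
		 * 25 + 100 + 60 = 185 >= 100 -> start reclaiming mapped memory */
		printf("%d\n", should_reclaim_mapped(512, 1024, 0, 60, 1));

		/* may_swap == 0: the determination is skipped entirely */
		printf("%d\n", should_reclaim_mapped(512, 1024, 0, 60, 0));
		return 0;
	}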
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	75
1 file changed, 41 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 58ed5125b1a7..1838c15ca4fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1195,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
-	long mapped_ratio;
-	long distress;
-	long swap_tendency;
+
+	if (unlikely(sc->may_swap)) {
+		long mapped_ratio;
+		long distress;
+		long swap_tendency;
+
+		/*
+		 * `distress' is a measure of how much trouble we're having
+		 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+		 */
+		distress = 100 >> zone->prev_priority;
+
+		/*
+		 * The point of this algorithm is to decide when to start
+		 * reclaiming mapped memory instead of just pagecache. Work out
+		 * how much memory
+		 * is mapped.
+		 */
+		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+		/*
+		 * Now decide how much we really want to unmap some pages. The
+		 * mapped ratio is downgraded - just because there's a lot of
+		 * mapped memory doesn't necessarily mean that page reclaim
+		 * isn't succeeding.
+		 *
+		 * The distress ratio is important - we don't want to start
+		 * going oom.
+		 *
+		 * A 100% value of vm_swappiness overrides this algorithm
+		 * altogether.
+		 */
+		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+		/*
+		 * Now use this metric to decide whether to start moving mapped
+		 * memory onto the inactive list.
+		 */
+		if (swap_tendency >= 100)
+			reclaim_mapped = 1;
+	}
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
 
-	/*
-	 * `distress' is a measure of how much trouble we're having reclaiming
-	 * pages. 0 -> no problems. 100 -> great trouble.
-	 */
-	distress = 100 >> zone->prev_priority;
-
-	/*
-	 * The point of this algorithm is to decide when to start reclaiming
-	 * mapped memory instead of just pagecache. Work out how much memory
-	 * is mapped.
-	 */
-	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-	/*
-	 * Now decide how much we really want to unmap some pages. The mapped
-	 * ratio is downgraded - just because there's a lot of mapped memory
-	 * doesn't necessarily mean that page reclaim isn't succeeding.
-	 *
-	 * The distress ratio is important - we don't want to start going oom.
-	 *
-	 * A 100% value of vm_swappiness overrides this algorithm altogether.
-	 */
-	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-	/*
-	 * Now use this metric to decide whether to start moving mapped memory
-	 * onto the inactive list.
-	 */
-	if (swap_tendency >= 100 && sc->may_swap)
-		reclaim_mapped = 1;
-
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
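Note that for callers with sc->may_swap set, the result is unchanged: the old code computed swap_tendency unconditionally and only consulted sc->may_swap in the final comparison, whereas the new code skips the whole computation when sc->may_swap is clear and leaves reclaim_mapped at its initial value of 0.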