Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	81
1 file changed, 45 insertions(+), 36 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5db32fdfaf39..1838c15ca4fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -443,6 +443,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		BUG_ON(PageActive(page));
 
 		sc->nr_scanned++;
+
+		if (!sc->may_swap && page_mapped(page))
+			goto keep_locked;
+
 		/* Double the slab pressure for mapped and swapcache pages */
 		if (page_mapped(page) || PageSwapCache(page))
 			sc->nr_scanned++;
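
The hunk above makes shrink_list() honour the may_swap flag in struct scan_control: when a caller clears it, mapped pages are kept on the list instead of being unmapped and swapped out. A minimal sketch of such a caller follows; only the may_swap usage comes from this patch, the surrounding fields and values are illustrative assumptions.

	/*
	 * Hypothetical caller asking for pagecache-only reclaim.  Field
	 * names follow struct scan_control as used in mm/vmscan.c; the
	 * concrete values are made up for illustration.
	 */
	struct scan_control sc = {
		.gfp_mask	  = GFP_KERNEL,
		.may_writepage	  = 0,
		.may_swap	  = 0,	/* shrink_list() now skips mapped pages */
		.swap_cluster_max = SWAP_CLUSTER_MAX,
	};
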
@@ -1191,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
-	long mapped_ratio;
-	long distress;
-	long swap_tendency;
+
+	if (unlikely(sc->may_swap)) {
+		long mapped_ratio;
+		long distress;
+		long swap_tendency;
+
+		/*
+		 * `distress' is a measure of how much trouble we're having
+		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
+		 */
+		distress = 100 >> zone->prev_priority;
+
+		/*
+		 * The point of this algorithm is to decide when to start
+		 * reclaiming mapped memory instead of just pagecache. Work out
+		 * how much memory
+		 * is mapped.
+		 */
+		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+		/*
+		 * Now decide how much we really want to unmap some pages. The
+		 * mapped ratio is downgraded - just because there's a lot of
+		 * mapped memory doesn't necessarily mean that page reclaim
+		 * isn't succeeding.
+		 *
+		 * The distress ratio is important - we don't want to start
+		 * going oom.
+		 *
+		 * A 100% value of vm_swappiness overrides this algorithm
+		 * altogether.
+		 */
+		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+		/*
+		 * Now use this metric to decide whether to start moving mapped
+		 * memory onto the inactive list.
+		 */
+		if (swap_tendency >= 100)
+			reclaim_mapped = 1;
+	}
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1203,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
 
-	/*
-	 * `distress' is a measure of how much trouble we're having reclaiming
-	 * pages.  0 -> no problems.  100 -> great trouble.
-	 */
-	distress = 100 >> zone->prev_priority;
-
-	/*
-	 * The point of this algorithm is to decide when to start reclaiming
-	 * mapped memory instead of just pagecache.  Work out how much memory
-	 * is mapped.
-	 */
-	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-	/*
-	 * Now decide how much we really want to unmap some pages.  The mapped
-	 * ratio is downgraded - just because there's a lot of mapped memory
-	 * doesn't necessarily mean that page reclaim isn't succeeding.
-	 *
-	 * The distress ratio is important - we don't want to start going oom.
-	 *
-	 * A 100% value of vm_swappiness overrides this algorithm altogether.
-	 */
-	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-	/*
-	 * Now use this metric to decide whether to start moving mapped memory
-	 * onto the inactive list.
-	 */
-	if (swap_tendency >= 100)
-		reclaim_mapped = 1;
-
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
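
The two hunks above move the swap_tendency heuristic inside the may_swap check, so refill_inactive_zone() skips the calculation entirely when mapped pages are off limits. The heuristic itself is easiest to read with concrete numbers; the standalone sketch below just replays the arithmetic from the patch, and the inputs (default vm_swappiness of 60, prev_priority at DEF_PRIORITY, 30% of memory mapped) are assumptions for illustration only.

	/* Userspace sketch of the swap_tendency calculation above.
	 * Input values are illustrative assumptions, not from the patch. */
	#include <stdio.h>

	int main(void)
	{
		long vm_swappiness = 60;	/* default /proc/sys/vm/swappiness */
		int prev_priority = 12;		/* DEF_PRIORITY: reclaim not struggling */
		long mapped_ratio = 30;		/* 30% of memory is mapped */

		long distress = 100 >> prev_priority;	/* 100 >> 12 == 0 */
		long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		/* 15 + 0 + 60 = 75 < 100, so reclaim_mapped stays 0 and only
		 * pagecache is targeted; with vm_swappiness == 100 the
		 * threshold is always met, as the comment in the patch notes. */
		printf("swap_tendency = %ld -> reclaim_mapped = %d\n",
		       swap_tendency, swap_tendency >= 100 ? 1 : 0);
		return 0;
	}
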
@@ -1610,9 +1621,7 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
-			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
-			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
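
The final hunk drops the reclaim_in_progress bracketing around the shrink_zone() call in balance_pgdat(). The other side of that change is not shown in this diff; a plausible reading, stated here as an assumption, is that the counter is maintained by shrink_zone() itself, roughly as sketched below.

	/*
	 * Hypothetical sketch only: shrink_zone() owning the
	 * zone->reclaim_in_progress accounting, so callers such as
	 * balance_pgdat() no longer bracket the call themselves.
	 */
	static void shrink_zone(struct zone *zone, struct scan_control *sc)
	{
		atomic_inc(&zone->reclaim_in_progress);

		/* ... scan the zone's active and inactive lists ... */

		atomic_dec(&zone->reclaim_in_progress);
	}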