Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  120
1 file changed, 57 insertions(+), 63 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1fd4912a596c..46fdaa546b8d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -819,10 +819,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
 					int active)
 {
 	if (active)
-		return isolate_lru_pages(nr, &z->active_list, dst,
+		return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
 						scanned, order, mode);
 	else
-		return isolate_lru_pages(nr, &z->inactive_list, dst,
+		return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
 						scanned, order, mode);
 }
 
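Note: the z->lru[LRU_ACTIVE].list and z->lru[LRU_INACTIVE].list references above rely on definitions added to include/linux/mmzone.h elsewhere in this series; they are not visible in this diff, which is limited to mm/vmscan.c. A rough sketch of what those companion definitions are assumed to look like (illustrative only, not the exact upstream text):

	enum lru_list {
		LRU_BASE,
		LRU_INACTIVE = LRU_BASE,	/* ordering assumed to match NR_INACTIVE/NR_ACTIVE */
		LRU_ACTIVE,
		NR_LRU_LISTS
	};

	/* inside struct zone, replacing the old active_list/inactive_list
	 * heads and the nr_scan_active/nr_scan_inactive counters: */
	struct {
		struct list_head list;
		unsigned long nr_scan;
	} lru[NR_LRU_LISTS];
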
@@ -973,10 +973,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		list_del(&page->lru);
-		if (PageActive(page))
-			add_page_to_active_list(zone, page);
-		else
-			add_page_to_inactive_list(zone, page);
+		add_page_to_lru_list(zone, page, page_lru(page));
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
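The single add_page_to_lru_list(zone, page, page_lru(page)) call replaces the old PageActive() branch. Both helpers come from the companion include/linux/mm_inline.h change, not shown here; a minimal sketch of the assumed behaviour, if they follow the pattern of the old add_page_to_active_list()/add_page_to_inactive_list() helpers:

	static inline enum lru_list page_lru(struct page *page)
	{
		/* active pages index the LRU_ACTIVE slot, everything else LRU_INACTIVE */
		return PageActive(page) ? LRU_ACTIVE : LRU_INACTIVE;
	}

	static inline void add_page_to_lru_list(struct zone *zone,
					struct page *page, enum lru_list l)
	{
		list_add(&page->lru, &zone->lru[l].list);
		__inc_zone_state(zone, NR_LRU_BASE + l);	/* keep the vmstat counter in step */
	}
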
@@ -1144,8 +1141,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
-	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
-	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
+	LIST_HEAD(l_active);
+	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
@@ -1194,7 +1191,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		VM_BUG_ON(!PageActive(page));
 		ClearPageActive(page);
 
-		list_move(&page->lru, &zone->inactive_list);
+		list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
 		mem_cgroup_move_lists(page, false);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
@@ -1224,7 +1221,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
 
-		list_move(&page->lru, &zone->active_list);
+		list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
 		mem_cgroup_move_lists(page, true);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
@@ -1244,65 +1241,64 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	pagevec_release(&pvec);
 }
 
+static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
+	struct zone *zone, struct scan_control *sc, int priority)
+{
+	if (l == LRU_ACTIVE) {
+		shrink_active_list(nr_to_scan, zone, sc, priority);
+		return 0;
+	}
+	return shrink_inactive_list(nr_to_scan, zone, sc);
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static unsigned long shrink_zone(int priority, struct zone *zone,
 				struct scan_control *sc)
 {
-	unsigned long nr_active;
-	unsigned long nr_inactive;
+	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
+	enum lru_list l;
 
 	if (scan_global_lru(sc)) {
 		/*
 		 * Add one to nr_to_scan just to make sure that the kernel
 		 * will slowly sift through the active list.
 		 */
-		zone->nr_scan_active +=
-			(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-		nr_active = zone->nr_scan_active;
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-		nr_inactive = zone->nr_scan_inactive;
-		if (nr_inactive >= sc->swap_cluster_max)
-			zone->nr_scan_inactive = 0;
-		else
-			nr_inactive = 0;
-
-		if (nr_active >= sc->swap_cluster_max)
-			zone->nr_scan_active = 0;
-		else
-			nr_active = 0;
+		for_each_lru(l) {
+			zone->lru[l].nr_scan += (zone_page_state(zone,
+					NR_LRU_BASE + l) >> priority) + 1;
+			nr[l] = zone->lru[l].nr_scan;
+			if (nr[l] >= sc->swap_cluster_max)
+				zone->lru[l].nr_scan = 0;
+			else
+				nr[l] = 0;
+		}
 	} else {
 		/*
 		 * This reclaim occurs not because zone memory shortage but
 		 * because memory controller hits its limit.
 		 * Then, don't modify zone reclaim related data.
 		 */
-		nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
-					zone, priority);
+		nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+					zone, priority, LRU_ACTIVE);
 
-		nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
-					zone, priority);
+		nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+					zone, priority, LRU_INACTIVE);
 	}
 
-
-	while (nr_active || nr_inactive) {
-		if (nr_active) {
-			nr_to_scan = min(nr_active,
+	while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
+		for_each_lru(l) {
+			if (nr[l]) {
+				nr_to_scan = min(nr[l],
 				(unsigned long)sc->swap_cluster_max);
-			nr_active -= nr_to_scan;
-			shrink_active_list(nr_to_scan, zone, sc, priority);
-		}
+				nr[l] -= nr_to_scan;
 
-		if (nr_inactive) {
-			nr_to_scan = min(nr_inactive,
-				(unsigned long)sc->swap_cluster_max);
-			nr_inactive -= nr_to_scan;
-			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-								sc);
-		}
+				nr_reclaimed += shrink_list(l, nr_to_scan,
+							zone, sc, priority);
+			}
 		}
 	}
 
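shrink_zone() now walks the LRU lists with for_each_lru() and dispatches through the new shrink_list() wrapper; shrink_list() returns 0 for the active list because shrink_active_list() only deactivates or rotates pages and frees nothing directly, so nr_reclaimed still counts only pages reclaimed from the inactive list. The iterator itself is defined in the companion header change; it is assumed to be a simple macro along these lines (sketch only):

	#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

With that, nr[LRU_ACTIVE] and nr[LRU_INACTIVE] are each worked off in swap_cluster_max batches until both reach zero, just as the old nr_active/nr_inactive pair was.
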
@@ -1819,6 +1815,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
+	enum lru_list l;
 
 	for_each_zone(zone) {
 
@@ -1828,28 +1825,25 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
-		/* For pass = 0 we don't shrink the active list */
-		if (pass > 0) {
-			zone->nr_scan_active +=
-				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-			if (zone->nr_scan_active >= nr_pages || pass > 3) {
-				zone->nr_scan_active = 0;
+		for_each_lru(l) {
+			/* For pass = 0 we don't shrink the active list */
+			if (pass == 0 && l == LRU_ACTIVE)
+				continue;
+
+			zone->lru[l].nr_scan +=
+				(zone_page_state(zone, NR_LRU_BASE + l)
+								>> prio) + 1;
+			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				zone->lru[l].nr_scan = 0;
 				nr_to_scan = min(nr_pages,
-					zone_page_state(zone, NR_ACTIVE));
-				shrink_active_list(nr_to_scan, zone, sc, prio);
+					zone_page_state(zone,
+							NR_LRU_BASE + l));
+				ret += shrink_list(l, nr_to_scan, zone,
+								sc, prio);
+				if (ret >= nr_pages)
+					return ret;
 			}
 		}
-
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages,
-				zone_page_state(zone, NR_INACTIVE));
-			ret += shrink_inactive_list(nr_to_scan, zone, sc);
-			if (ret >= nr_pages)
-				return ret;
-		}
 	}
 
 	return ret;
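Both shrink_zone() and shrink_all_zones() now read the per-list page counts as zone_page_state(zone, NR_LRU_BASE + l), which is only correct if the zone_stat_item and lru_list enums keep the same ordering, i.e. NR_INACTIVE == NR_LRU_BASE + LRU_INACTIVE and NR_ACTIVE == NR_LRU_BASE + LRU_ACTIVE. A hypothetical compile-time check that would pin that assumption down (not part of this patch, shown only to make the invariant explicit):

	static inline void lru_index_build_checks(void)
	{
		/* hypothetical helper; fails the build if the enums drift apart */
		BUILD_BUG_ON(NR_INACTIVE != NR_LRU_BASE + LRU_INACTIVE);
		BUILD_BUG_ON(NR_ACTIVE != NR_LRU_BASE + LRU_ACTIVE);
	}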