path: root/mm/vmscan.c

Diffstat (limited to 'mm/vmscan.c'):
 mm/vmscan.c | 47 ++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 05d439dc1af9..eeb3bc9d1d36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1031,6 +1031,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
+		int nr_pages;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
@@ -1039,9 +1040,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
-			mem_cgroup_lru_del_list(page, lru);
+			nr_pages = hpage_nr_pages(page);
+			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 			list_move(&page->lru, dst);
-			nr_taken += hpage_nr_pages(page);
+			nr_taken += nr_pages;
 			break;
 
 		case -EBUSY:
@@ -1093,15 +1095,16 @@ int isolate_lru_page(struct page *page)
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
 
 		spin_lock_irq(&zone->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
-			ret = 0;
 			get_page(page);
 			ClearPageLRU(page);
-
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
+			ret = 0;
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
@@ -1155,9 +1158,13 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
+		add_page_to_lru_list(page, lruvec, lru);
+
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
@@ -1166,7 +1173,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1314,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * But we had to alter page->flags anyway.
  */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
 				     struct list_head *list,
 				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
+	struct zone *zone = lruvec_zone(lruvec);
 	unsigned long pgmoved = 0;
 	struct page *page;
+	int nr_pages;
 
 	while (!list_empty(list)) {
-		struct lruvec *lruvec;
-
 		page = lru_to_page(list);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		nr_pages = hpage_nr_pages(page);
+		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
-		pgmoved += hpage_nr_pages(page);
+		pgmoved += nr_pages;
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1443,8 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
-	move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -3237,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
@@ -3246,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 
 		VM_BUG_ON(PageActive(page));
 		ClearPageUnevictable(page);
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, lru);
-		list_move(&page->lru, &lruvec->lists[lru]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+		del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+		add_page_to_lru_list(page, lruvec, lru);
 		pgrescued++;
 	}
 }