Diffstat (limited to 'mm')

 mm/memcontrol.c | 115
 mm/page_alloc.c |   9
 mm/swap.c       |   2
 mm/vmscan.c     | 120
 mm/vmstat.c     |   3
 5 files changed, 108 insertions(+), 141 deletions(-)
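Note: this view is limited to 'mm', so the header-side definitions the patch builds on (the include/linux/mmzone.h and include/linux/mm_inline.h parts of the same series) do not appear below. What follows is a minimal sketch of what those definitions must look like, inferred from how the hunks use them; the names and exact layout are assumptions, not the committed headers.

#include <linux/list.h>

/*
 * Assumed shape of the new index type. The values are pinned by the
 * code below: "lru = !!active" only works if LRU_INACTIVE == 0 and
 * LRU_ACTIVE == 1, and "NR_LRU_BASE + l" only works if the
 * zone_stat_item order (NR_INACTIVE, NR_ACTIVE) matches this enum.
 */
enum lru_list {
	LRU_INACTIVE,		/* must be 0 */
	LRU_ACTIVE,		/* must be 1 */
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

/*
 * Per-list pair assumed inside struct zone (anonymous there; named
 * here only for illustration). It replaces the four separate fields
 * active_list, inactive_list, nr_scan_active and nr_scan_inactive,
 * and is accessed below as zone->lru[l].list and zone->lru[l].nr_scan.
 */
struct zone_lru {
	struct list_head list;
	unsigned long nr_scan;
};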
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36896f3eb7f5..c0cbd7790c51 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -32,6 +32,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/mm_inline.h>
 
 #include <asm/uaccess.h>
 
@@ -85,22 +86,13 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 /*
  * per-zone information in memory controller.
  */
-
-enum mem_cgroup_zstat_index {
-	MEM_CGROUP_ZSTAT_ACTIVE,
-	MEM_CGROUP_ZSTAT_INACTIVE,
-
-	NR_MEM_CGROUP_ZSTAT,
-};
-
 struct mem_cgroup_per_zone {
 	/*
 	 * spin_lock to protect the per cgroup LRU
 	 */
 	spinlock_t		lru_lock;
-	struct list_head	active_list;
-	struct list_head	inactive_list;
-	unsigned long		count[NR_MEM_CGROUP_ZSTAT];
+	struct list_head	lists[NR_LRU_LISTS];
+	unsigned long		count[NR_LRU_LISTS];
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -227,7 +219,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 }
 
 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
-					enum mem_cgroup_zstat_index idx)
+					enum lru_list idx)
 {
 	int nid, zid;
 	struct mem_cgroup_per_zone *mz;
@@ -297,11 +289,9 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 			struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = !!from;
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
 	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
 	list_del(&pc->lru);
@@ -310,37 +300,35 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
 				struct page_cgroup *pc)
 {
-	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = LRU_INACTIVE;
+
+	if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		lru += LRU_ACTIVE;
+
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_add(&pc->lru, &mz->lists[lru]);
 
-	if (!to) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-		list_add(&pc->lru, &mz->inactive_list);
-	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-		list_add(&pc->lru, &mz->active_list);
-	}
 	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
 
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
-	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
 	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+	int lru = LRU_INACTIVE;
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		lru += LRU_ACTIVE;
 
-	if (active) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+
+	if (active)
 		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->active_list);
-	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+	else
 		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->inactive_list);
-	}
+
+	lru = !!active;
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_move(&pc->lru, &mz->lists[lru]);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -412,8 +400,8 @@ long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
 {
 	unsigned long active, inactive;
 	/* active and inactive are the number of pages. 'long' is ok.*/
-	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
-	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	active = mem_cgroup_get_all_zonestat(mem, LRU_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, LRU_INACTIVE);
 	return (long) (active / (inactive + 1));
 }
 
@@ -444,28 +432,17 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
  * (see include/linux/mmzone.h)
  */
 
-long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-				struct zone *zone, int priority)
+long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+					int priority, enum lru_list lru)
 {
-	long nr_active;
+	long nr_pages;
 	int nid = zone->zone_pgdat->node_id;
 	int zid = zone_idx(zone);
 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
-	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
-	return (nr_active >> priority);
-}
-
-long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-				struct zone *zone, int priority)
-{
-	long nr_inactive;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
 
-	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-	return (nr_inactive >> priority);
+	return (nr_pages >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
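The two copies of this helper differed only in which counter they read, so they collapse into one function taking the list index. The `nr_pages >> priority` result is the per-pass scan target; a standalone sketch of that scaling (DEF_PRIORITY is 12 in kernels of this era; the page count is made up):

#include <stdio.h>

int main(void)
{
	long nr_pages = 1L << 20;	/* pretend 1M pages on one LRU list */
	int priority;

	/* Reclaim starts at DEF_PRIORITY and steps toward 0, so each
	 * rising level of urgency doubles the number of pages scanned. */
	for (priority = 12; priority >= 0; priority--)
		printf("priority %2d -> scan target %ld pages\n",
		       priority, nr_pages >> priority);
	return 0;
}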
@@ -484,14 +461,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	int nid = z->zone_pgdat->node_id;
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
+	int lru = !!active;
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	if (active)
-		src = &mz->active_list;
-	else
-		src = &mz->inactive_list;
-
+	src = &mz->lists[lru];
 
 	spin_lock(&mz->lru_lock);
 	scan = 0;
@@ -863,7 +837,7 @@ int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
 #define FORCE_UNCHARGE_BATCH	(128)
 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 			    struct mem_cgroup_per_zone *mz,
-			    int active)
+			    enum lru_list lru)
 {
 	struct page_cgroup *pc;
 	struct page *page;
@@ -871,10 +845,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 	unsigned long flags;
 	struct list_head *list;
 
-	if (active)
-		list = &mz->active_list;
-	else
-		list = &mz->inactive_list;
+	list = &mz->lists[lru];
 
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	while (!list_empty(list)) {
@@ -922,11 +893,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 		for_each_node_state(node, N_POSSIBLE)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 				struct mem_cgroup_per_zone *mz;
+				enum lru_list l;
 				mz = mem_cgroup_zoneinfo(mem, node, zid);
-				/* drop all page_cgroup in active_list */
-				mem_cgroup_force_empty_list(mem, mz, 1);
-				/* drop all page_cgroup in inactive_list */
-				mem_cgroup_force_empty_list(mem, mz, 0);
+				for_each_lru(l)
+					mem_cgroup_force_empty_list(mem, mz, l);
 			}
 	}
 	ret = 0;
@@ -1015,9 +985,9 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 		unsigned long active, inactive;
 
 		inactive = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_INACTIVE);
+						LRU_INACTIVE);
 		active = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_ACTIVE);
+						LRU_ACTIVE);
 		cb->fill(cb, "active", (active) * PAGE_SIZE);
 		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
 	}
@@ -1062,6 +1032,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup_per_zone *mz;
+	enum lru_list l;
 	int zone, tmp = node;
 	/*
 	 * This routine is called against possible nodes.
@@ -1082,9 +1053,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		INIT_LIST_HEAD(&mz->active_list);
-		INIT_LIST_HEAD(&mz->inactive_list);
 		spin_lock_init(&mz->lru_lock);
+		for_each_lru(l)
+			INIT_LIST_HEAD(&mz->lists[l]);
 	}
 	return 0;
 }
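Every memcontrol.c hunk above performs the same mechanical rewrite: an if/else over two named lists becomes a single array lookup, with `!!` normalizing the flag to a valid index. A self-contained illustration of why the two forms are equivalent (stand-in names, not kernel code):

#include <assert.h>

enum lru_list { LRU_INACTIVE, LRU_ACTIVE, NR_LRU_LISTS };

static unsigned long count[NR_LRU_LISTS];

/* Old shape: branch on the flag and touch one of two named counters. */
static void dec_old(int active)
{
	if (active)
		count[LRU_ACTIVE] -= 1;
	else
		count[LRU_INACTIVE] -= 1;
}

/* New shape: normalize the flag to 0/1 and index the array directly.
 * This only works because LRU_INACTIVE == 0 and LRU_ACTIVE == 1. */
static void dec_new(int active)
{
	int lru = !!active;

	count[lru] -= 1;
}

int main(void)
{
	count[LRU_INACTIVE] = count[LRU_ACTIVE] = 2;
	dec_old(1);		/* both forms hit the same slot */
	dec_new(1);
	assert(count[LRU_ACTIVE] == 0 && count[LRU_INACTIVE] == 2);
	return 0;
}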
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9eb9eb928285..ee7a96ef40dc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3414,6 +3414,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
+		enum lru_list l;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -3465,10 +3466,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->prev_priority = DEF_PRIORITY;
 
 		zone_pcp_init(zone);
-		INIT_LIST_HEAD(&zone->active_list);
-		INIT_LIST_HEAD(&zone->inactive_list);
-		zone->nr_scan_active = 0;
-		zone->nr_scan_inactive = 0;
+		for_each_lru(l) {
+			INIT_LIST_HEAD(&zone->lru[l].list);
+			zone->lru[l].nr_scan = 0;
+		}
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -117,7 +117,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
 			spin_lock(&zone->lru_lock);
 		}
 		if (PageLRU(page) && !PageActive(page)) {
-			list_move_tail(&page->lru, &zone->inactive_list);
+			list_move_tail(&page->lru, &zone->lru[LRU_INACTIVE].list);
 			pgmoved++;
 		}
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1fd4912a596c..46fdaa546b8d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -819,10 +819,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
 					int active)
 {
 	if (active)
-		return isolate_lru_pages(nr, &z->active_list, dst,
+		return isolate_lru_pages(nr, &z->lru[LRU_ACTIVE].list, dst,
 						scanned, order, mode);
 	else
-		return isolate_lru_pages(nr, &z->inactive_list, dst,
+		return isolate_lru_pages(nr, &z->lru[LRU_INACTIVE].list, dst,
 						scanned, order, mode);
 }
 
@@ -973,10 +973,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		list_del(&page->lru);
-		if (PageActive(page))
-			add_page_to_active_list(zone, page);
-		else
-			add_page_to_inactive_list(zone, page);
+		add_page_to_lru_list(zone, page, page_lru(page));
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
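The replacement line leans on two helpers from include/linux/mm_inline.h (the header memcontrol.c now includes). Their bodies are not part of this 'mm'-limited view; the following is a sketch consistent with the call site above, offered as an assumption rather than the committed code:

/* Assumed mm_inline.h helpers; only their call site appears above. */
static inline enum lru_list page_lru(struct page *page)
{
	/* Mirrors the deleted branch: active pages map to LRU_ACTIVE. */
	return PageActive(page) ? LRU_ACTIVE : LRU_INACTIVE;
}

static inline void add_page_to_lru_list(struct zone *zone,
					struct page *page, enum lru_list l)
{
	list_add(&page->lru, &zone->lru[l].list);
	__inc_zone_state(zone, NR_LRU_BASE + l);
}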
@@ -1144,8 +1141,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
-	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
-	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
+	LIST_HEAD(l_active);
+	LIST_HEAD(l_inactive);
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
@@ -1194,7 +1191,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		VM_BUG_ON(!PageActive(page));
 		ClearPageActive(page);
 
-		list_move(&page->lru, &zone->inactive_list);
+		list_move(&page->lru, &zone->lru[LRU_INACTIVE].list);
 		mem_cgroup_move_lists(page, false);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
@@ -1224,7 +1221,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
 
-		list_move(&page->lru, &zone->active_list);
+		list_move(&page->lru, &zone->lru[LRU_ACTIVE].list);
 		mem_cgroup_move_lists(page, true);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
@@ -1244,65 +1241,64 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	pagevec_release(&pvec);
 }
 
+static unsigned long shrink_list(enum lru_list l, unsigned long nr_to_scan,
+	struct zone *zone, struct scan_control *sc, int priority)
+{
+	if (l == LRU_ACTIVE) {
+		shrink_active_list(nr_to_scan, zone, sc, priority);
+		return 0;
+	}
+	return shrink_inactive_list(nr_to_scan, zone, sc);
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static unsigned long shrink_zone(int priority, struct zone *zone,
 				struct scan_control *sc)
 {
-	unsigned long nr_active;
-	unsigned long nr_inactive;
+	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
+	enum lru_list l;
 
 	if (scan_global_lru(sc)) {
 		/*
 		 * Add one to nr_to_scan just to make sure that the kernel
 		 * will slowly sift through the active list.
 		 */
-		zone->nr_scan_active +=
-			(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-		nr_active = zone->nr_scan_active;
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-		nr_inactive = zone->nr_scan_inactive;
-		if (nr_inactive >= sc->swap_cluster_max)
-			zone->nr_scan_inactive = 0;
-		else
-			nr_inactive = 0;
-
-		if (nr_active >= sc->swap_cluster_max)
-			zone->nr_scan_active = 0;
-		else
-			nr_active = 0;
+		for_each_lru(l) {
+			zone->lru[l].nr_scan += (zone_page_state(zone,
+					NR_LRU_BASE + l) >> priority) + 1;
+			nr[l] = zone->lru[l].nr_scan;
+			if (nr[l] >= sc->swap_cluster_max)
+				zone->lru[l].nr_scan = 0;
+			else
+				nr[l] = 0;
+		}
 	} else {
 		/*
 		 * This reclaim occurs not because zone memory shortage but
 		 * because memory controller hits its limit.
 		 * Then, don't modify zone reclaim related data.
 		 */
-		nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
-					zone, priority);
+		nr[LRU_ACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+					zone, priority, LRU_ACTIVE);
 
-		nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
-					zone, priority);
+		nr[LRU_INACTIVE] = mem_cgroup_calc_reclaim(sc->mem_cgroup,
+					zone, priority, LRU_INACTIVE);
 	}
 
-
-	while (nr_active || nr_inactive) {
-		if (nr_active) {
-			nr_to_scan = min(nr_active,
+	while (nr[LRU_ACTIVE] || nr[LRU_INACTIVE]) {
+		for_each_lru(l) {
+			if (nr[l]) {
+				nr_to_scan = min(nr[l],
 					(unsigned long)sc->swap_cluster_max);
-			nr_active -= nr_to_scan;
-			shrink_active_list(nr_to_scan, zone, sc, priority);
-		}
+				nr[l] -= nr_to_scan;
 
-		if (nr_inactive) {
-			nr_to_scan = min(nr_inactive,
-					(unsigned long)sc->swap_cluster_max);
-			nr_inactive -= nr_to_scan;
-			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-								sc);
+				nr_reclaimed += shrink_list(l, nr_to_scan,
+							zone, sc, priority);
+			}
 		}
 	}
 
@@ -1819,6 +1815,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
+	enum lru_list l;
 
 	for_each_zone(zone) {
 
@@ -1828,28 +1825,25 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
-		/* For pass = 0 we don't shrink the active list */
-		if (pass > 0) {
-			zone->nr_scan_active +=
-				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-			if (zone->nr_scan_active >= nr_pages || pass > 3) {
-				zone->nr_scan_active = 0;
+		for_each_lru(l) {
+			/* For pass = 0 we don't shrink the active list */
+			if (pass == 0 && l == LRU_ACTIVE)
+				continue;
+
+			zone->lru[l].nr_scan +=
+				(zone_page_state(zone, NR_LRU_BASE + l)
+								>> prio) + 1;
+			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+				zone->lru[l].nr_scan = 0;
 				nr_to_scan = min(nr_pages,
-					zone_page_state(zone, NR_ACTIVE));
-				shrink_active_list(nr_to_scan, zone, sc, prio);
+					zone_page_state(zone,
+							NR_LRU_BASE + l));
+				ret += shrink_list(l, nr_to_scan, zone,
+								sc, prio);
+				if (ret >= nr_pages)
+					return ret;
 			}
 		}
-
-		zone->nr_scan_inactive +=
-			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages,
-				zone_page_state(zone, NR_INACTIVE));
-			ret += shrink_inactive_list(nr_to_scan, zone, sc);
-			if (ret >= nr_pages)
-				return ret;
-		}
 	}
 
 	return ret;
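Both loops above keep the old batching behaviour: scan credit accumulates in zone->lru[l].nr_scan by `(count >> priority) + 1` per pass and is only spent once it crosses a threshold (sc->swap_cluster_max in shrink_zone, nr_pages here). A toy model of that accumulate-then-spend rhythm (SWAP_CLUSTER_MAX defaults to 32; the zone size is invented):

#include <stdio.h>

int main(void)
{
	unsigned long nr_scan = 0;		/* zone->lru[l].nr_scan */
	unsigned long zone_pages = 40000;	/* made-up list length */
	unsigned long swap_cluster_max = 32;	/* kernel default */
	int priority = 12, pass;

	for (pass = 1; pass <= 5; pass++) {
		/* Credit at least one page per pass, more on big lists. */
		nr_scan += (zone_pages >> priority) + 1;
		if (nr_scan >= swap_cluster_max) {
			printf("pass %d: scan %lu pages, reset credit\n",
			       pass, nr_scan);
			nr_scan = 0;
		} else {
			printf("pass %d: defer, credit now %lu\n",
			       pass, nr_scan);
		}
	}
	return 0;
}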
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d7826af2fb07..52c0335c1b71 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -696,7 +696,8 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   zone->pages_low,
 		   zone->pages_high,
 		   zone->pages_scanned,
-		   zone->nr_scan_active, zone->nr_scan_inactive,
+		   zone->lru[LRU_ACTIVE].nr_scan,
+		   zone->lru[LRU_INACTIVE].nr_scan,
 		   zone->spanned_pages,
 		   zone->present_pages);
 