commit     fa9add641b1b1c564db916accac1db346e7a2759
tree       875e74ec4d7fed0018fdbc134ad899949c5e3384
parent     75b00af77ed5b5a3d55549f9e0c33f3969b9330c
author     Hugh Dickins <hughd@google.com>  2012-05-29 18:07:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:28 -0400
mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down to
its target functions.
This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.
In their place, mem_cgroup_page_lruvec() decides the lruvec (previously a
side-effect of the add), and mem_cgroup_update_lru_size() maintains the
lru_size stats.
Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
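
For readers skimming the diff below, here is a minimal sketch of the calling
convention this patch establishes (the names are taken from the patch itself,
and the locking mirrors the touched callsites, e.g. isolate_lru_page()):

	/* Before: helpers took the zone and derived the lruvec internally. */
	spin_lock_irq(&zone->lru_lock);
	del_page_from_lru_list(zone, page, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);

	/* After: the caller evaluates the lruvec under the same lru_lock. */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	del_page_from_lru_list(page, lruvec, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);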
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/memcontrol.h |  32
 include/linux/mm_inline.h  |  20
 include/linux/swap.h       |   4
 mm/compaction.c            |   5
 mm/huge_memory.c           |   8
 mm/memcontrol.c            | 101
 mm/swap.c                  |  86
 mm/vmscan.c                |  47
 8 files changed, 122 insertions(+), 181 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 609ef7c28c6c..83e7ba90d6e5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,11 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-				       enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-					 enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -122,8 +118,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
 int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -250,21 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 	return &zone->lruvec;
 }
 
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-						     struct page *page,
-						     enum lru_list lru)
-{
-	return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-						       struct page *page,
-						       enum lru_list from,
-						       enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+						    struct zone *zone)
 {
 	return &zone->lruvec;
 }
@@ -345,10 +327,10 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 	return 0;
 }
 
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+			   int increment)
 {
-	return NULL;
 }
 
 static inline void
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 16d45d9c31a4..1397ccf81e91 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**
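
A short sketch of the accounting contract the rewritten helpers establish
(illustrative only, not code from the patch): both helpers compute nr_pages
once via hpage_nr_pages(), so a THP head page is charged and uncharged by the
same amount, keeping the memcg lru_size counts and the zone NR_LRU_BASE
counters in step:

	/* Caller holds lruvec_zone(lruvec)->lru_lock throughout. */
	add_page_to_lru_list(page, lruvec, lru);   /* lru_size[lru] += nr_pages */
	del_page_from_lru_list(page, lruvec, lru); /* lru_size[lru] -= nr_pages */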
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ff38eb7c0ec4..b6661933e252 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-			      struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+			      struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
diff --git a/mm/compaction.c b/mm/compaction.c
index 74e1b3803839..4ac338af5120 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -227,6 +227,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
+	struct lruvec *lruvec;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -328,6 +329,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (cc->mode != COMPACT_SYNC)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
@@ -335,7 +338,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		VM_BUG_ON(PageTransCompound(page));
 
 		/* Successfully isolated */
-		del_page_from_lru_list(zone, page, page_lru(page));
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d0def42c121b..57c4b9309015 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
 {
 	int i;
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
+
 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
@@ -1309,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-
-		lru_add_page_tail(zone, page, page_tail);
+		lru_add_page_tail(page, page_tail, lruvec);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
 	ClearPageCompound(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 75198dac3fe8..bb8d7d3cf302 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem. This can be the global zone lruvec, if the memory controller
@@ -1068,19 +1068,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-				       enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
@@ -1093,7 +1085,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 	memcg = pc->mem_cgroup;
 
 	/*
-	 * Surreptitiously switch any uncharged page to root:
+	 * Surreptitiously switch any uncharged offlist page to root:
 	 * an uncharged page off lru does nothing to secure
 	 * its former mem_cgroup from sudden removal.
 	 *
@@ -1101,65 +1093,35 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 	 * under page_cgroup lock: between them, they make all uses
 	 * of pc->mem_cgroup safe.
 	 */
-	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	/* compound_order() is stabilized through lru_lock */
-	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+				int nr_pages)
 {
 	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-	struct page_cgroup *pc;
+	unsigned long *lru_size;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	pc = lookup_page_cgroup(page);
-	memcg = pc->mem_cgroup;
-	VM_BUG_ON(!memcg);
-	mz = page_cgroup_zoneinfo(memcg, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-	mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-					 struct page *page,
-					 enum lru_list from,
-					 enum lru_list to)
-{
-	/* XXX: Optimize this, especially for @from == @to */
-	mem_cgroup_lru_del_list(page, from);
-	return mem_cgroup_lru_add_list(zone, page, to);
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	lru_size = mz->lru_size + lru;
+	*lru_size += nr_pages;
+	VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
@@ -1252,24 +1214,6 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 	return (active > inactive);
 }
 
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return NULL;
-
-	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
-		return NULL;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	return &mz->lruvec.reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
@@ -2509,6 +2453,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
+	struct lruvec *lruvec;
 	bool was_on_lru = false;
 	bool anon;
 
@@ -2531,8 +2476,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		zone = page_zone(page);
 		spin_lock_irq(&zone->lru_lock);
 		if (PageLRU(page)) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_lru(page));
+			del_page_from_lru_list(page, lruvec, page_lru(page));
 			was_on_lru = true;
 		}
 	}
@@ -2550,9 +2496,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
 	if (lrucare) {
 		if (was_on_lru) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			VM_BUG_ON(PageLRU(page));
 			SetPageLRU(page);
-			add_page_to_lru_list(zone, page, page_lru(page));
+			add_page_to_lru_list(page, lruvec, page_lru(page));
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
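
The new mem_cgroup_update_lru_size() above leans on the lruvec being embedded
in the per-zone memcg state, so container_of() recovers the enclosing
structure from the lruvec pointer alone. A sketch of why that works, with the
struct fields abridged and the helper name hypothetical:

	struct mem_cgroup_per_zone {
		struct lruvec	lruvec;
		unsigned long	lru_size[NR_LRU_LISTS];
		/* ... further per-zone memcg state ... */
	};

	/* Hypothetical helper: lruvec is the first member, but container_of()
	 * would work wherever it sat inside mem_cgroup_per_zone. */
	static struct mem_cgroup_per_zone *lruvec_to_mz(struct lruvec *lruvec)
	{
		return container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	}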
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
-		unsigned long flags;
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
+		unsigned long flags;
 
 		spin_lock_irqsave(&zone->lru_lock, flags);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
-		del_page_from_lru_list(zone, page, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 }
@@ -235,11 +237,12 @@ void put_pages_list(struct list_head *pages)
 EXPORT_SYMBOL(put_pages_list);
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
-	void (*move_fn)(struct page *page, void *arg),
+	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
 	void *arg)
 {
 	int i;
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
@@ -253,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
 
-		(*move_fn)(page, arg);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+		(*move_fn)(page, lruvec, arg);
 	}
 	if (zone)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -261,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 	pagevec_reinit(pvec);
 }
 
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	int *pgmoved = arg;
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		struct lruvec *lruvec;
-
-		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-						   page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
@@ -309,35 +310,30 @@ void rotate_reclaimable_page(struct page *page)
 	}
 }
 
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
 				     int file, int rotated)
 {
-	struct zone_reclaim_stat *reclaim_stat;
-
-	reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
-	if (!reclaim_stat)
-		reclaim_stat = &zone->lruvec.reclaim_stat;
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
 	reclaim_stat->recent_scanned[file]++;
 	if (rotated)
 		reclaim_stat->recent_rotated[file]++;
 }
 
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+			    void *arg)
 {
-	struct zone *zone = page_zone(page);
-
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
-		del_page_from_lru_list(zone, page, lru);
 
+		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
 		lru += LRU_ACTIVE;
-		add_page_to_lru_list(zone, page, lru);
-		__count_vm_event(PGACTIVATE);
+		add_page_to_lru_list(page, lruvec, lru);
 
-		update_page_reclaim_stat(zone, page, file, 1);
+		__count_vm_event(PGACTIVATE);
+		update_page_reclaim_stat(lruvec, file, 1);
 	}
 }
 
@@ -374,7 +370,7 @@ void activate_page(struct page *page)
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	__activate_page(page, NULL);
+	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
 #endif
@@ -441,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 void add_page_to_unevictable_list(struct page *page)
 {
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
-	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 	spin_unlock_irq(&zone->lru_lock);
 }
 
@@ -470,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+			      void *arg)
 {
 	int lru, file;
 	bool active;
-	struct zone *zone = page_zone(page);
 
 	if (!PageLRU(page))
 		return;
@@ -487,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		return;
 
 	active = PageActive(page);
-
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + active);
+
+	del_page_from_lru_list(page, lruvec, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
-	add_page_to_lru_list(zone, page, lru);
+	add_page_to_lru_list(page, lruvec, lru);
 
 	if (PageWriteback(page) || PageDirty(page)) {
 		/*
@@ -503,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 */
 		SetPageReclaim(page);
 	} else {
-		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
 	if (active)
 		__count_vm_event(PGDEACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 0);
+	update_page_reclaim_stat(lruvec, file, 0);
 }
 
 /*
@@ -615,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
 	int i;
 	LIST_HEAD(pages_to_free);
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long uninitialized_var(flags);
 
 	for (i = 0; i < nr; i++) {
@@ -642,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
 				zone = pagezone;
 				spin_lock_irqsave(&zone->lru_lock, flags);
 			}
+
+			lruvec = mem_cgroup_page_lruvec(page, zone);
 			VM_BUG_ON(!PageLRU(page));
 			__ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_off_lru(page));
+			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
 		list_add(&page->lru, &pages_to_free);
@@ -676,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-		       struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+		       struct lruvec *lruvec)
 {
 	int uninitialized_var(active);
 	enum lru_list lru;
@@ -686,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
 	VM_BUG_ON(PageLRU(page_tail));
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+	VM_BUG_ON(NR_CPUS != 1 &&
+		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
 	SetPageLRU(page_tail);
 
@@ -715,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(zone, page_tail, lru);
+		add_page_to_lru_list(page_tail, lruvec, lru);
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(zone, page_tail, file, active);
+		update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	enum lru_list lru = (enum lru_list)arg;
-	struct zone *zone = page_zone(page);
 	int file = is_file_lru(lru);
 	int active = is_active_lru(lru);
 
@@ -739,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	add_page_to_lru_list(zone, page, lru);
-	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(page, lruvec, lru);
+	update_page_reclaim_stat(lruvec, file, active);
 }
 
 /*
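
To show the new callback contract in mm/swap.c (a hypothetical move_fn for
illustration, not one added by this patch): pagevec_lru_move_fn() now resolves
the lruvec under lru_lock and hands it to the callback, so a callback only
manipulates the lists, much like pagevec_move_tail_fn() above:

	static void rotate_to_tail_fn(struct page *page, struct lruvec *lruvec,
				      void *arg)
	{
		/* lru_lock is held and lruvec is valid for this page. */
		if (PageLRU(page) && !PageUnevictable(page))
			list_move_tail(&page->lru, &lruvec->lists[page_lru(page)]);
	}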
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 05d439dc1af9..eeb3bc9d1d36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1031,6 +1031,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
+		int nr_pages;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
@@ -1039,9 +1040,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
-			mem_cgroup_lru_del_list(page, lru);
+			nr_pages = hpage_nr_pages(page);
+			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 			list_move(&page->lru, dst);
-			nr_taken += hpage_nr_pages(page);
+			nr_taken += nr_pages;
 			break;
 
 		case -EBUSY:
@@ -1093,15 +1095,16 @@ int isolate_lru_page(struct page *page)
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
 
 		spin_lock_irq(&zone->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
-			ret = 0;
 			get_page(page);
 			ClearPageLRU(page);
-
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
+			ret = 0;
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
@@ -1155,9 +1158,13 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
+		add_page_to_lru_list(page, lruvec, lru);
+
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
@@ -1166,7 +1173,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1314,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * But we had to alter page->flags anyway.
 */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
 				     struct list_head *list,
 				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
+	struct zone *zone = lruvec_zone(lruvec);
 	unsigned long pgmoved = 0;
 	struct page *page;
+	int nr_pages;
 
 	while (!list_empty(list)) {
-		struct lruvec *lruvec;
-
 		page = lru_to_page(list);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		nr_pages = hpage_nr_pages(page);
+		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
-		pgmoved += hpage_nr_pages(page);
+		pgmoved += nr_pages;
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1443,8 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
-	move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -3237,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
@@ -3246,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 
 		VM_BUG_ON(PageActive(page));
 		ClearPageUnevictable(page);
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, lru);
-		list_move(&page->lru, &lruvec->lists[lru]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+		del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+		add_page_to_lru_list(page, lruvec, lru);
 		pgrescued++;
 	}
 }