author		Hugh Dickins <hugh@veritas.com>	2008-03-04 17:29:03 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 19:35:14 -0500
commit		427d5416f317681498337ab19218d195edea02d6 (patch)
tree		967cfc87ec775ca3c05f95f2339697e796383191
parent		bd845e38c7a7251a95a8f2c38aa7fb87140b771d (diff)
memcg: move_lists on page not page_cgroup
Each caller of mem_cgroup_move_lists currently has to use page_get_page_cgroup:
it's more convenient if the function acts upon the page itself, not the page_cgroup;
and in a later patch it becomes important to handle this within memcontrol.c.
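
For reference, a minimal sketch of how the converted routine reads after this
change, assembled from the mm/memcontrol.c hunk below (the rest of the body is
untouched by this patch and only summarized in a comment here):

	void mem_cgroup_move_lists(struct page *page, bool active)
	{
		struct page_cgroup *pc;
		struct mem_cgroup_per_zone *mz;
		unsigned long flags;

		/* lookup moved in from the callers */
		pc = page_get_page_cgroup(page);
		if (!pc)
			return;

		/* ... remainder (moving pc between the memcg per-zone lists)
		 * is unchanged by this patch and omitted here ... */
	}

Callers such as activate_page() and shrink_active_list() then simply pass the
page and drop their own page_get_page_cgroup() calls.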
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/memcontrol.h	5
-rw-r--r--	mm/memcontrol.c	4
-rw-r--r--	mm/swap.c	2
-rw-r--r--	mm/vmscan.c	5
4 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e4247c83c1c7..56432ff8d4e3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -36,7 +36,7 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 extern void mem_cgroup_uncharge(struct page_cgroup *pc);
 extern void mem_cgroup_uncharge_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
+extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -106,8 +106,7 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
-						bool active)
+static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 41041c0a6898..afdd406f618a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -407,11 +407,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
 
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -176,7 +176,7 @@ void activate_page(struct page *page)
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 106ba10e1ac6..45711585684e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->inactive_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), false);
+		mem_cgroup_move_lists(page, false);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
@@ -1156,8 +1156,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
+
 		list_move(&page->lru, &zone->active_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);