author      KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2008-04-29 04:00:22 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>       2008-04-29 11:06:10 -0400
commit      3eae90c3cdd4e762d0f4f5e939c98780fccded57 (patch)
tree        d5b6f138e8464827b7b882fc6acea3916aca8957 /mm
parent      29f2a4dac856e9433a502b05b40e8e90385d8e27 (diff)
memcg: remove redundant function calls
remove_list/add_list call page_cgroup_zoneinfo() internally, so it ends up being called twice: once by the caller before taking the lock, and again inside the helper while the lock is held:
mz = page_cgroup_zoneinfo();
lock();
mz = page_cgroup_zoneinfo();
....
unlock();
The address of mz never changes between the two calls, so the second lookup is redundant. This patch removes it by passing mz to the list helpers directly.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
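For readers skimming the diff below, here is a minimal, self-contained sketch of the calling convention the patch moves to. It is not code from the kernel tree: the structure layouts and the example_*/__example_* names are simplified stand-ins, and page_cgroup_zoneinfo() is only declared, not implemented. The point it illustrates is that the caller resolves the per-zone info once and hands it to the list helper, so nothing is recomputed under the lock.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Simplified stand-ins for the real mm/memcontrol.c structures. */
struct mem_cgroup_per_zone {
	spinlock_t lru_lock;
	struct list_head active_list;
};

struct page_cgroup {
	struct list_head lru;
};

/* Assumed to map a page_cgroup to its per-zone container, as in memcontrol.c. */
extern struct mem_cgroup_per_zone *page_cgroup_zoneinfo(struct page_cgroup *pc);

/* After the patch: mz is a parameter, so the helper does no lookup under the lock. */
static void __example_add_list(struct mem_cgroup_per_zone *mz,
			       struct page_cgroup *pc)
{
	list_add(&pc->lru, &mz->active_list);
}

static void example_charge_path(struct page_cgroup *pc)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	mz = page_cgroup_zoneinfo(pc);		/* resolved exactly once */
	spin_lock_irqsave(&mz->lru_lock, flags);
	__example_add_list(mz, pc);		/* reuse mz; no second lookup */
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}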
Diffstat (limited to 'mm')
-rw-r--r--   mm/memcontrol.c   16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f891876efee1..395fd8e4166a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -275,10 +275,10 @@ static void unlock_page_cgroup(struct page *page)
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+				struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (from)
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -289,10 +289,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
 	list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+				struct page_cgroup *pc)
 {
 	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
@@ -618,7 +618,7 @@ retry:
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(page);
@@ -674,7 +674,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (--(pc->ref_cnt) == 0) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_remove_list(pc);
+		__mem_cgroup_remove_list(mz, pc);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 		page_assign_page_cgroup(page, NULL);
@@ -736,7 +736,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(pc);
+	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	page_assign_page_cgroup(page, NULL);
@@ -748,7 +748,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(newpage);