author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2008-07-25 04:47:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2008-07-25 13:53:37 -0400
commit     c9b0ed51483cc2fc42bb801b6675c4231b0e4634 (patch)
tree       1d322b4bfcaad3296752f6efcab918d1b13b50a3
parent     69029cd550284e32de13d6dd2f77b723c8a0e444 (diff)
memcg: helper function for reclaim from shmem.
A new call, mem_cgroup_shrink_usage(), is added for shmem handling, replacing the
non-standard usage of mem_cgroup_charge/uncharge.

Currently, shmem calls mem_cgroup_charge() just to reclaim some pages from a
mem_cgroup. In general, shmem is used by some process group and not as a
global resource (like file caches). So, it's reasonable to reclaim pages
from the mem_cgroup where shmem is mainly used.
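
As a rough sketch of the intended call pattern (not part of the patch): when a
cgroup charge fails with -ENOMEM, the caller asks the owning mm's memory
controller to shrink its usage and then retries, much like the shmem hunk
below. Only mem_cgroup_shrink_usage() comes from this change; the wrapper
function here is hypothetical.

    /*
     * Hypothetical illustration only: handle a charge failure with the
     * new helper.  try_charge_with_reclaim() is made up for this sketch;
     * mem_cgroup_cache_charge() and mem_cgroup_shrink_usage() are real.
     */
    #include <linux/memcontrol.h>
    #include <linux/sched.h>

    static int try_charge_with_reclaim(struct page *page, gfp_t gfp)
    {
    	int error = mem_cgroup_cache_charge(page, current->mm, gfp);

    	if (error == -ENOMEM) {
    		/* ask the owning cgroup to shrink its usage, then retry */
    		error = mem_cgroup_shrink_usage(current->mm, gfp);
    		if (!error)
    			error = mem_cgroup_cache_charge(page, current->mm, gfp);
    	}
    	return error;	/* 0 on success, -ENOMEM if reclaim made no progress */
    }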
[hugh@veritas.com: shmem_getpage release page sooner]
[hugh@veritas.com: mem_cgroup_shrink_usage css_put]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/memcontrol.h |  7
-rw-r--r--  mm/memcontrol.c            | 26
-rw-r--r--  mm/shmem.c                 | 11
3 files changed, 37 insertions, 7 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b4980b8f048e..fdf3967e1397 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -37,6 +37,8 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern void mem_cgroup_move_lists(struct page *page, bool active);
+extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -102,6 +104,11 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
+static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+{
+	return 0;
+}
+
 static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a61706193c31..f46b8615de6c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -781,6 +781,32 @@ void mem_cgroup_end_migration(struct page *newpage)
 }
 
 /*
+ * A call to try to shrink memory usage under specified resource controller.
+ * This is typically used for page reclaiming for shmem for reducing side
+ * effect of page allocation from shmem, which is used by some mem_cgroup.
+ */
+int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+{
+	struct mem_cgroup *mem;
+	int progress = 0;
+	int retry = MEM_CGROUP_RECLAIM_RETRIES;
+
+	rcu_read_lock();
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	css_get(&mem->css);
+	rcu_read_unlock();
+
+	do {
+		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
+	} while (!progress && --retry);
+
+	css_put(&mem->css);
+	if (!retry)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
  */
diff --git a/mm/shmem.c b/mm/shmem.c
index d58305e8a484..f92fea94d037 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1315,17 +1315,14 @@ repeat:
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			unlock_page(swappage);
+			page_cache_release(swappage);
 			if (error == -ENOMEM) {
 				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_cache_charge(swappage,
-					current->mm, gfp & ~__GFP_HIGHMEM);
-				if (error) {
-					page_cache_release(swappage);
+				error = mem_cgroup_shrink_usage(current->mm,
+								gfp);
+				if (error)
 					goto failed;
-				}
-				mem_cgroup_uncharge_cache_page(swappage);
 			}
-			page_cache_release(swappage);
 			goto repeat;
 		}
 	} else if (sgp == SGP_READ && !filepage) {