author     Ingo Molnar <mingo@elte.hu>   2009-01-11 09:34:05 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-01-11 09:34:05 -0500
commit     d19b85db9d5c44a4c21dcb10d6fbadaa4425ab2a
tree       250be7a5a29069f7d1f4524fa45ab0e988833025 /include/linux/memcontrol.h
parent     490dea45d00f01847ebebd007685d564aaf2cd98
parent     c59765042f53a79a7a65585042ff463b69cb248c
Merge commit 'v2.6.29-rc1' into timers/urgent
Diffstat (limited to 'include/linux/memcontrol.h')
 -rw-r--r--  include/linux/memcontrol.h | 154
 1 file changed, 136 insertions(+), 18 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d39521..326f45c86530 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -19,22 +19,45 @@
 
 #ifndef _LINUX_MEMCONTROL_H
 #define _LINUX_MEMCONTROL_H
-
+#include <linux/cgroup.h>
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions that take a gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
+ * allocate memory but reclaims memory from all available zones, so the "where
+ * I want memory from" bits of gfp_mask have no meaning. Any bits of that field
+ * would work, but having a rule is better: a charge function's gfp_mask should
+ * be set to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguous
+ * code.
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */
 
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
+extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru(struct page *page);
+extern void mem_cgroup_move_lists(struct page *page,
+				  enum lru_list from, enum lru_list to);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
+extern int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
@@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-#define mm_match_cgroup(mm, cgroup)	\
-	((cgroup) == mem_cgroup_from_task((mm)->owner))
+static inline
+int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+{
+	struct mem_cgroup *mem;
+	rcu_read_lock();
+	mem = mem_cgroup_from_task((mm)->owner);
+	rcu_read_unlock();
+	return cgroup == mem;
+}
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage);
 
 /*
  * For memory reclaim.
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						       struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern int do_swap_account;
+#endif
 
+static inline bool mem_cgroup_disabled(void)
+{
+	if (mem_cgroup_subsys.disabled)
+		return true;
+	return false;
+}
+
+extern bool mem_cgroup_oom_called(struct task_struct *task);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
@@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 	return 0;
 }
 
+static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+					  struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
@@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+static inline int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_move_lists(struct page *page, bool active)
+static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+{
+}
+
+static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_del_lru(struct page *page)
+{
+	return ;
+}
+
+static inline void
+mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
 {
 }
 
@@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 }
 
 static inline int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_end_migration(struct page *page)
+static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
+					struct page *oldpage,
+					struct page *newpage)
 {
 }
 
@@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 {
 }
 
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
-					struct zone *zone, int priority,
-					enum lru_list lru)
+static inline bool mem_cgroup_disabled(void)
+{
+	return true;
+}
+
+static inline bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	return false;
+}
+
+static inline int
+mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+	return 1;
+}
+
+static inline unsigned long
+mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
+			 enum lru_list lru)
 {
 	return 0;
 }
+
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
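
The most visible API change in this diff is that swap-in charging is split into a try/commit/cancel sequence (mem_cgroup_try_charge_swapin, mem_cgroup_commit_charge_swapin, mem_cgroup_cancel_charge_swapin). Below is a minimal, hypothetical sketch of how a swap-in path could drive these three calls; it is not taken from the kernel tree. The function handle_swapin_example() and the stand-in map_page_somehow() are illustrative assumptions, and only the mem_cgroup_* declarations come from the header above.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: charge a page read in from swap against mm's memcg.
 * The try/commit/cancel split lets the caller reserve the charge before it
 * knows whether the page will really be mapped, and back out cleanly if not.
 */
static int handle_swapin_example(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *memcg = NULL;

	/* Reserve the charge; may reclaim, so GFP_KERNEL per the comment above. */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		return -ENOMEM;

	if (map_page_somehow(page) != 0) {	/* stand-in for the real work */
		/* Mapping failed: drop the reserved charge. */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EFAULT;
	}

	/* Success: bind the reserved charge to the page. */
	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}

This mirrors the charge-commit-cancel style the memcg patches in this merge introduce; the actual callers live in the core VM swap-in paths (for example do_swap_page()), and when CONFIG_CGROUP_MEM_RES_CTLR is disabled the stubs above make the whole sequence compile away to nothing.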