path: root/include/linux/memcontrol.h
Diffstat (limited to 'include/linux/memcontrol.h')
 include/linux/memcontrol.h | 105 +++++++----------------------
 1 file changed, 21 insertions(+), 84 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..b3e7a667e03c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-	VISIT,		/* visit current node */
-	SKIP,		/* skip the current node and continue traversal */
-	SKIP_TREE,	/* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
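
For context on what the removed typedef did: a filter predicate let hierarchy walks prune individual nodes or whole subtrees. A minimal sketch of such a predicate, assuming a hypothetical memcg_over_soft_limit() helper (illustrative only, not code from this tree):

static enum mem_cgroup_filter_t
soft_limit_filter(struct mem_cgroup *memcg, struct mem_cgroup *root)
{
	/* over its soft limit: have the iterator return this group */
	if (memcg_over_soft_limit(memcg))	/* hypothetical helper */
		return VISIT;
	/* otherwise skip this node but continue into its children */
	return SKIP;
}
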
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
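
The restored mem_cgroup_iter() is the plain hierarchy iterator again. For reference, its canonical calling pattern pairs it with mem_cgroup_iter_break(), which drops the reference held on the current position when the walk terminates early (a usage sketch, not part of this patch; should_stop() stands in for any caller-specific condition):

struct mem_cgroup *iter;

for (iter = mem_cgroup_iter(root, NULL, NULL);
     iter;
     iter = mem_cgroup_iter(root, iter, NULL)) {
	/* ... work on iter ... */
	if (should_stop(iter)) {	/* hypothetical condition */
		mem_cgroup_iter_break(root, iter);
		break;
	}
}
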
@@ -163,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-/**
- * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
- * @new: true to enable, false to disable
- *
- * Toggle whether a failed memcg charge should invoke the OOM killer
- * or just return -ENOMEM. Returns the previous toggle state.
- *
- * NOTE: Any path that enables the OOM killer before charging must
- *       call mem_cgroup_oom_synchronize() afterward to finalize the
- *       OOM handling and clean up.
- */
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-	bool old;
-
-	old = current->memcg_oom.may_oom;
-	current->memcg_oom.may_oom = new;
-
-	return old;
+	WARN_ON(current->memcg_oom.may_oom);
+	current->memcg_oom.may_oom = 1;
 }
 
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
-	bool old = mem_cgroup_toggle_oom(true);
-
-	WARN_ON(old == true);
-}
-
-static inline void mem_cgroup_disable_oom(void)
-{
-	bool old = mem_cgroup_toggle_oom(false);
-
-	WARN_ON(old == false);
+	WARN_ON(!current->memcg_oom.may_oom);
+	current->memcg_oom.may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-	return p->memcg_oom.in_memcg_oom;
+	return p->memcg_oom.memcg;
 }
 
-bool mem_cgroup_oom_synchronize(void);
+bool mem_cgroup_oom_synchronize(bool wait);
 
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
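
mem_cgroup_oom_enable()/mem_cgroup_oom_disable() replace the toggle with a strict arm/disarm pair (the WARN_ONs catch unbalanced use), and mem_cgroup_oom_synchronize() now takes a flag saying whether to actually handle the OOM or merely clean up the per-task state. Roughly how a page fault path would use this convention (a sketch of the intended caller, not code from this header):

	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_enable();

	ret = __handle_mm_fault(mm, vma, address, flags);

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_oom_disable();
		/*
		 * A memcg OOM may have been recorded, but if the fault
		 * completed anyway, just clean up the OOM state quietly
		 * instead of invoking the OOM killer.
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}
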
@@ -260,9 +211,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask,
+					    unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
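
This restores the per-zone soft limit reclaim entry point in place of the filter-based eligibility check. Its caller folds the reclaimed and scanned counts back into the surrounding scan control, roughly like this (an illustrative sketch of the reclaim-side caller; names follow the usual scan_control conventions):

	unsigned long nr_soft_scanned = 0;
	unsigned long nr_soft_reclaimed;

	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order,
							  sc.gfp_mask,
							  &nr_soft_scanned);
	sc.nr_reclaimed += nr_soft_reclaimed;
	sc.nr_scanned += nr_soft_scanned;
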
@@ -376,15 +327,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim,
-		mem_cgroup_iter_filter cond)
-{
-	/* first call must return non-NULL, second return NULL */
-	return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -437,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-	return false;
 }
 
-static inline void mem_cgroup_enable_oom(void)
-{
-}
-
-static inline void mem_cgroup_disable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
 }
 
@@ -455,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
 	return false;
 }
 
-static inline bool mem_cgroup_oom_synchronize(void)
+static inline bool mem_cgroup_oom_synchronize(bool wait)
 {
 	return false;
 }
@@ -471,11 +408,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-		struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+					    gfp_t gfp_mask,
+					    unsigned long *total_scanned)
 {
-	return VISIT;
+	return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)