author     Andrew Morton <akpm@linux-foundation.org>	2013-09-24 18:27:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2013-09-24 20:00:26 -0400
commit     694fbc0fe78518d06efa63910bf4ecee660e7852
tree       895244c4cc010759ce5c29333bee3cddc8c852d2 /include
parent     30361e51cae7a4df3fec89f935a450a6fe6f16fa
revert "memcg: enhance memcg iterator to support predicates"
Revert commit de57780dc659 ("memcg: enhance memcg iterator to support predicates").

I merged this prematurely - Michal and Johannes still disagree about the overall design direction and the future remains unclear.

Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/memcontrol.h | 49
 1 file changed, 6 insertions(+), 43 deletions(-)
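For orientation before the hunks below: the interface being reverted let a reclaim caller hand the iterator a predicate (returning VISIT, SKIP or SKIP_TREE) so whole subtrees could be pruned during a hierarchy walk. The following is a minimal sketch of how such a walk would have been driven, built only from the declarations removed in the diff; the always-visit predicate, the empty loop body and the function names are illustrative, not the kernel's actual reclaim code, and it only builds against the pre-revert header.

#include <linux/memcontrol.h>

/* Illustrative predicate: visit every node.  A real caller would return
 * SKIP or SKIP_TREE to prune parts of the tree. */
static enum mem_cgroup_filter_t visit_all(struct mem_cgroup *memcg,
					  struct mem_cgroup *root)
{
	return VISIT;
}

static void walk_with_predicate(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;

	/* The filter is evaluated for each node before it is returned. */
	for (memcg = mem_cgroup_iter_cond(root, NULL, NULL, visit_all);
	     memcg;
	     memcg = mem_cgroup_iter_cond(root, memcg, NULL, visit_all))
		;	/* per-memcg work would go here */
}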
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..ef2b9bd7fafa 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-	VISIT,		/* visit current node */
-	SKIP,		/* skip the current node and continue traversal */
-	SKIP_TREE,	/* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim,
-				   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-				   struct mem_cgroup *prev,
-				   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+				   struct mem_cgroup *,
+				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,8 +234,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 		struct mem_cgroup *root);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
@@ -376,15 +349,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-		struct mem_cgroup *prev,
-		struct mem_cgroup_reclaim_cookie *reclaim,
-		mem_cgroup_iter_filter cond)
-{
-	/* first call must return non-NULL, second return NULL */
-	return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +435,10 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 		struct mem_cgroup *root)
 {
-	return VISIT;
+	return false;
}
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
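After the revert, hierarchy walks go back to the plain cursor-style iterator declared above. A minimal usage sketch, assuming only mem_cgroup_iter() and mem_cgroup_iter_break() as shown in this header; do_work() is a hypothetical per-memcg callback and the reclaim cookie is simply passed as NULL:

#include <linux/memcontrol.h>

/* Hypothetical per-memcg work; returns true when the walk should stop. */
static bool do_work(struct mem_cgroup *memcg)
{
	return false;
}

static void walk_memcg_tree(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;

	/* Cursor-style walk: feed the previous position back in. */
	for (memcg = mem_cgroup_iter(root, NULL, NULL);
	     memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		if (do_work(memcg)) {
			/* Leaving the loop early must release the cursor. */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}
}

The !CONFIG_MEMCG stubs keep this loop shape working with memcg disabled: as the comment in the removed mem_cgroup_iter_cond() stub notes ("first call must return non-NULL, second return NULL"), the stub iterator hands back a single non-NULL token so the body runs exactly once for the root.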