author		Andrew Morton <akpm@linux-foundation.org>	2013-09-24 18:27:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-24 20:00:25 -0400
commit		30361e51cae7a4df3fec89f935a450a6fe6f16fa (patch)
tree		b2f51177f08a9e4ef5211fc8f8399c5a1a2db431 /mm
parent		3120055e869f2a208480f238680d097eec8f0e02 (diff)
revert "memcg: track children in soft limit excess to improve soft limit"
Revert commit 7d910c054be4 ("memcg: track children in soft limit excess
to improve soft limit").

I merged this prematurely - Michal and Johannes still disagree about the
overall design direction and the future remains unclear.

Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	71
1 file changed, 0 insertions, 71 deletions
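For context before reading the diff: the patch being reverted made every mem_cgroup remember whether it had already been counted into its ancestors (soft_contributed) and keep an atomic count of descendants over their soft limit (children_in_excess), so that soft limit reclaim could prune subtrees with nothing in excess. The following is a minimal user-space sketch of that scheme, not kernel code: the struct, the function name, and the C11 atomics are illustrative, and it omits the memcg->soft_lock serialization the kernel used.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for struct mem_cgroup, not the kernel type. */
struct group {
	struct group *parent;
	atomic_int children_in_excess;	/* descendants over their soft limit */
	bool soft_contributed;		/* already counted into the ancestors? */
};

/*
 * Same idea as the reverted mem_cgroup_update_soft_limit(): turn the
 * "over the soft limit?" state into a single +1/-1 transition that is
 * propagated to every ancestor exactly once per state flip.
 */
static void update_soft_limit(struct group *g, bool over_limit)
{
	int delta = 0;

	if (over_limit && !g->soft_contributed) {
		delta = 1;
		g->soft_contributed = true;
	} else if (!over_limit && g->soft_contributed) {
		delta = -1;
		g->soft_contributed = false;
	}

	/* Ancestors' own event counters are never touched, so walk up. */
	for (struct group *p = g->parent; delta && p; p = p->parent)
		atomic_fetch_add(&p->children_in_excess, delta);
}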
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5cf7726764cc..916892c2b8e0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -124,7 +124,6 @@ static const char * const mem_cgroup_lru_names[] = {
  */
 enum mem_cgroup_events_target {
 	MEM_CGROUP_TARGET_THRESH,
-	MEM_CGROUP_TARGET_SOFTLIMIT,
 	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
@@ -303,22 +302,6 @@ struct mem_cgroup {
 	atomic_t	numainfo_events;
 	atomic_t	numainfo_updating;
 #endif
-	/*
-	 * Protects soft_contributed transitions.
-	 * See mem_cgroup_update_soft_limit
-	 */
-	spinlock_t soft_lock;
-
-	/*
-	 * If true then this group has increased parents' children_in_excess
-	 * when it got over the soft limit.
-	 * When a group falls bellow the soft limit, parents' children_in_excess
-	 * is decreased and soft_contributed changed to false.
-	 */
-	bool soft_contributed;
-
-	/* Number of children that are in soft limit excess */
-	atomic_t children_in_excess;
 
 	struct mem_cgroup_per_node *nodeinfo[0];
 	/* WARNING: nodeinfo must be the last member here */
@@ -806,9 +789,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 	case MEM_CGROUP_TARGET_THRESH:
 		next = val + THRESHOLDS_EVENTS_TARGET;
 		break;
-	case MEM_CGROUP_TARGET_SOFTLIMIT:
-		next = val + SOFTLIMIT_EVENTS_TARGET;
-		break;
 	case MEM_CGROUP_TARGET_NUMAINFO:
 		next = val + NUMAINFO_EVENTS_TARGET;
 		break;
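The hunk above drops the soft limit case from the per-target event ratelimit. The mechanism itself is simple: each target keeps a "fire next at" value that trails a monotonically increasing page event counter. A hedged user-space paraphrase follows (single-threaded, with an illustrative threshold value; the kernel tracks these per-cpu):

#include <stdbool.h>

#define THRESHOLDS_EVENTS_TARGET 128	/* illustrative; each target has its own */

struct event_state {
	unsigned long events;	/* monotonically increasing event count */
	unsigned long next;	/* fire once events has passed this */
};

/* Returns true at most once per THRESHOLDS_EVENTS_TARGET events, then re-arms. */
static bool event_ratelimit(struct event_state *st)
{
	if ((long)(st->next - st->events) < 0) {
		st->next = st->events + THRESHOLDS_EVENTS_TARGET;
		return true;
	}
	return false;
}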
@@ -822,42 +802,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 }
 
 /*
- * Called from rate-limited memcg_check_events when enough
- * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be notified that this group
- * is in excess or that it is not in excess anymore. mmecg->soft_contributed
- * makes the transition a single action whenever the state flips from one to
- * the other.
- */
-static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
-{
-	unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
-	struct mem_cgroup *parent = memcg;
-	int delta = 0;
-
-	spin_lock(&memcg->soft_lock);
-	if (excess) {
-		if (!memcg->soft_contributed) {
-			delta = 1;
-			memcg->soft_contributed = true;
-		}
-	} else {
-		if (memcg->soft_contributed) {
-			delta = -1;
-			memcg->soft_contributed = false;
-		}
-	}
-
-	/*
-	 * Necessary to update all ancestors when hierarchy is used
-	 * because their event counter is not touched.
-	 */
-	while (delta && (parent = parent_mem_cgroup(parent)))
-		atomic_add(delta, &parent->children_in_excess);
-	spin_unlock(&memcg->soft_lock);
-}
-
-/*
  * Check events in order.
  *
  */
@@ -867,11 +811,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 	/* threshold event is triggered in finer grain than soft limit */
 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_THRESH))) {
-		bool do_softlimit;
 		bool do_numainfo __maybe_unused;
 
-		do_softlimit = mem_cgroup_event_ratelimit(memcg,
-						MEM_CGROUP_TARGET_SOFTLIMIT);
 #if MAX_NUMNODES > 1
 		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 						MEM_CGROUP_TARGET_NUMAINFO);
@@ -879,8 +820,6 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 		preempt_enable();
 
 		mem_cgroup_threshold(memcg);
-		if (unlikely(do_softlimit))
-			mem_cgroup_update_soft_limit(memcg);
 #if MAX_NUMNODES > 1
 		if (unlikely(do_numainfo))
 			atomic_inc(&memcg->numainfo_events);
@@ -1864,9 +1803,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
  * hierarchy if
  * a) it is over its soft limit
  * b) any parent up the hierarchy is over its soft limit
- *
- * If the given group doesn't have any children over the limit then it
- * doesn't make any sense to iterate its subtree.
  */
 enum mem_cgroup_filter_t
 mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
@@ -1888,8 +1824,6 @@ mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 		break;
 	}
 
-	if (!atomic_read(&memcg->children_in_excess))
-		return SKIP_TREE;
 	return SKIP;
 }
 
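With children_in_excess gone, mem_cgroup_soft_reclaim_eligible() can no longer prove that an entire subtree has nothing in excess, so the SKIP_TREE short-circuit above is removed and the iterator goes back to visiting children individually. The pruning idea, as a self-contained sketch: the tree type and walker here are hypothetical, only the filter values mirror the kernel's mem_cgroup_filter_t.

enum filter { VISIT, SKIP, SKIP_TREE };

struct node {
	struct node *first_child;
	struct node *next_sibling;
	int children_in_excess;	/* as maintained by the reverted patch */
};

static enum filter eligible(const struct node *n)
{
	/* checks that return VISIT when n itself is over its limit elided */
	if (!n->children_in_excess)
		return SKIP_TREE;	/* nothing below can be in excess */
	return SKIP;			/* skip this node but still descend */
}

static void walk(struct node *n, void (*reclaim)(struct node *))
{
	enum filter f = eligible(n);

	if (f == SKIP_TREE)
		return;			/* prune the whole subtree */
	if (f == VISIT)
		reclaim(n);
	for (struct node *c = n->first_child; c; c = c->next_sibling)
		walk(c, reclaim);
}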
@@ -6021,7 +5955,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	mutex_init(&memcg->thresholds_lock);
 	spin_lock_init(&memcg->move_lock);
 	vmpressure_init(&memcg->vmpressure);
-	spin_lock_init(&memcg->soft_lock);
 
 	return &memcg->css;
 
@@ -6099,10 +6032,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
-	if (memcg->soft_contributed) {
-		while ((memcg = parent_mem_cgroup(memcg)))
-			atomic_dec(&memcg->children_in_excess);
-	}
 	mem_cgroup_destroy_all_caches(memcg);
 	vmpressure_cleanup(&memcg->vmpressure);
 }
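Finally, the offline path above had to unwind a still-contributing group, otherwise its +1 would leak into every ancestor's children_in_excess after the group disappeared. In terms of the earlier user-space sketch (same hypothetical types, same caveats):

/* Drop this group's contribution from all ancestors at teardown. */
static void group_offline(struct group *g)
{
	if (g->soft_contributed) {
		g->soft_contributed = false;
		for (struct group *p = g->parent; p; p = p->parent)
			atomic_fetch_sub(&p->children_in_excess, 1);
	}
}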