-rw-r--r--   mm/memcontrol.c   30
-rw-r--r--   mm/vmscan.c       12
2 files changed, 21 insertions, 21 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 46717d6c62b7..c4524458b7d0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -279,7 +279,7 @@ struct mem_cgroup {
 	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
-	unsigned long move_charge_at_immigrate;
+	unsigned long move_charge_at_immigrate;
 	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
@@ -324,7 +324,7 @@ struct mem_cgroup {
 
 	/*
	 * If true then this group has increased parents' children_in_excess
-	 * when it got over the soft limit.
+	 * when it got over the soft limit.
 	 * When a group falls bellow the soft limit, parents' children_in_excess
	 * is decreased and soft_contributed changed to false.
	 */
@@ -835,12 +835,12 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 }
 
 /*
- * Called from rate-limitted memcg_check_events when enough
+ * Called from rate-limited memcg_check_events when enough
  * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be noticed that this group
+ * that all the parents up the hierarchy will be notified that this group
  * is in excess or that it is not in excess anymore. mmecg->soft_contributed
  * makes the transition a single action whenever the state flips from one to
- * other.
+ * the other.
  */
 static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
 {
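The comment fixed in the hunk above describes a once-per-transition protocol: a group's soft_contributed flag records whether it has already been counted into every ancestor's children_in_excess, so the counters are only touched when the over/under state actually flips. The following standalone C sketch (not part of the patch, and not the kernel implementation; types and names are simplified) models that bookkeeping:

#include <stdbool.h>
#include <stdio.h>

struct group {
	struct group *parent;
	unsigned long usage, soft_limit;
	int children_in_excess;	/* descendants currently over their soft limit */
	bool soft_contributed;	/* already counted into the parents' totals?   */
};

/* Call whenever 'g' may have crossed its soft limit in either direction. */
static void update_soft_limit(struct group *g)
{
	bool in_excess = g->usage > g->soft_limit;
	struct group *p;

	if (in_excess == g->soft_contributed)
		return;			/* no flip, nothing to propagate */

	g->soft_contributed = in_excess;
	for (p = g->parent; p; p = p->parent) {
		if (in_excess)
			p->children_in_excess++;
		else
			p->children_in_excess--;
	}
}

int main(void)
{
	struct group root = { 0 };
	struct group child = { .parent = &root, .soft_limit = 100 };

	child.usage = 150;
	update_soft_limit(&child);	/* root.children_in_excess becomes 1 */
	child.usage = 50;
	update_soft_limit(&child);	/* and drops back to 0 */
	printf("%d\n", root.children_in_excess);
	return 0;
}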
@@ -1881,8 +1881,8 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 /*
  * A group is eligible for the soft limit reclaim under the given root
  * hierarchy if
- * a) it is over its soft limit
- * b) any parent up the hierarchy is over its soft limit
+ * a) it is over its soft limit
+ * b) any parent up the hierarchy is over its soft limit
  *
  * If the given group doesn't have any children over the limit then it
  * doesn't make any sense to iterate its subtree.
@@ -1904,7 +1904,7 @@ mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
 	 * If any parent up to the root in the hierarchy is over its soft limit
	 * then we have to obey and reclaim from this group as well.
	 */
-	while((parent = parent_mem_cgroup(parent))) {
+	while ((parent = parent_mem_cgroup(parent))) {
 		if (res_counter_soft_limit_excess(&parent->res))
			return VISIT;
		if (parent == root)
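The two hunks above state the eligibility rule for soft limit reclaim: visit a group when it or any ancestor up to the reclaim root is over its soft limit, and skip the whole subtree when none of its children is in excess. A standalone sketch of that decision follows (not part of the patch; the SKIP/SKIP_TREE values and helper names are assumptions, not the kernel's API):

#include <stdio.h>

enum visit { SKIP, SKIP_TREE, VISIT };

struct group {
	struct group *parent;
	unsigned long usage, soft_limit;
	int children_in_excess;
};

static int over_soft_limit(const struct group *g)
{
	return g->usage > g->soft_limit;
}

static enum visit soft_reclaim_eligible(const struct group *g,
					const struct group *root)
{
	const struct group *p;

	if (over_soft_limit(g))				/* rule a) */
		return VISIT;

	for (p = g->parent; p; p = p->parent) {		/* rule b) */
		if (over_soft_limit(p))
			return VISIT;
		if (p == root)
			break;
	}

	/* Nothing above is in excess; skip the whole subtree if no child is. */
	return g->children_in_excess ? SKIP : SKIP_TREE;
}

int main(void)
{
	struct group root = { .soft_limit = 1000, .usage = 2000 };
	struct group leaf = { .parent = &root, .soft_limit = 100, .usage = 10 };

	/* root is in excess, so the leaf must be visited as well */
	printf("%d\n", soft_reclaim_eligible(&leaf, &root) == VISIT);
	return 0;
}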
@@ -2309,7 +2309,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 		flush_work(&stock->work);
 	}
 out:
-	put_online_cpus();
+	put_online_cpus();
 }
 
 /*
@@ -2741,7 +2741,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
	 * before USED bit, we need memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
-	 */
+	 */
 	smp_wmb();
	SetPageCgroupUsed(pc);
 
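The comment above the smp_wmb() describes a publish/consume ordering: pc->mem_cgroup has to be visible before the USED bit, and readers test the bit before dereferencing the pointer, pairing with a read barrier. The standalone sketch below (not part of the patch) shows the same ordering with C11 release/acquire atomics standing in for smp_wmb() and the pairing read-side barrier:

#include <stdatomic.h>
#include <stddef.h>

struct page_cgroup {
	void *mem_cgroup;	/* payload published by the writer */
	atomic_int used;	/* stands in for the USED flag bit */
};

static void commit_charge(struct page_cgroup *pc, void *memcg)
{
	pc->mem_cgroup = memcg;
	/*
	 * Release ordering plays the role of smp_wmb(): the pointer store
	 * above becomes visible before the flag set below.
	 */
	atomic_store_explicit(&pc->used, 1, memory_order_release);
}

static void *lookup_charge(struct page_cgroup *pc)
{
	/*
	 * Acquire ordering plays the role of the pairing read barrier on
	 * the reader side: whoever sees the flag also sees the pointer.
	 */
	if (atomic_load_explicit(&pc->used, memory_order_acquire))
		return pc->mem_cgroup;
	return NULL;
}

int main(void)
{
	static int memcg;
	struct page_cgroup pc = { 0 };

	commit_charge(&pc, &memcg);
	return lookup_charge(&pc) == &memcg ? 0 : 1;
}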
@@ -3483,9 +3483,9 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
 	 * the page allocator. Therefore, the following sequence when backed by
	 * the SLUB allocator:
	 *
-	 * memcg_stop_kmem_account();
-	 * kmalloc(<large_number>)
-	 * memcg_resume_kmem_account();
+	 * memcg_stop_kmem_account();
+	 * kmalloc(<large_number>)
+	 * memcg_resume_kmem_account();
 	 *
	 * would effectively ignore the fact that we should skip accounting,
	 * since it will drive us directly to this function without passing
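The comment in the hunk above explains why this charge path must honour the "stop accounting" state itself: a large kmalloc() bypasses the slab caches and reaches the page-charging function directly, so a flag checked only inside the cache allocator would be skipped. A standalone sketch of such a per-task suppression counter follows (not part of the patch; every name here is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Stands in for a counter kept on 'current'; thread-local is the analogy. */
static __thread unsigned int kmem_account_disabled;

static void stop_kmem_account(void)   { kmem_account_disabled++; }
static void resume_kmem_account(void) { kmem_account_disabled--; }

/*
 * Large allocations skip the slab caches and land directly in the page
 * charging path, so this path has to check the flag itself.
 */
static bool newpage_charge(size_t size)
{
	if (kmem_account_disabled)
		return false;	/* accounting suspended: charge nothing */
	printf("charging %zu bytes to the current memcg\n", size);
	return true;
}

int main(void)
{
	stop_kmem_account();
	newpage_charge(1 << 20);	/* must not be charged */
	resume_kmem_account();
	newpage_charge(1 << 20);	/* charged normally */
	return 0;
}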
@@ -4514,7 +4514,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 					MEM_CGROUP_RECLAIM_SHRINK);
 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
-		if (curusage >= oldusage)
+		if (curusage >= oldusage)
 			retry_count--;
		else
			oldusage = curusage;
@@ -4535,7 +4535,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 	int enlarge = 0;
 
 	/* see mem_cgroup_resize_res_limit */
-	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
+	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
 	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
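Both resize hunks above rely on the same retry pattern: reclaim, re-read the usage counter, and consume one of the limited retries only when usage failed to drop, otherwise record the new usage as the baseline and keep going. A self-contained sketch of that loop (not part of the patch; limits, numbers and helpers are made up):

#include <stdio.h>

#define RECLAIM_RETRIES 5

static unsigned long long usage;	/* stands in for RES_USAGE           */

static void reclaim_some(void)		/* stands in for one reclaim attempt */
{
	if (usage >= 100)
		usage -= 100;
}

static int resize_limit(unsigned long long new_limit, int children)
{
	int retry_count = children * RECLAIM_RETRIES;
	unsigned long long oldusage = usage, curusage;

	while (retry_count && usage > new_limit) {
		reclaim_some();
		curusage = usage;
		if (curusage >= oldusage)
			retry_count--;		/* no progress: burn a retry   */
		else
			oldusage = curusage;	/* progress: note new baseline */
	}
	return usage <= new_limit ? 0 : -1;
}

int main(void)
{
	usage = 1000;
	printf("resize %s\n", resize_limit(500, 2) ? "failed" : "succeeded");
	return 0;
}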
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fa91c20fe4b7..76d1d5eaeec3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2205,12 +2205,12 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 
 	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
 	/*
-	 * memcg iterator might race with other reclaimer or start from
-	 * a incomplete tree walk so the tree walk in __shrink_zone
-	 * might have missed groups that are above the soft limit. Try
-	 * another loop to catch up with others. Do it just once to
-	 * prevent from reclaim latencies when other reclaimers always
-	 * preempt this one.
+	 * memcg iterator might race with other reclaimer or start from
+	 * a incomplete tree walk so the tree walk in __shrink_zone
+	 * might have missed groups that are above the soft limit. Try
+	 * another loop to catch up with others. Do it just once to
+	 * prevent from reclaim latencies when other reclaimers always
+	 * preempt this one.
 	 */
 	if (do_soft_reclaim && !scanned_groups)
 		__shrink_zone(zone, sc, do_soft_reclaim);
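The shrink_zone() hunk above bounds the cost of losing the race for the shared memcg iterator: when the first tree walk visited no groups, it is repeated exactly once and never more. A self-contained sketch of that control flow (not part of the patch; the types and walk_groups() are stand-ins for the kernel's __shrink_zone()):

#include <stdbool.h>
#include <stdio.h>

struct zone { int id; };
struct scan_control { int priority; };

/* Toy stand-in for __shrink_zone(): pretend the first walk comes up empty. */
static unsigned long walk_groups(struct zone *z, struct scan_control *sc,
				 bool soft_reclaim)
{
	static int calls;

	(void)z; (void)sc; (void)soft_reclaim;
	return calls++ ? 3 : 0;
}

static void shrink_zone_sketch(struct zone *z, struct scan_control *sc,
			       bool do_soft_reclaim)
{
	unsigned long scanned_groups = walk_groups(z, sc, do_soft_reclaim);

	/*
	 * The shared memcg iterator may start from an incomplete walk or
	 * race with another reclaimer, so the first pass can legitimately
	 * visit nothing.  Retry exactly once to catch up; never more, so a
	 * reclaimer that keeps losing the race does not spin.
	 */
	if (do_soft_reclaim && !scanned_groups)
		scanned_groups = walk_groups(z, sc, do_soft_reclaim);

	printf("scanned %lu groups\n", scanned_groups);
}

int main(void)
{
	struct zone z = { 0 };
	struct scan_control sc = { 12 };

	shrink_zone_sketch(&z, &sc, true);
	return 0;
}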