author     Johannes Weiner <hannes@cmpxchg.org>            2014-10-09 18:28:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:59 -0400
commit     3fbe724424fb104aaca9973389b4a9df428c3e2a
tree       e7ea929957ae225de95624b6b59b85f8c34684e8  /mm/memcontrol.c
parent     aabfb57296e3dd9761e47736ec69305c95461d7d
mm: memcontrol: simplify detecting when the memory+swap limit is hit
When attempting to charge pages, we first charge the memory counter and
then the memory+swap counter. If one of the counters is at its limit, we
enter reclaim, but if it's the memory+swap counter, reclaim shouldn't swap
because that wouldn't change the situation. However, if the counters have
the same limits, we never get to the memory+swap limit. To know whether
reclaim should swap or not, there is a state flag that indicates whether
the limits are equal and whether hitting the memory limit implies hitting
the memory+swap limit.
Just try the memory+swap counter first.
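For readers skimming the change, here is a minimal stand-alone sketch of the ordering this patch switches to. It is illustration only, not kernel code: toy_counter, toy_charge and try_charge_order are invented names, not the kernel's res_counter API. The point it demonstrates is that once the memory+swap counter is charged first, the "don't bother swapping" decision falls out of which counter failed, so no separate memsw_is_minimum state flag is needed.

/*
 * Illustrative sketch only (hypothetical names, not the kernel's
 * res_counter interface).  Models the new charge ordering: try the
 * memory+swap counter first, then the memory counter.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_counter {
	unsigned long usage;
	unsigned long limit;
};

/* Returns true on success, false if the limit would be exceeded. */
static bool toy_charge(struct toy_counter *c, unsigned long pages)
{
	if (c->usage + pages > c->limit)
		return false;
	c->usage += pages;
	return true;
}

static void toy_uncharge(struct toy_counter *c, unsigned long pages)
{
	c->usage -= pages;
}

/*
 * Charge memory+swap first.  If that counter is the one at its limit,
 * swapping cannot lower memory+swap usage, so reclaim should not swap;
 * this is exactly what the old memsw_is_minimum flag tried to encode
 * indirectly for the equal-limits case.
 */
static bool try_charge_order(struct toy_counter *mem,
			     struct toy_counter *memsw,
			     unsigned long pages, bool *noswap)
{
	*noswap = false;
	if (!toy_charge(memsw, pages)) {
		*noswap = true;		/* memory+swap limit hit: swap won't help */
		return false;
	}
	if (!toy_charge(mem, pages)) {
		toy_uncharge(memsw, pages);	/* roll back the memsw charge */
		return false;		/* memory limit hit: swapping may help */
	}
	return true;
}

int main(void)
{
	/* Equal limits, as in the case the old flag existed for. */
	struct toy_counter mem = { .usage = 90, .limit = 100 };
	struct toy_counter memsw = { .usage = 90, .limit = 100 };
	bool noswap;

	if (!try_charge_order(&mem, &memsw, 20, &noswap))
		printf("charge failed, noswap=%d\n", noswap);	/* prints noswap=1 */
	return 0;
}

With equal limits the memory+swap counter now fails first, so the caller learns immediately that swapping is pointless, without consulting any cached comparison of the two limits.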
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Dave Hansen <dave@sr71.net>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  47
1 file changed, 13 insertions(+), 34 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fff511e25bb2..9cda99dfac4f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -318,9 +318,6 @@ struct mem_cgroup {
 	/* OOM-Killer disable */
 	int		oom_kill_disable;
 
-	/* set when res.limit == memsw.limit */
-	bool		memsw_is_minimum;
-
 	/* protect arrays of thresholds */
 	struct mutex thresholds_lock;
 
@@ -1818,8 +1815,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
 
 	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
 		noswap = true;
-	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
-		noswap = true;
 
 	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
 		if (loop)
@@ -2557,16 +2552,17 @@ retry:
 		goto done;
 
 	size = batch * PAGE_SIZE;
-	if (!res_counter_charge(&memcg->res, size, &fail_res)) {
-		if (!do_swap_account)
+	if (!do_swap_account ||
+	    !res_counter_charge(&memcg->memsw, size, &fail_res)) {
+		if (!res_counter_charge(&memcg->res, size, &fail_res))
 			goto done_restock;
-		if (!res_counter_charge(&memcg->memsw, size, &fail_res))
-			goto done_restock;
-		res_counter_uncharge(&memcg->res, size);
+		if (do_swap_account)
+			res_counter_uncharge(&memcg->memsw, size);
+		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
+	} else {
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
-	} else
-		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
+	}
 
 	if (batch > nr_pages) {
 		batch = nr_pages;
@@ -3629,7 +3625,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 				   unsigned long long val)
 {
 	int retry_count;
-	u64 memswlimit, memlimit;
 	int ret = 0;
 	int children = mem_cgroup_count_children(memcg);
 	u64 curusage, oldusage;
@@ -3656,24 +3651,16 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
 		 */
 		mutex_lock(&set_limit_mutex);
-		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
-		if (memswlimit < val) {
+		if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) {
 			ret = -EINVAL;
 			mutex_unlock(&set_limit_mutex);
 			break;
 		}
 
-		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-		if (memlimit < val)
+		if (res_counter_read_u64(&memcg->res, RES_LIMIT) < val)
 			enlarge = 1;
 
 		ret = res_counter_set_limit(&memcg->res, val);
-		if (!ret) {
-			if (memswlimit == val)
-				memcg->memsw_is_minimum = true;
-			else
-				memcg->memsw_is_minimum = false;
-		}
 		mutex_unlock(&set_limit_mutex);
 
 		if (!ret)
@@ -3698,7 +3685,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 					 unsigned long long val)
 {
 	int retry_count;
-	u64 memlimit, memswlimit, oldusage, curusage;
+	u64 oldusage, curusage;
 	int children = mem_cgroup_count_children(memcg);
 	int ret = -EBUSY;
 	int enlarge = 0;
@@ -3717,22 +3704,14 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
 		 */
 		mutex_lock(&set_limit_mutex);
-		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-		if (memlimit > val) {
+		if (res_counter_read_u64(&memcg->res, RES_LIMIT) > val) {
 			ret = -EINVAL;
 			mutex_unlock(&set_limit_mutex);
 			break;
 		}
-		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
-		if (memswlimit < val)
+		if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val)
 			enlarge = 1;
 		ret = res_counter_set_limit(&memcg->memsw, val);
-		if (!ret) {
-			if (memlimit == val)
-				memcg->memsw_is_minimum = true;
-			else
-				memcg->memsw_is_minimum = false;
-		}
 		mutex_unlock(&set_limit_mutex);
 
 		if (!ret)