Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	82
1 file changed, 55 insertions, 27 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a5d3ad..ddffc74cdebe 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,6 +35,7 @@
 #include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -359,7 +360,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +736,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 				struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *mem = NULL;
 
@@ -1663,15 +1664,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
 	/* If memsw_is_minimum==1, swap-out is of-no-use. */
-	if (root_mem->memsw_is_minimum)
+	if (!check_soft && root_mem->memsw_is_minimum)
 		noswap = true;
 
 	while (1) {
 		victim = mem_cgroup_select_victim(root_mem);
 		if (victim == root_mem) {
 			loop++;
-			if (loop >= 1)
-				drain_all_stock_async();
+			/*
+			 * We are not draining per cpu cached charges during
+			 * soft limit reclaim because global reclaim doesn't
+			 * care about charges. It tries to free some memory and
+			 * charges will not give any.
+			 */
+			if (!check_soft && loop >= 1)
+				drain_all_stock_async(root_mem);
 			if (loop >= 2) {
 				/*
 				 * If we have not been able to reclaim
@@ -1934,9 +1941,11 @@ struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 	struct work_struct work;
+	unsigned long flags;
+#define FLUSHING_CACHED_CHARGE	(0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1993,7 @@ static void drain_local_stock(struct work_struct *dummy)
 {
 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
 	drain_stock(stock);
+	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
 /*
@@ -2008,26 +2018,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  * expects some charges will be back to res_counter later but cannot wait for
  * it.
  */
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-	int cpu;
-	/* This function is for scheduling "drain" in asynchronous way.
-	 * The result of "drain" is not directly handled by callers. Then,
-	 * if someone is calling drain, we don't have to call drain more.
-	 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
-	 * there is a race. We just do loose check here.
+	int cpu, curcpu;
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
 	 */
-	if (atomic_read(&memcg_drain_count))
+	if (!mutex_trylock(&percpu_charge_mutex))
 		return;
 	/* Notify other cpus that system-wide "drain" is running */
-	atomic_inc(&memcg_drain_count);
 	get_online_cpus();
+	/*
+	 * Get a hint for avoiding draining charges on the current cpu,
+	 * which must be exhausted by our charging. It is not required that
+	 * this be a precise check, so we use raw_smp_processor_id() instead of
+	 * getcpu()/putcpu().
+	 */
+	curcpu = raw_smp_processor_id();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		schedule_work_on(cpu, &stock->work);
+		struct mem_cgroup *mem;
+
+		if (cpu == curcpu)
+			continue;
+
+		mem = stock->cached;
+		if (!mem)
+			continue;
+		if (mem != root_mem) {
+			if (!root_mem->use_hierarchy)
+				continue;
+			/* check whether "mem" is under tree of "root_mem" */
+			if (!css_is_ancestor(&mem->css, &root_mem->css))
+				continue;
+		}
+		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+			schedule_work_on(cpu, &stock->work);
 	}
 	put_online_cpus();
-	atomic_dec(&memcg_drain_count);
+	mutex_unlock(&percpu_charge_mutex);
 	/* We don't wait for flush_work */
 }
 
@@ -2035,9 +2064,9 @@ static void drain_all_stock_async(void)
 static void drain_all_stock_sync(void)
 {
 	/* called when force_empty is called */
-	atomic_inc(&memcg_drain_count);
+	mutex_lock(&percpu_charge_mutex);
 	schedule_on_each_cpu(drain_local_stock);
-	atomic_dec(&memcg_drain_count);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
@@ -4640,6 +4669,7 @@ static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "numa_stat",
 		.open = mem_control_numa_stat_open,
+		.mode = S_IRUGO,
 	},
 #endif
 };
@@ -5414,18 +5444,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct cgroup *old_cont,
 				struct task_struct *p)
 {
-	struct mm_struct *mm;
+	struct mm_struct *mm = get_task_mm(p);
 
-	if (!mc.to)
-		/* no need to move charge */
-		return;
-
-	mm = get_task_mm(p);
 	if (mm) {
-		mem_cgroup_move_charge(mm);
+		if (mc.to)
+			mem_cgroup_move_charge(mm);
+		put_swap_token(mm);
 		mmput(mm);
 	}
-	mem_cgroup_clear_mc();
+	if (mc.to)
+		mem_cgroup_clear_mc();
 }
 #else /* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
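
For readers who want to see the drain-scheduling pattern from the hunks above outside of kernel context, here is a minimal userspace sketch. The names (struct stock_pcp, drain_local, drain_all_async) are invented for illustration, and pthreads/stdatomic stand in for workqueues and per-cpu data; it is not the kernel implementation. It mirrors the pattern this patch introduces: a trylock on a shared mutex keeps concurrent drain requests from piling up work, and a per-worker flag set with an atomic test-and-set (the analogue of FLUSHING_CACHED_CHARGE) ensures at most one drain is queued per cache, with the worker clearing the flag once it has drained.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_WORKERS 4

struct stock_pcp {
	atomic_int cached;     /* per-"cpu" cached charges */
	atomic_bool flushing;  /* analogue of FLUSHING_CACHED_CHARGE */
};

static struct stock_pcp stock[NR_WORKERS];
static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Worker side: drain the local cache, then allow a new drain request. */
static void *drain_local(void *arg)
{
	struct stock_pcp *s = arg;

	atomic_store(&s->cached, 0);
	atomic_store(&s->flushing, false);  /* like clear_bit() in drain_local_stock() */
	return NULL;
}

/* Dispatcher side: queue a drain on every worker except the caller's own. */
static void drain_all_async(int curcpu)
{
	pthread_t tid[NR_WORKERS];
	bool queued[NR_WORKERS] = { false };

	/* If a drain is already in flight, don't add more work (cf. mutex_trylock). */
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;

	for (int cpu = 0; cpu < NR_WORKERS; cpu++) {
		if (cpu == curcpu || atomic_load(&stock[cpu].cached) == 0)
			continue;
		/* Queue work only if no drain is already pending for this cache. */
		if (!atomic_exchange(&stock[cpu].flushing, true)) {
			pthread_create(&tid[cpu], NULL, drain_local, &stock[cpu]);
			queued[cpu] = true;
		}
	}
	/* The kernel version returns without waiting; we join only so main()
	 * can print a stable result. */
	for (int cpu = 0; cpu < NR_WORKERS; cpu++)
		if (queued[cpu])
			pthread_join(tid[cpu], NULL);
	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_WORKERS; cpu++)
		atomic_store(&stock[cpu].cached, 32);

	drain_all_async(0);  /* drains every cache except "cpu" 0's */

	for (int cpu = 0; cpu < NR_WORKERS; cpu++)
		printf("cpu %d cached %d\n", cpu, atomic_load(&stock[cpu].cached));
	return 0;
}

Unlike the kernel's drain_all_stock_async(), which fires the per-cpu work and returns ("We don't wait for flush_work"), the joins here exist only to make the toy example deterministic before printing.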