Diffstat (limited to 'mm/memcontrol.c')

-rw-r--r--	mm/memcontrol.c | 38 +++++++++++++++++---------------------
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ec4e7ca4cd..ebd1e86bef1c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1841,29 +1841,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
-	int lock_count = -1;
 	struct mem_cgroup *iter, *failed = NULL;
 	bool cond = true;
 
 	for_each_mem_cgroup_tree_cond(iter, mem, cond) {
-		bool locked = iter->oom_lock;
-
-		iter->oom_lock = true;
-		if (lock_count == -1)
-			lock_count = iter->oom_lock;
-		else if (lock_count != locked) {
+		if (iter->oom_lock) {
 			/*
 			 * this subtree of our hierarchy is already locked
 			 * so we cannot give a lock.
 			 */
-			lock_count = 0;
 			failed = iter;
 			cond = false;
-		}
+		} else
+			iter->oom_lock = true;
 	}
 
 	if (!failed)
-		goto done;
+		return true;
 
 	/*
 	 * OK, we failed to lock the whole subtree so we have to clean up
@@ -1877,8 +1871,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 		}
 		iter->oom_lock = false;
 	}
-done:
-	return lock_count;
+	return false;
 }
 
 /*
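The rewritten mem_cgroup_oom_lock() above replaces the old lock_count bookkeeping with a fail-fast walk: take each oom_lock on the way down and bail out as soon as any memcg in the subtree already holds one, then unwind. A minimal userspace sketch of that take-or-roll-back pattern (the flat nodes array and the try_lock_subtree name are illustrative stand-ins for the kernel's for_each_mem_cgroup_tree_cond() hierarchy walk):

#include <stdbool.h>
#include <stddef.h>

struct node {
	bool oom_lock;
};

/*
 * Take oom_lock on every node of a subtree, or take none at all: on
 * the first node found already locked, roll back everything taken so
 * far.  The flat array stands in for the kernel's hierarchy iterator.
 */
static bool try_lock_subtree(struct node *nodes, size_t n)
{
	size_t i, failed = n;

	for (i = 0; i < n; i++) {
		if (nodes[i].oom_lock) {
			failed = i;	/* this subtree is already locked */
			break;
		}
		nodes[i].oom_lock = true;
	}

	if (failed == n)
		return true;	/* whole subtree locked */

	/* undo the locks taken before hitting the contended node */
	for (i = 0; i < failed; i++)
		nodes[i].oom_lock = false;
	return false;
}

int main(void)
{
	struct node tree[3] = { {false}, {true}, {false} };

	/* fails and rolls back: tree[1] is already held */
	return try_lock_subtree(tree, 3) ? 0 : 1;
}

The contract stays binary, which is what the patch's early-return paths encode: true means the whole subtree is now locked, false means no lock is held at all.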
@@ -2091,6 +2084,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	(0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -2168,13 +2162,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
-	/*
-	 * Get a hint for avoiding draining charges on the current cpu,
-	 * which must be exhausted by our charging. It is not required that
-	 * this be a precise check, so we use raw_smp_processor_id() instead of
-	 * getcpu()/putcpu().
-	 */
-	curcpu = raw_smp_processor_id();
+	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *mem;
@@ -2191,14 +2179,14 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 			schedule_work_on(cpu, &stock->work);
 		}
 	}
+	put_cpu();
 
 	if (!sync)
 		goto out;
 
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (mem_cgroup_same_or_subtree(root_mem, stock->cached) &&
-			test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
 			flush_work(&stock->work);
 	}
 out:
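The sync pass above can now flush on FLUSHING_CACHED_CHARGE alone, without the mem_cgroup_same_or_subtree() filter, because the new mutex admits only one drainer at a time, so every flagged stock belongs to the current drain. A hedged C11 sketch of that flag-guarded scheduling protocol (the set and clear sides sit outside this hunk, so their exact placement here is my assumption):

#include <stdatomic.h>
#include <stdbool.h>

/* userspace stand-in for one per-cpu memcg_stock_pcp slot */
struct stock {
	atomic_bool flushing;		/* plays FLUSHING_CACHED_CHARGE */
	unsigned long cached;
};

/* work function: drain the slot, clearing the flag only when done */
static void drain_work(struct stock *s)
{
	s->cached = 0;
	atomic_store(&s->flushing, false);
}

/*
 * drainer side: flag a slot before queueing its work; a slot whose
 * flag is already set has a drain in flight, so nothing new is queued.
 */
static bool queue_drain(struct stock *s)
{
	if (atomic_exchange(&s->flushing, true))
		return false;		/* drain already pending */
	/* ... schedule_work_on() equivalent would run drain_work(s) ... */
	return true;
}

/* sync pass: any slot still flagged needs a flush_work()-style wait */
static bool needs_flush(struct stock *s)
{
	return atomic_load(&s->flushing);
}

int main(void)
{
	struct stock s = { false, 32 };

	queue_drain(&s);		/* flags the slot */
	if (needs_flush(&s))
		drain_work(&s);		/* stand-in for flush_work() */
	return 0;
}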
@@ -2213,14 +2201,22 @@ out:
  */
 static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	drain_all_stock(root_mem, false);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /* This is a synchronous drain interface. */
 static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
+	mutex_lock(&percpu_charge_mutex);
 	drain_all_stock(root_mem, true);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
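Taken together, the _async and _sync wrappers above show why percpu_charge_mutex was introduced: the asynchronous path coalesces requests by dropping a drain when one is already running, while the synchronous force_empty path blocks until it can perform a full drain itself. A rough userspace analogue of that trylock-to-coalesce pattern (the pthread primitives and the drain_all() stub are mine, not the kernel's):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for drain_all_stock(); sync mirrors the kernel flag */
static void drain_all(bool sync)
{
	(void)sync;
	/* ... walk every worker's stock, schedule and maybe flush ... */
}

/* async: if a drain is already running, don't add another */
static void drain_async(void)
{
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;			/* coalesce with the drain in flight */
	drain_all(false);
	pthread_mutex_unlock(&charge_mutex);
}

/* sync: the caller needs a complete drain, so wait for the lock */
static void drain_sync(void)
{
	pthread_mutex_lock(&charge_mutex);
	drain_all(true);
	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	drain_async();	/* best effort, may be dropped */
	drain_sync();	/* guaranteed full drain */
	return 0;
}

The asymmetry is deliberate: an async caller that loses the trylock gets no guarantee the in-flight drain covered its cgroup, which is acceptable because drain_all_stock_async() is only a best-effort hint.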