author     Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-14 19:02:28 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-14 19:02:28 -0400
commit     1db3706b05b11abcf2673ffbed5ad43b4c90ed11 (patch)
tree       09fa867d9d9db6ed475eaa889da143603441e1a6 /mm/memcontrol.c
parent     ad30a2bbdc20cf0111156e2aa6d2cc3e3c0d1893 (diff)
parent     63d15148b6058ab0037343390e8918503ed81968 (diff)
Merge branch 'zImage_DTB_append' of git://git.linaro.org/people/nico/linux into devel-stable
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  26
1 file changed, 7 insertions, 19 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 930de9437271..ebd1e86bef1c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1841,29 +1841,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
-        int lock_count = -1;
         struct mem_cgroup *iter, *failed = NULL;
         bool cond = true;
 
         for_each_mem_cgroup_tree_cond(iter, mem, cond) {
-                bool locked = iter->oom_lock;
-
-                iter->oom_lock = true;
-                if (lock_count == -1)
-                        lock_count = iter->oom_lock;
-                else if (lock_count != locked) {
+                if (iter->oom_lock) {
                         /*
                          * this subtree of our hierarchy is already locked
                          * so we cannot give a lock.
                          */
-                        lock_count = 0;
                         failed = iter;
                         cond = false;
-                }
+                } else
+                        iter->oom_lock = true;
         }
 
         if (!failed)
-                goto done;
+                return true;
 
         /*
          * OK, we failed to lock the whole subtree so we have to clean up
@@ -1877,8 +1871,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
                 }
                 iter->oom_lock = false;
         }
-done:
-        return lock_count;
+        return false;
 }
 
 /*
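The hunks above convert mem_cgroup_oom_lock() from the counter-style check to a plain try-lock over the subtree, with rollback on failure. Below is a minimal, self-contained sketch of that pattern (plain C, with a hypothetical struct node array standing in for the memcg hierarchy walk; it is not the kernel code itself):

```c
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for one memcg in the hierarchy walk. */
struct node {
        bool oom_lock;
};

/*
 * Try to take the "oom lock" on every node, in walk order.  If some
 * node is already locked, undo the locks taken so far and report
 * failure -- mirroring the all-or-nothing behaviour of the patched
 * mem_cgroup_oom_lock().
 */
bool try_lock_all(struct node *nodes, size_t count)
{
        size_t failed = count;  /* count acts as the "no failure" sentinel */
        size_t i;

        for (i = 0; i < count; i++) {
                if (nodes[i].oom_lock) {
                        /* this part of the hierarchy is already locked */
                        failed = i;
                        break;
                }
                nodes[i].oom_lock = true;
        }

        if (failed == count)
                return true;    /* every node locked: we own the whole set */

        /* Roll back only the locks we took before hitting the locked node. */
        for (i = 0; i < failed; i++)
                nodes[i].oom_lock = false;

        return false;
}
```

The kernel version re-walks the hierarchy with for_each_mem_cgroup_tree_cond() and stops at the failing memcg instead of indexing an array, but the invariant is the same: the function returns true only if it locked the entire subtree, and otherwise releases every lock it took. The remaining hunks are in drain_all_stock():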
@@ -2169,13 +2162,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 
         /* Notify other cpus that system-wide "drain" is running */
         get_online_cpus();
-        /*
-         * Get a hint for avoiding draining charges on the current cpu,
-         * which must be exhausted by our charging. It is not required that
-         * this be a precise check, so we use raw_smp_processor_id() instead of
-         * getcpu()/putcpu().
-         */
-        curcpu = raw_smp_processor_id();
+        curcpu = get_cpu();
         for_each_online_cpu(cpu) {
                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                 struct mem_cgroup *mem;
@@ -2192,6 +2179,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
                                 schedule_work_on(cpu, &stock->work);
                 }
         }
+        put_cpu();
 
         if (!sync)
                 goto out;
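These two hunks replace the raw_smp_processor_id() hint with a get_cpu()/put_cpu() pair around the drain loop. A kernel-style sketch of the pattern follows (not the actual drain_all_stock() body and not buildable outside a kernel tree; drain_local() and drain_remote() are hypothetical stubs standing in for draining the per-CPU stock in place vs. deferring via schedule_work_on()):

```c
#include <linux/smp.h>      /* get_cpu(), put_cpu() */
#include <linux/cpumask.h>  /* for_each_online_cpu() */

/* Hypothetical stubs for handling one CPU's cached charges. */
static void drain_local(void) { /* drain this CPU's cache directly */ }
static void drain_remote(int cpu) { /* kick a worker on that CPU */ }

static void drain_everywhere(void)
{
        int curcpu, cpu;

        /*
         * get_cpu() disables preemption and returns the CPU we are
         * running on, so curcpu cannot go stale while the loop runs;
         * the old raw_smp_processor_id() value was only a hint.
         */
        curcpu = get_cpu();
        for_each_online_cpu(cpu) {
                if (cpu == curcpu)
                        drain_local();
                else
                        drain_remote(cpu);
        }
        put_cpu();      /* balance get_cpu(): re-enable preemption */
}
```

With get_cpu() the comparison against curcpu stays valid for the whole loop, and the put_cpu() added by the second hunk re-enables preemption once every online CPU has been handled.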