Diffstat (limited to 'mm/memcontrol.c')

 mm/memcontrol.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 20 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8afed2819b8f..df1e180f6c30 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1910,32 +1910,59 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  * If there is, we take a lock.
  */
 
+void __mem_cgroup_begin_update_page_stat(struct page *page,
+				bool *locked, unsigned long *flags)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup(page);
+again:
+	memcg = pc->mem_cgroup;
+	if (unlikely(!memcg || !PageCgroupUsed(pc)))
+		return;
+	/*
+	 * If this memory cgroup is not under account moving, we don't
+	 * need to take move_lock_page_cgroup(). Because we already hold
+	 * rcu_read_lock(), any calls to move_account will be delayed until
+	 * rcu_read_unlock() if mem_cgroup_stealed() == true.
+	 */
+	if (!mem_cgroup_stealed(memcg))
+		return;
+
+	move_lock_mem_cgroup(memcg, flags);
+	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
+		move_unlock_mem_cgroup(memcg, flags);
+		goto again;
+	}
+	*locked = true;
+}
+
+void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
+{
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	/*
+	 * It's guaranteed that pc->mem_cgroup never changes while the
+	 * lock is held, because any routine that modifies pc->mem_cgroup
+	 * must take move_lock_page_cgroup().
+	 */
+	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
+}
+
 void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
-	bool need_unlock = false;
 	unsigned long uninitialized_var(flags);
 
 	if (mem_cgroup_disabled())
 		return;
-again:
-	rcu_read_lock();
+
 	memcg = pc->mem_cgroup;
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
-		goto out;
-	/* pc->mem_cgroup is unstable ? */
-	if (unlikely(mem_cgroup_stealed(memcg))) {
-		/* take a lock against to access pc->mem_cgroup */
-		move_lock_mem_cgroup(memcg, &flags);
-		if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
-			move_unlock_mem_cgroup(memcg, &flags);
-			rcu_read_unlock();
-			goto again;
-		}
-		need_unlock = true;
-	}
+		return;
 
 	switch (idx) {
 	case MEMCG_NR_FILE_MAPPED:
@@ -1950,11 +1977,6 @@ again:
 	}
 
 	this_cpu_add(memcg->stat->count[idx], val);
-
-out:
-	if (unlikely(need_unlock))
-		move_unlock_mem_cgroup(memcg, &flags);
-	rcu_read_unlock();
 }
 
 /*
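For context, callers now bracket per-memcg page statistic updates with the
new begin/end pair while holding rcu_read_lock(). Below is a minimal sketch
of the intended call pattern; it assumes non-underscored wrappers
(mem_cgroup_begin_update_page_stat()/mem_cgroup_end_update_page_stat()) that
check mem_cgroup_disabled() and take/drop the RCU read lock around the
functions added above, and the rmap call site is illustrative rather than
part of this hunk:

	void page_add_file_rmap(struct page *page)
	{
		bool locked;
		unsigned long flags;

		/*
		 * Assumed wrapper: takes rcu_read_lock(), then the per-memcg
		 * move_lock only if the memcg is under account moving
		 * (mem_cgroup_stealed()), setting 'locked' in that case.
		 */
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
		}
		/*
		 * Assumed wrapper: drops the move_lock only if 'locked' was
		 * set by begin, then rcu_read_unlock().
		 */
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}

This keeps the common case (no cgroup move in flight) down to an RCU read
section plus a this_cpu_add(), taking the move_lock only for the window in
which pc->mem_cgroup is actually unstable.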