author:    Jason Low <jason.low2@hp.com>                      2015-04-15 19:14:08 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>     2015-04-15 19:35:18 -0400
commit:    4db0c3c2983cc6b7a08a33542af5e14de8a9258c
tree:      66cfeaeae432f904c09af45e030b7e1e00476011 /mm/memcontrol.c
parent:    9d8c47e4bb1c20dbceee437f9fa7d76dafee80a2
mm: remove rest of ACCESS_ONCE() usages
We converted some of the uses of ACCESS_ONCE to READ_ONCE in the mm/
tree, since ACCESS_ONCE does not work reliably on non-scalar types.
This patch removes the remaining uses of ACCESS_ONCE and switches the
read accesses to the new READ_ONCE API. This keeps things cleaner than
using separate/multiple sets of APIs.
Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
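To make the motivation concrete: ACCESS_ONCE() works by casting to a volatile-qualified lvalue, and certain GCC releases were known to silently drop the volatile qualifier when the type is non-scalar, whereas READ_ONCE() funnels every access through a size-dispatched copy helper. The following is a rough, self-contained userspace sketch of that difference; it only approximates the kernel's include/linux/compiler.h definitions, and struct pair plus the variables in main() are hypothetical stand-ins for illustration, not kernel code.

/*
 * Rough userspace sketch of the ACCESS_ONCE()/READ_ONCE() difference.
 * These definitions approximate include/linux/compiler.h; they are not
 * the kernel's exact code. Requires GCC/Clang (statement expressions,
 * __typeof__).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Old style: volatile lvalue cast; only dependable for scalar types. */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

/* READ_ONCE() copies through a size-dispatched helper instead, so it
 * behaves the same for scalars and for aggregates. */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	default: /* larger objects: plain byte copy in this sketch */
		memcpy(res, (const void *)p, size);
	}
}

#define READ_ONCE(x)							\
({									\
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u;	\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})

/* Hypothetical stand-in for a non-scalar type such as pte_t. */
struct pair { unsigned long lo, hi; };

int main(void)
{
	unsigned long soft_limit = 128;		/* scalar, like memcg->soft_limit */
	struct pair p = { .lo = 1, .hi = 2 };	/* aggregate */

	unsigned long limit = READ_ONCE(soft_limit);	/* single 8-byte volatile load */
	struct pair snap = READ_ONCE(p);		/* also well-defined for aggregates */

	printf("limit=%lu snap={%lu,%lu}\n", limit, snap.lo, snap.hi);
	return 0;
}

The union trick is what lets a single macro cover both cases: the helper sees the object as raw bytes, while the statement expression still yields a value of the original type.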
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74a9641d8f9f..14c2f2017e37 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -674,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 {
 	unsigned long nr_pages = page_counter_read(&memcg->memory);
-	unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 	unsigned long excess = 0;
 
 	if (nr_pages > soft_limit)
@@ -1042,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 			goto out_unlock;
 
 		do {
-			pos = ACCESS_ONCE(iter->position);
+			pos = READ_ONCE(iter->position);
 			/*
 			 * A racing update may change the position and
 			 * put the last reference, hence css_tryget(),
@@ -1359,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 	unsigned long limit;
 
 	count = page_counter_read(&memcg->memory);
-	limit = ACCESS_ONCE(memcg->memory.limit);
+	limit = READ_ONCE(memcg->memory.limit);
 	if (count < limit)
 		margin = limit - count;
 
 	if (do_swap_account) {
 		count = page_counter_read(&memcg->memsw);
-		limit = ACCESS_ONCE(memcg->memsw.limit);
+		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
 	}
@@ -2637,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;
 
 	memcg = get_mem_cgroup_from_mm(current->mm);
-	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)
 		goto out;
 
@@ -5007,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 	 * tunable will only affect upcoming migrations, not the current one.
 	 * So we need to save it, and keep it going.
 	 */
-	move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
 	if (move_flags) {
 		struct mm_struct *mm;
 		struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5241,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 static int memory_low_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long low = ACCESS_ONCE(memcg->low);
+	unsigned long low = READ_ONCE(memcg->low);
 
 	if (low == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5271,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 static int memory_high_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long high = ACCESS_ONCE(memcg->high);
+	unsigned long high = READ_ONCE(memcg->high);
 
 	if (high == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5301,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+	unsigned long max = READ_ONCE(memcg->memory.limit);
 
 	if (max == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");