author     Jason Low <jason.low2@hp.com>                    2015-04-15 19:14:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-04-15 19:35:18 -0400
commit     4db0c3c2983cc6b7a08a33542af5e14de8a9258c (patch)
tree       66cfeaeae432f904c09af45e030b7e1e00476011 /mm
parent     9d8c47e4bb1c20dbceee437f9fa7d76dafee80a2 (diff)
mm: remove rest of ACCESS_ONCE() usages
We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/
tree since it doesn't work reliably on non-scalar types.

This patch removes the rest of the usages of ACCESS_ONCE, and uses the
new READ_ONCE API for the read accesses.  This makes things cleaner,
instead of using separate/multiple sets of APIs.

Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
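For context, a minimal user-space sketch of the single-load behaviour these call sites rely on; read_once_ulong() is only a stand-in for the kernel's READ_ONCE(), and fault_around_bytes here is just an illustrative global, not code taken from this patch:

#include <stdio.h>

static unsigned long fault_around_bytes = 65536;	/* illustrative tunable */

/*
 * Stand-in for READ_ONCE() on an unsigned long: the volatile cast forces
 * exactly one load, so the compiler can neither re-read the location nor
 * elide the access when the value is used several times afterwards.
 */
static inline unsigned long read_once_ulong(const unsigned long *p)
{
	return *(const volatile unsigned long *)p;
}

int main(void)
{
	/* Snapshot the (potentially racy) value once, then derive from it. */
	unsigned long nr_pages = read_once_ulong(&fault_around_bytes) >> 12;

	printf("nr_pages = %lu\n", nr_pages);
	return 0;
}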
Diffstat (limited to 'mm')
-rw-r--r--   mm/huge_memory.c     4
-rw-r--r--   mm/internal.h        4
-rw-r--r--   mm/ksm.c            10
-rw-r--r--   mm/memcontrol.c     18
-rw-r--r--   mm/memory.c          2
-rw-r--r--   mm/mmap.c            8
-rw-r--r--   mm/page_alloc.c      6
-rw-r--r--   mm/rmap.c            6
-rw-r--r--   mm/slub.c            4
-rw-r--r--   mm/swap_state.c      2
-rw-r--r--   mm/swapfile.c        2
11 files changed, 33 insertions, 33 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4914e1b29fdb..1db93fbda06a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -183,7 +183,7 @@ static struct page *get_huge_zero_page(void)
 	struct page *zero_page;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-		return ACCESS_ONCE(huge_zero_page);
+		return READ_ONCE(huge_zero_page);
 
 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
@@ -202,7 +202,7 @@ retry:
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
 	preempt_enable();
-	return ACCESS_ONCE(huge_zero_page);
+	return READ_ONCE(huge_zero_page);
 }
 
 static void put_huge_zero_page(void)
diff --git a/mm/internal.h b/mm/internal.h
index edaab69a9c35..a25e359a4039 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -224,13 +224,13 @@ static inline unsigned long page_order(struct page *page)
  * PageBuddy() should be checked first by the caller to minimize race window,
  * and invalid values must be handled gracefully.
  *
- * ACCESS_ONCE is used so that if the caller assigns the result into a local
+ * READ_ONCE is used so that if the caller assigns the result into a local
  * variable and e.g. tests it for valid range before using, the compiler cannot
  * decide to remove the variable and inline the page_private(page) multiple
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define page_order_unsafe(page)		ACCESS_ONCE(page_private(page))
+#define page_order_unsafe(page)		READ_ONCE(page_private(page))
 
 static inline bool is_cow_mapping(vm_flags_t flags)
 {
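The comment changed above also explains why a once-only read matters for page_order_unsafe(): without it, the compiler may inline page_private(page) into every use and observe different values between the validity check and the final use. A small user-space sketch of that check-then-use pattern, with purely illustrative names (racy_order and READ_ONCE_UL are not kernel code):

#include <stdio.h>

static unsigned long racy_order;	/* imagine another thread updating this concurrently */

/* Stand-in for READ_ONCE() on an unsigned long. */
#define READ_ONCE_UL(x)	(*(const volatile unsigned long *)&(x))

static unsigned long pages_in_block(void)
{
	/* One load: 'order' cannot change between the test and the shift. */
	unsigned long order = READ_ONCE_UL(racy_order);

	if (order > 10)			/* validate the snapshot... */
		return 0;
	return 1UL << order;		/* ...and use the very same snapshot */
}

int main(void)
{
	racy_order = 3;
	printf("%lu pages\n", pages_in_block());
	return 0;
}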
diff --git a/mm/ksm.c b/mm/ksm.c
index 4162dce2eb44..7ee101eaacdf 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -542,7 +542,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	expected_mapping = (void *)stable_node +
 					(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 again:
-	kpfn = ACCESS_ONCE(stable_node->kpfn);
+	kpfn = READ_ONCE(stable_node->kpfn);
 	page = pfn_to_page(kpfn);
 
 	/*
@@ -551,7 +551,7 @@ again:
 	 * but on Alpha we need to be more careful.
 	 */
 	smp_read_barrier_depends();
-	if (ACCESS_ONCE(page->mapping) != expected_mapping)
+	if (READ_ONCE(page->mapping) != expected_mapping)
 		goto stale;
 
 	/*
@@ -577,14 +577,14 @@ again:
 		cpu_relax();
 	}
 
-	if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+	if (READ_ONCE(page->mapping) != expected_mapping) {
 		put_page(page);
 		goto stale;
 	}
 
 	if (lock_it) {
 		lock_page(page);
-		if (ACCESS_ONCE(page->mapping) != expected_mapping) {
+		if (READ_ONCE(page->mapping) != expected_mapping) {
 			unlock_page(page);
 			put_page(page);
 			goto stale;
@@ -600,7 +600,7 @@ stale:
 	 * before checking whether node->kpfn has been changed.
 	 */
 	smp_rmb();
-	if (ACCESS_ONCE(stable_node->kpfn) != kpfn)
+	if (READ_ONCE(stable_node->kpfn) != kpfn)
 		goto again;
 	remove_node_from_stable_tree(stable_node);
 	return NULL;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74a9641d8f9f..14c2f2017e37 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -674,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 {
 	unsigned long nr_pages = page_counter_read(&memcg->memory);
-	unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 	unsigned long excess = 0;
 
 	if (nr_pages > soft_limit)
@@ -1042,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 		goto out_unlock;
 
 	do {
-		pos = ACCESS_ONCE(iter->position);
+		pos = READ_ONCE(iter->position);
 		/*
 		 * A racing update may change the position and
 		 * put the last reference, hence css_tryget(),
@@ -1359,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 	unsigned long limit;
 
 	count = page_counter_read(&memcg->memory);
-	limit = ACCESS_ONCE(memcg->memory.limit);
+	limit = READ_ONCE(memcg->memory.limit);
 	if (count < limit)
 		margin = limit - count;
 
 	if (do_swap_account) {
 		count = page_counter_read(&memcg->memsw);
-		limit = ACCESS_ONCE(memcg->memsw.limit);
+		limit = READ_ONCE(memcg->memsw.limit);
 		if (count <= limit)
 			margin = min(margin, limit - count);
 	}
@@ -2637,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;
 
 	memcg = get_mem_cgroup_from_mm(current->mm);
-	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)
 		goto out;
 
@@ -5007,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 	 * tunable will only affect upcoming migrations, not the current one.
 	 * So we need to save it, and keep it going.
 	 */
-	move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
 	if (move_flags) {
 		struct mm_struct *mm;
 		struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5241,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 static int memory_low_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long low = ACCESS_ONCE(memcg->low);
+	unsigned long low = READ_ONCE(memcg->low);
 
 	if (low == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5271,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 static int memory_high_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long high = ACCESS_ONCE(memcg->high);
+	unsigned long high = READ_ONCE(memcg->high);
 
 	if (high == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
@@ -5301,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-	unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+	unsigned long max = READ_ONCE(memcg->memory.limit);
 
 	if (max == PAGE_COUNTER_MAX)
 		seq_puts(m, "max\n");
diff --git a/mm/memory.c b/mm/memory.c
index ac20b2a6a0c3..656593f73c8e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2845,7 +2845,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
 	struct vm_fault vmf;
 	int off;
 
-	nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
+	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
 	start_addr = max(address & mask, vma->vm_start);
diff --git a/mm/mmap.c b/mm/mmap.c
index 06a6076c92e5..e65cbe0d64fc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
  * acceptable for merging, so we can do all of this optimistically. But
- * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
  *
  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {
 	if (anon_vma_compatible(a, b)) {
-		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
 
 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
 			return anon_vma;
@@ -2100,7 +2100,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
 		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2108,7 +2108,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b849500640c..ebffa0e4a9c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1371,7 +1371,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	int to_drain, batch;
 
 	local_irq_save(flags);
-	batch = ACCESS_ONCE(pcp->batch);
+	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
 		free_pcppages_bulk(zone, to_drain, pcp);
@@ -1570,7 +1570,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		unsigned long batch = ACCESS_ONCE(pcp->batch);
+		unsigned long batch = READ_ONCE(pcp->batch);
 		free_pcppages_bulk(zone, batch, pcp);
 		pcp->count -= batch;
 	}
@@ -6207,7 +6207,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 	mask <<= (BITS_PER_LONG - bitidx - 1);
 	flags <<= (BITS_PER_LONG - bitidx - 1);
 
-	word = ACCESS_ONCE(bitmap[word_bitidx]);
+	word = READ_ONCE(bitmap[word_bitidx]);
 	for (;;) {
 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 		if (word == old_word)
diff --git a/mm/rmap.c b/mm/rmap.c
index c161a14b6a8f..24dd3f9fee27 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -456,7 +456,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
 	if (!page_mapped(page))
@@ -500,14 +500,14 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
-	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 		goto out;
 	if (!page_mapped(page))
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	root_anon_vma = ACCESS_ONCE(anon_vma->root);
+	root_anon_vma = READ_ONCE(anon_vma->root);
 	if (down_read_trylock(&root_anon_vma->rwsem)) {
 		/*
 		 * If the page is still mapped, then this anon_vma is still
diff --git a/mm/slub.c b/mm/slub.c
index 0fdd6c1e1f82..54c0876b43d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4277,7 +4277,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int node;
 		struct page *page;
 
-		page = ACCESS_ONCE(c->page);
+		page = READ_ONCE(c->page);
 		if (!page)
 			continue;
 
@@ -4292,7 +4292,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		total += x;
 		nodes[node] += x;
 
-		page = ACCESS_ONCE(c->partial);
+		page = READ_ONCE(c->partial);
 		if (page) {
 			node = page_to_nid(page);
 			if (flags & SO_TOTAL)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 405923f77334..8bc8e66138da 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -390,7 +390,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 	unsigned int pages, max_pages, last_ra;
 	static atomic_t last_readahead_pages;
 
-	max_pages = 1 << ACCESS_ONCE(page_cluster);
+	max_pages = 1 << READ_ONCE(page_cluster);
 	if (max_pages <= 1)
 		return 1;
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 63f55ccb9b26..a7e72103f23b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1312,7 +1312,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
 			else
 				continue;
 		}
-		count = ACCESS_ONCE(si->swap_map[i]);
+		count = READ_ONCE(si->swap_map[i]);
 		if (count && swap_count(count) != SWAP_MAP_BAD)
 			break;
 	}