author     Jason Low <jason.low2@hp.com>                    2015-04-15 19:14:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-04-15 19:35:18 -0400
commit     4db0c3c2983cc6b7a08a33542af5e14de8a9258c (patch)
tree       66cfeaeae432f904c09af45e030b7e1e00476011 /mm/page_alloc.c
parent     9d8c47e4bb1c20dbceee437f9fa7d76dafee80a2 (diff)
mm: remove rest of ACCESS_ONCE() usages
We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/ tree since ACCESS_ONCE does not work reliably on non-scalar types. This patch removes the remaining ACCESS_ONCE usages and uses the new READ_ONCE API for the read accesses. This makes things cleaner: one API instead of separate/multiple sets of APIs.

Signed-off-by: Jason Low <jason.low2@hp.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
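For context, a simplified sketch of the two macros is shown below. This is an approximation for illustration, not the verbatim kernel compiler.h of that era: ACCESS_ONCE() is a bare volatile cast of the lvalue, which some compilers mishandle when typeof(x) is a non-scalar (struct/union) type, whereas READ_ONCE() funnels the access through a size-dispatched helper so the load is always a single volatile access of a known scalar width.

#include <string.h>

/*
 * Size-dispatched helper: the load is always one volatile access of a known
 * scalar width, with a plain copy as the fallback for odd sizes.
 */
static inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(char *)res = *(volatile char *)p; break;
	case 2: *(short *)res = *(volatile short *)p; break;
	case 4: *(int *)res = *(volatile int *)p; break;
	case 8: *(long long *)res = *(volatile long long *)p; break;
	default: memcpy(res, (const void *)p, size); break;
	}
}

/*
 * Old macro: a volatile cast of the lvalue itself.  Some compilers silently
 * drop the volatile qualifier when typeof(x) is an aggregate type, which is
 * why it is unreliable for non-scalar types.
 */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

/*
 * New macro: copy x out through the helper above and hand back a value of the
 * original type, so the "read exactly once" guarantee no longer depends on x
 * being scalar.
 */
#define READ_ONCE(x)							\
({									\
	union { __typeof__(x) __val; char __c[sizeof(x)]; } __u;	\
	__read_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})

With the new macro, a read such as batch = READ_ONCE(pcp->batch) in the hunks below is one load that the compiler cannot re-fetch or merge with neighbouring accesses, which is the property the old ACCESS_ONCE provided only for scalar types.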
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b849500640c..ebffa0e4a9c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1371,7 +1371,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	int to_drain, batch;
 
 	local_irq_save(flags);
-	batch = ACCESS_ONCE(pcp->batch);
+	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0) {
 		free_pcppages_bulk(zone, to_drain, pcp);
@@ -1570,7 +1570,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		unsigned long batch = ACCESS_ONCE(pcp->batch);
+		unsigned long batch = READ_ONCE(pcp->batch);
 		free_pcppages_bulk(zone, batch, pcp);
 		pcp->count -= batch;
 	}
@@ -6207,7 +6207,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 	mask <<= (BITS_PER_LONG - bitidx - 1);
 	flags <<= (BITS_PER_LONG - bitidx - 1);
 
-	word = ACCESS_ONCE(bitmap[word_bitidx]);
+	word = READ_ONCE(bitmap[word_bitidx]);
 	for (;;) {
 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 		if (word == old_word)
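The last hunk is the usual lockless read-modify-write pattern: take one snapshot of the word, then loop on cmpxchg() until the compare succeeds. A self-contained C11 sketch of the same pattern follows; the function name and the use of <stdatomic.h> are illustrative stand-ins for the kernel's cmpxchg(), not kernel code.

#include <stdatomic.h>

/*
 * Illustrative stand-in for the read-once + cmpxchg retry loop in
 * set_pfnblock_flags_mask() above.
 */
static unsigned long update_word_masked(_Atomic unsigned long *word_p,
					unsigned long flags, unsigned long mask)
{
	/* One stable snapshot of the word -- the job READ_ONCE() does above. */
	unsigned long word = atomic_load_explicit(word_p, memory_order_relaxed);

	for (;;) {
		unsigned long new_word = (word & ~mask) | flags;

		/*
		 * On failure, compare_exchange writes the value it actually
		 * found into 'word', so the next iteration retries against
		 * the freshly observed contents.
		 */
		if (atomic_compare_exchange_weak(word_p, &word, new_word))
			return new_word;
	}
}

The READ_ONCE() (or the relaxed atomic load standing in for it here) matters because the loop's correctness relies on comparing against the value that was actually read once, not a value the compiler may silently re-read.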